chore: Update discovery artifacts (#1291)
* chore: update docs/dyn/index.md
* chore(abusiveexperiencereport): update the api
* chore(acceleratedmobilepageurl): update the api
* chore(accessapproval): update the api
* chore(accesscontextmanager): update the api
* chore(adexchangebuyer2): update the api
* chore(adexperiencereport): update the api
* chore(admob): update the api
* chore(analytics): update the api
* chore(analyticsreporting): update the api
* chore(androiddeviceprovisioning): update the api
* chore(androidenterprise): update the api
* chore(androidpublisher): update the api
* chore(apigateway): update the api
* chore(artifactregistry): update the api
* chore(bigqueryconnection): update the api
* chore(bigquerydatatransfer): update the api
* chore(billingbudgets): update the api
* chore(binaryauthorization): update the api
* chore(blogger): update the api
* chore(books): update the api
* chore(calendar): update the api
* chore(chat): update the api
* chore(chromeuxreport): update the api
* chore(civicinfo): update the api
* chore(classroom): update the api
* chore(cloudbilling): update the api
* chore(cloudbuild): update the api
* chore(clouddebugger): update the api
* chore(clouderrorreporting): update the api
* chore(cloudfunctions): update the api
* chore(cloudidentity): update the api
* chore(cloudiot): update the api
* chore(cloudkms): update the api
* chore(cloudprofiler): update the api
* chore(cloudresourcemanager): update the api
* chore(cloudscheduler): update the api
* chore(cloudshell): update the api
* chore(cloudtasks): update the api
* chore(cloudtrace): update the api
* chore(composer): update the api
* chore(containeranalysis): update the api
* chore(content): update the api
* chore(customsearch): update the api
* chore(datacatalog): update the api
* chore(datafusion): update the api
* chore(datamigration): update the api
* chore(datastore): update the api
* chore(deploymentmanager): update the api
* chore(digitalassetlinks): update the api
* chore(displayvideo): update the api
* chore(dlp): update the api
* chore(dns): update the api
* chore(docs): update the api
* chore(domains): update the api
* chore(domainsrdap): update the api
* chore(doubleclickbidmanager): update the api
* chore(doubleclicksearch): update the api
* chore(drive): update the api
* chore(driveactivity): update the api
* chore(eventarc): update the api
* chore(factchecktools): update the api
* chore(fcm): update the api
* chore(file): update the api
* chore(firebase): update the api
* chore(firebasedatabase): update the api
* chore(firebasedynamiclinks): update the api
* chore(firebasehosting): update the api
* chore(firebaseml): update the api
* chore(firebaserules): update the api
* chore(firestore): update the api
* chore(fitness): update the api
* chore(gamesConfiguration): update the api
* chore(gamesManagement): update the api
* chore(gameservices): update the api
* chore(genomics): update the api
* chore(gmail): update the api
* chore(gmailpostmastertools): update the api
* chore(groupsmigration): update the api
* chore(groupssettings): update the api
* chore(healthcare): update the api
* chore(iam): update the api
* chore(iamcredentials): update the api
* chore(iap): update the api
* chore(identitytoolkit): update the api
* chore(indexing): update the api
* chore(jobs): update the api
* chore(kgsearch): update the api
* chore(language): update the api
* chore(libraryagent): update the api
* chore(licensing): update the api
* chore(lifesciences): update the api
* chore(logging): update the api
* chore(managedidentities): update the api
* chore(manufacturers): update the api
* chore(memcache): update the api
* chore(ml): update the api
* chore(monitoring): update the api
* chore(networkmanagement): update the api
* chore(osconfig): update the api
* chore(pagespeedonline): update the api
* chore(playablelocations): update the api
* chore(playcustomapp): update the api
* chore(policytroubleshooter): update the api
* chore(poly): update the api
* chore(privateca): update the api
* chore(prod_tt_sasportal): update the api
* chore(pubsub): update the api
* chore(pubsublite): update the api
* chore(realtimebidding): update the api
* chore(recommendationengine): update the api
* chore(recommender): update the api
* chore(redis): update the api
* chore(remotebuildexecution): update the api
* chore(reseller): update the api
* chore(runtimeconfig): update the api
* chore(safebrowsing): update the api
* chore(sasportal): update the api
* chore(script): update the api
* chore(searchconsole): update the api
* chore(secretmanager): update the api
* chore(servicecontrol): update the api
* chore(servicedirectory): update the api
* chore(siteVerification): update the api
* chore(slides): update the api
* chore(smartdevicemanagement): update the api
* chore(sourcerepo): update the api
* chore(sqladmin): update the api
* chore(storage): update the api
* chore(storagetransfer): update the api
* chore(streetviewpublish): update the api
* chore(sts): update the api
* chore(tagmanager): update the api
* chore(tasks): update the api
* chore(testing): update the api
* chore(texttospeech): update the api
* chore(toolresults): update the api
* chore(trafficdirector): update the api
* chore(transcoder): update the api
* chore(translate): update the api
* chore(vault): update the api
* chore(vectortile): update the api
* chore(verifiedaccess): update the api
* chore(videointelligence): update the api
* chore(vision): update the api
* chore(webfonts): update the api
* chore(webmasters): update the api
* chore(websecurityscanner): update the api
* chore(workflowexecutions): update the api
* chore(workflows): update the api
* chore(youtubeAnalytics): update the api
* chore(youtubereporting): update the api
* chore(docs): Add new discovery artifacts and reference documents
diff --git a/docs/dyn/speech_v1p1beta1.speech.html b/docs/dyn/speech_v1p1beta1.speech.html
index b9df605..f738246 100644
--- a/docs/dyn/speech_v1p1beta1.speech.html
+++ b/docs/dyn/speech_v1p1beta1.speech.html
@@ -103,7 +103,7 @@
"uri": "A String", # URI that points to a file that contains audio data bytes as specified in `RecognitionConfig`. The file must not be compressed (for example, gzip). Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: `gs://bucket_name/object_name` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
},
"config": { # Provides information to the recognizer that specifies how to process the request. # Required. Provides information to the recognizer that specifies how to process the request.
- "adaptation": { # Speech adaptation configuration. # Speech adaptation configuration improves the accuracy of speech recognition. When speech adaptation is set it supersedes the `speech_contexts` field. For more information, see the [speech adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) documentation.
+ "adaptation": { # Speech adaptation configuration. # Speech adaptation configuration improves the accuracy of speech recognition. When speech adaptation is set it supersedes the `speech_contexts` field. For more information, see the [speech adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) documentation.
"customClasses": [ # A collection of custom classes. To specify the classes inline, leave the class' `name` blank and fill in the rest of its fields, giving it a unique `custom_class_id`. Refer to the inline defined class in phrase hints by its `custom_class_id`.
{ # A set of words or phrases that represents a common concept likely to appear in your audio, for example a list of passenger ship names. CustomClass items can be substituted into placeholders that you set in PhraseSet phrases.
"customClassId": "A String", # If this custom class is a resource, the custom_class_id is the resource id of the CustomClass. Case sensitive.
@@ -145,6 +145,8 @@
"enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no effect at all. The default 'false' value does not add punctuation to result hypotheses.
"enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1 to get each channel recognized separately. The recognition result will contain a `channel_tag` field to state which channel that result belongs to. If this is not true, we will only recognize the first channel. The request is billed cumulatively for all channels recognized: `audio_channel_count` multiplied by the length of the audio.
"enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in the top alternative of the recognition result using a speaker_tag provided in the WordInfo. Note: Use diarization_config instead.
+ "enableSpokenEmojis": True or False, # The spoken emoji behavior for the call If not set, uses default behavior based on model of choice If 'true', adds spoken emoji formatting for the request. This will replace spoken emojis with the corresponding Unicode symbols in the final transcript. If 'false', spoken emojis are not replaced.
+ "enableSpokenPunctuation": True or False, # The spoken punctuation behavior for the call If not set, uses default behavior based on model of choice e.g. command_and_search will enable spoken punctuation by default If 'true', replaces spoken punctuation with the corresponding symbols in the request. For example, "how are you question mark" becomes "how are you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation for support. If 'false', spoken punctuation is not replaced.
"enableWordConfidence": True or False, # If `true`, the top result includes a list of words and the confidence for those words. If `false`, no word-level confidence information is returned. The default is `false`.
"enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If `false`, no word-level time offset information is returned. The default is `false`.
"encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV` audio files and required for all other audio formats. For details, see AudioEncoding.
@@ -164,7 +166,7 @@
"model": "A String", # Which model to select for the given request. Select the model best suited to your domain to get best results. If a model is not explicitly specified, then we auto-select a model based on the parameters in the RecognitionConfig. *Model* *Description* command_and_search Best for short queries such as voice commands or voice search. phone_call Best for audio that originated from a phone call (typically recorded at an 8khz sampling rate). video Best for audio that originated from from video or includes multiple speakers. Ideally the audio is recorded at a 16khz or greater sampling rate. This is a premium model that costs more than the standard rate. default Best for audio that is not one of the specific audio models. For example, long-form audio. Ideally the audio is high-fidelity, recorded at a 16khz or greater sampling rate.
"profanityFilter": True or False, # If set to `true`, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
"sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of re-sampling). This field is optional for FLAC and WAV audio files, but is required for all other audio formats. For details, see AudioEncoding.
- "speechContexts": [ # Array of SpeechContext. A means to provide context to assist the speech recognition. For more information, see [speech adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
+ "speechContexts": [ # Array of SpeechContext. A means to provide context to assist the speech recognition. For more information, see [speech adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
{ # Provides "hints" to the speech recognizer to favor specific words and phrases in the results.
"boost": 3.14, # Hint Boost. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Negative boost values would correspond to anti-biasing. Anti-biasing is not enabled, so negative boost will simply be ignored. Though `boost` can accept a wide range of positive values, most use cases are best served with values between 0 and 20. We recommend using a binary search approach to finding the optimal value for your use case.
"phrases": [ # A list of strings containing words and phrases "hints" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. See [usage limits](https://cloud.google.com/speech-to-text/quotas#content). List items can also be set to classes for groups of words that represent common concepts that occur in natural language. For example, rather than providing phrase hints for every month of the year, using the $MONTH class improves the likelihood of correctly transcribing audio that includes months.
@@ -222,7 +224,7 @@
"uri": "A String", # URI that points to a file that contains audio data bytes as specified in `RecognitionConfig`. The file must not be compressed (for example, gzip). Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: `gs://bucket_name/object_name` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
},
"config": { # Provides information to the recognizer that specifies how to process the request. # Required. Provides information to the recognizer that specifies how to process the request.
- "adaptation": { # Speech adaptation configuration. # Speech adaptation configuration improves the accuracy of speech recognition. When speech adaptation is set it supersedes the `speech_contexts` field. For more information, see the [speech adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) documentation.
+ "adaptation": { # Speech adaptation configuration. # Speech adaptation configuration improves the accuracy of speech recognition. When speech adaptation is set it supersedes the `speech_contexts` field. For more information, see the [speech adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) documentation.
"customClasses": [ # A collection of custom classes. To specify the classes inline, leave the class' `name` blank and fill in the rest of its fields, giving it a unique `custom_class_id`. Refer to the inline defined class in phrase hints by its `custom_class_id`.
{ # A set of words or phrases that represents a common concept likely to appear in your audio, for example a list of passenger ship names. CustomClass items can be substituted into placeholders that you set in PhraseSet phrases.
"customClassId": "A String", # If this custom class is a resource, the custom_class_id is the resource id of the CustomClass. Case sensitive.
@@ -264,6 +266,8 @@
"enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no effect at all. The default 'false' value does not add punctuation to result hypotheses.
"enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1 to get each channel recognized separately. The recognition result will contain a `channel_tag` field to state which channel that result belongs to. If this is not true, we will only recognize the first channel. The request is billed cumulatively for all channels recognized: `audio_channel_count` multiplied by the length of the audio.
"enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in the top alternative of the recognition result using a speaker_tag provided in the WordInfo. Note: Use diarization_config instead.
+ "enableSpokenEmojis": True or False, # The spoken emoji behavior for the call If not set, uses default behavior based on model of choice If 'true', adds spoken emoji formatting for the request. This will replace spoken emojis with the corresponding Unicode symbols in the final transcript. If 'false', spoken emojis are not replaced.
+ "enableSpokenPunctuation": True or False, # The spoken punctuation behavior for the call If not set, uses default behavior based on model of choice e.g. command_and_search will enable spoken punctuation by default If 'true', replaces spoken punctuation with the corresponding symbols in the request. For example, "how are you question mark" becomes "how are you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation for support. If 'false', spoken punctuation is not replaced.
"enableWordConfidence": True or False, # If `true`, the top result includes a list of words and the confidence for those words. If `false`, no word-level confidence information is returned. The default is `false`.
"enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If `false`, no word-level time offset information is returned. The default is `false`.
"encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV` audio files and required for all other audio formats. For details, see AudioEncoding.
@@ -283,7 +287,7 @@
"model": "A String", # Which model to select for the given request. Select the model best suited to your domain to get best results. If a model is not explicitly specified, then we auto-select a model based on the parameters in the RecognitionConfig. *Model* *Description* command_and_search Best for short queries such as voice commands or voice search. phone_call Best for audio that originated from a phone call (typically recorded at an 8khz sampling rate). video Best for audio that originated from from video or includes multiple speakers. Ideally the audio is recorded at a 16khz or greater sampling rate. This is a premium model that costs more than the standard rate. default Best for audio that is not one of the specific audio models. For example, long-form audio. Ideally the audio is high-fidelity, recorded at a 16khz or greater sampling rate.
"profanityFilter": True or False, # If set to `true`, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
"sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of re-sampling). This field is optional for FLAC and WAV audio files, but is required for all other audio formats. For details, see AudioEncoding.
- "speechContexts": [ # Array of SpeechContext. A means to provide context to assist the speech recognition. For more information, see [speech adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
+ "speechContexts": [ # Array of SpeechContext. A means to provide context to assist the speech recognition. For more information, see [speech adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
{ # Provides "hints" to the speech recognizer to favor specific words and phrases in the results.
"boost": 3.14, # Hint Boost. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Negative boost values would correspond to anti-biasing. Anti-biasing is not enabled, so negative boost will simply be ignored. Though `boost` can accept a wide range of positive values, most use cases are best served with values between 0 and 20. We recommend using a binary search approach to finding the optimal value for your use case.
"phrases": [ # A list of strings containing words and phrases "hints" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. See [usage limits](https://cloud.google.com/speech-to-text/quotas#content). List items can also be set to classes for groups of words that represent common concepts that occur in natural language. For example, rather than providing phrase hints for every month of the year, using the $MONTH class improves the likelihood of correctly transcribing audio that includes months.