docs: update docs (#916)
* fix: re-run script
* test: fix noxfile
diff --git a/docs/dyn/videointelligence_v1.videos.html b/docs/dyn/videointelligence_v1.videos.html
index c7dd597..b257ad9 100644
--- a/docs/dyn/videointelligence_v1.videos.html
+++ b/docs/dyn/videointelligence_v1.videos.html
@@ -101,11 +101,33 @@
# [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
"videoContext": { # Video context and/or feature-specific parameters. # Additional video context and/or feature-specific parameters.
"speechTranscriptionConfig": { # Config for SPEECH_TRANSCRIPTION. # Config for SPEECH_TRANSCRIPTION.
+ "enableAutomaticPunctuation": True or False, # Optional. If 'true', adds punctuation to recognition result hypotheses.
+ # This feature is only available in select languages. Setting this for
+ # requests in other languages has no effect at all. The default 'false' value
+ # does not add punctuation to result hypotheses. NOTE: "This is currently
+ # offered as an experimental service, complimentary to all users. In the
+ # future this may be exclusively available as a premium feature."
+ "audioTracks": [ # Optional. For file formats, such as MXF or MKV, supporting multiple audio
+ # tracks, specify up to two tracks. Default: track 0.
+ 42,
+ ],
+ "maxAlternatives": 42, # Optional. Maximum number of recognition hypotheses to be returned.
+ # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+ # within each `SpeechTranscription`. The server may return fewer than
+ # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
+ # return a maximum of one. If omitted, will return a maximum of one.
+ "diarizationSpeakerCount": 42, # Optional. If set, specifies the estimated number of speakers in the conversation.
+ # If not set, defaults to '2'.
+ # Ignored unless enable_speaker_diarization is set to true.
"languageCode": "A String", # Required. *Required* The language of the supplied audio as a
# [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
# Example: "en-US".
# See [Language Support](https://cloud.google.com/speech/docs/languages)
# for a list of the currently supported language codes.
+ "filterProfanity": True or False, # Optional. If set to `true`, the server will attempt to filter out
+ # profanities, replacing all but the initial character in each filtered word
+ # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ # won't be filtered out.
"speechContexts": [ # Optional. A means to provide context to assist the speech recognition.
{ # Provides "hints" to the speech recognizer to favor specific words and phrases
# in the results.
@@ -119,10 +141,6 @@
],
},
],
- "filterProfanity": True or False, # Optional. If set to `true`, the server will attempt to filter out
- # profanities, replacing all but the initial character in each filtered word
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- # won't be filtered out.
"enableSpeakerDiarization": True or False, # Optional. If 'true', enables speaker detection for each recognized word in
# the top alternative of the recognition result using a speaker_tag provided
# in the WordInfo.
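
The fields shuffled in the hunks above and below all live under videoContext.speechTranscriptionConfig in the request body of the videos().annotate() method this page documents. A minimal, hedged sketch of assembling such a request with the generated client follows; the discovery name "videointelligence"/"v1" and the config field names come from this page, while the features enum value, the bucket URI, the chosen option values, and the use of application-default credentials are illustrative assumptions.

from googleapiclient import discovery

# Assumes application-default credentials are available in the environment.
service = discovery.build("videointelligence", "v1")

body = {
    "inputUri": "gs://my-bucket/interview.mp4",   # placeholder Cloud Storage URI
    "features": ["SPEECH_TRANSCRIPTION"],
    "videoContext": {
        "speechTranscriptionConfig": {
            "languageCode": "en-US",              # required BCP-47 language tag
            "enableAutomaticPunctuation": True,   # experimental; select languages only
            "filterProfanity": True,              # masks all but the first character
            "maxAlternatives": 2,                 # valid range 0-30; 0 or 1 return at most one
            "enableSpeakerDiarization": True,
            "diarizationSpeakerCount": 3,         # ignored unless diarization is enabled
            "enableWordConfidence": True,         # word-level confidence in the top result
            "audioTracks": [0],                   # up to two tracks; default is track 0
        },
    },
}

operation = service.videos().annotate(body=body).execute()
print(operation["name"])  # server-assigned name ending with operations/{unique_id}
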
@@ -133,24 +151,6 @@
"enableWordConfidence": True or False, # Optional. If `true`, the top result includes a list of words and the
# confidence for those words. If `false`, no word-level confidence
# information is returned. The default is `false`.
- "enableAutomaticPunctuation": True or False, # Optional. If 'true', adds punctuation to recognition result hypotheses.
- # This feature is only available in select languages. Setting this for
- # requests in other languages has no effect at all. The default 'false' value
- # does not add punctuation to result hypotheses. NOTE: "This is currently
- # offered as an experimental service, complimentary to all users. In the
- # future this may be exclusively available as a premium feature."
- "audioTracks": [ # Optional. For file formats, such as MXF or MKV, supporting multiple audio
- # tracks, specify up to two tracks. Default: track 0.
- 42,
- ],
- "diarizationSpeakerCount": 42, # Optional. If set, specifies the estimated number of speakers in the conversation.
- # If not set, defaults to '2'.
- # Ignored unless enable_speaker_diarization is set to true.
- "maxAlternatives": 42, # Optional. Maximum number of recognition hypotheses to be returned.
- # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- # within each `SpeechTranscription`. The server may return fewer than
- # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
- # return a maximum of one. If omitted, will return a maximum of one.
},
"segments": [ # Video segments to annotate. The segments may overlap and are not required
# to be contiguous or span the whole video. If unspecified, each video is
@@ -162,6 +162,11 @@
# corresponding to the start of the segment (inclusive).
},
],
+ "shotChangeDetectionConfig": { # Config for SHOT_CHANGE_DETECTION. # Config for SHOT_CHANGE_DETECTION.
+ "model": "A String", # Model to use for shot change detection.
+ # Supported values: "builtin/stable" (the default if unset) and
+ # "builtin/latest".
+ },
"textDetectionConfig": { # Config for TEXT_DETECTION. # Config for TEXT_DETECTION.
"languageHints": [ # Language hint can be specified if the language to be detected is known a
# priori. It can increase the accuracy of the detection. Language hint must
@@ -174,11 +179,6 @@
# Supported values: "builtin/stable" (the default if unset) and
# "builtin/latest".
},
- "shotChangeDetectionConfig": { # Config for SHOT_CHANGE_DETECTION. # Config for SHOT_CHANGE_DETECTION.
- "model": "A String", # Model to use for shot change detection.
- # Supported values: "builtin/stable" (the default if unset) and
- # "builtin/latest".
- },
"labelDetectionConfig": { # Config for LABEL_DETECTION. # Config for LABEL_DETECTION.
"model": "A String", # Model to use for label detection.
# Supported values: "builtin/stable" (the default if unset) and
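
shotChangeDetectionConfig, textDetectionConfig, and labelDetectionConfig, whose blocks move in the two hunks above, share the same shape: an optional model field choosing between "builtin/stable" (the default if unset) and "builtin/latest", plus languageHints for text detection. A sketch of a multi-feature request combining them, reusing the service object from the previous example; the feature enum names, the segment field names, and the input URI are assumptions of this illustration rather than something this diff shows.

body = {
    "inputUri": "gs://my-bucket/trailer.mp4",   # placeholder Cloud Storage URI
    "features": ["SHOT_CHANGE_DETECTION", "LABEL_DETECTION", "TEXT_DETECTION"],
    "videoContext": {
        "shotChangeDetectionConfig": {"model": "builtin/stable"},
        "labelDetectionConfig": {"model": "builtin/latest"},
        "textDetectionConfig": {"languageHints": ["en"]},   # a-priori language hint
        # Optional: annotate only part of the video (segments may overlap).
        "segments": [
            {"startTimeOffset": "0s", "endTimeOffset": "120s"},
        ],
    },
}
operation = service.videos().annotate(body=body).execute()
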
@@ -242,6 +242,30 @@
{ # This resource represents a long-running operation that is the result of a
# network API call.
+ "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
+ # different programming environments, including REST APIs and RPC APIs. It is
+ # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+ # three pieces of data: error code, error message, and error details.
+ #
+ # You can find out more about this error model and how to work with it in the
+ # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
+ "details": [ # A list of messages that carry the error details. There is a common set of
+ # message types for APIs to use.
+ {
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ },
+ "metadata": { # Service-specific metadata associated with the operation. It typically
+ # contains progress information and common metadata such as create time.
+ # Some services might not provide such metadata. Any method that returns a
+ # long-running operation should document the metadata type, if any.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
"done": True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
# available.
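
Both the block added above and the block removed below describe the same long-running Operation resource that annotate() returns; only the field order in the generated docs changed. A short sketch of reading that dict, reusing the operation value from the first example; a real caller would poll the operation name until done is true rather than inspect the result immediately.

if operation.get("done"):
    if "error" in operation:
        # google.rpc.Status: numeric code, developer-facing English message, detail messages.
        err = operation["error"]
        print("Annotation failed:", err.get("code"), err.get("message"), err.get("details", []))
    else:
        print("Annotation finished:", operation.get("response"))
else:
    # Service-specific metadata usually carries progress and create-time information.
    print("Still running:", operation["name"], operation.get("metadata", {}))
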
@@ -258,30 +282,6 @@
"name": "A String", # The server-assigned name, which is only unique within the same service that
# originally returns it. If you use the default HTTP mapping, the
# `name` should be a resource name ending with `operations/{unique_id}`.
- "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- #
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
- "details": [ # A list of messages that carry the error details. There is a common set of
- # message types for APIs to use.
- {
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- ],
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
- },
- "metadata": { # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
}</pre>
</div>