Add generated documentation for the Google Cloud Speech API (speech_v1beta1)
diff --git a/docs/dyn/speech_v1beta1.speech.html b/docs/dyn/speech_v1beta1.speech.html
new file mode 100644
index 0000000..4607dee
--- /dev/null
+++ b/docs/dyn/speech_v1beta1.speech.html
@@ -0,0 +1,340 @@
+<html><body>
+<style>
+
+body, h1, h2, h3, div, span, p, pre, a {
+  margin: 0;
+  padding: 0;
+  border: 0;
+  font-weight: inherit;
+  font-style: inherit;
+  font-size: 100%;
+  font-family: inherit;
+  vertical-align: baseline;
+}
+
+body {
+  font-size: 13px;
+  padding: 1em;
+}
+
+h1 {
+  font-size: 26px;
+  margin-bottom: 1em;
+}
+
+h2 {
+  font-size: 24px;
+  margin-bottom: 1em;
+}
+
+h3 {
+  font-size: 20px;
+  margin-bottom: 1em;
+  margin-top: 1em;
+}
+
+pre, code {
+  line-height: 1.5;
+  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
+}
+
+pre {
+  margin-top: 0.5em;
+}
+
+h1, h2, h3, p {
+  font-family: Arial, sans-serif;
+}
+
+h1, h2, h3 {
+  border-bottom: solid #CCC 1px;
+}
+
+.toc_element {
+  margin-top: 0.5em;
+}
+
+.firstline {
+  margin-left: 2em;
+}
+
+.method {
+  margin-top: 1em;
+  border: solid 1px #CCC;
+  padding: 1em;
+  background: #EEE;
+}
+
+.details {
+  font-weight: bold;
+  font-size: 14px;
+}
+
+</style>
+
+<h1><a href="speech_v1beta1.html">Google Cloud Speech API</a> . <a href="speech_v1beta1.speech.html">speech</a></h1>
+<h2>Instance Methods</h2>
+<p class="toc_element">
+  <code><a href="#asyncrecognize">asyncrecognize(body, x__xgafv=None)</a></code></p>
+<p class="firstline">Perform asynchronous speech-recognition: receive results via the</p>
+<p class="toc_element">
+  <code><a href="#syncrecognize">syncrecognize(body, x__xgafv=None)</a></code></p>
+<p class="firstline">Perform synchronous speech-recognition: receive results after all audio</p>
+<h3>Method Details</h3>
+<div class="method">
+    <code class="details" id="asyncrecognize">asyncrecognize(body, x__xgafv=None)</code>
+  <pre>Perform asynchronous speech recognition: receive results via the
+google.longrunning.Operations interface. Returns either an
+`Operation.error` or an `Operation.response` which contains
+an `AsyncRecognizeResponse` message.
+
+Args:
+  body: object, The request body. (required)
+    The object takes the form of:
+
+{ # `AsyncRecognizeRequest` is the top-level message sent by the client for
+      # the `AsyncRecognize` method.
+    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # [Required] The audio data to be recognized.
+        # Either `content` or `uri` must be supplied. Supplying both or neither
+        # returns google.rpc.Code.INVALID_ARGUMENT. See
+        # [audio limits](https://cloud.google.com/speech/limits#content).
+      "content": "A String", # The audio data bytes encoded as specified in
+          # `RecognitionConfig`. Note: as with all bytes fields, protocol buffers use a
+          # pure binary representation, whereas JSON representations use base64.
+      "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
+          # `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
+          # supported, which must be specified in the following format:
+          # `gs://bucket_name/object_name` (other URI formats return
+          # google.rpc.Code.INVALID_ARGUMENT). For more information, see
+          # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+    },
+    "config": { # The `RecognitionConfig` message provides information to the recognizer # [Required] The `config` message provides information to the recognizer
+        # that specifies how to process the request.
+      "languageCode": "A String", # [Optional] The language of the supplied audio as a BCP-47 language tag.
+          # Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
+          # If omitted, defaults to "en-US". See
+          # [Language Support](https://cloud.google.com/speech/docs/best-practices#language_support)
+          # for a list of the currently supported language codes.
+      "speechContext": { # Provides "hints" to the speech recognizer to favor specific words and phrases # [Optional] A means to provide context to assist the speech recognition.
+          # in the results.
+        "phrases": [ # [Optional] A list of strings containing words and phrases "hints" so that
+            # the speech recognition is more likely to recognize them. This can be used
+            # to improve the accuracy for specific words and phrases, for example, if
+            # specific commands are typically spoken by the user. This can also be used
+            # to add additional words to the vocabulary of the recognizer. See
+            # [usage limits](https://cloud.google.com/speech/limits#content).
+          "A String",
+        ],
+      },
+      "encoding": "A String", # [Required] Encoding of audio data sent in all `RecognitionAudio` messages.
+      "maxAlternatives": 42, # [Optional] Maximum number of recognition hypotheses to be returned.
+          # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+          # within each `SpeechRecognitionResult`.
+          # The server may return fewer than `max_alternatives`.
+          # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+          # `1`. If omitted, defaults to `1`.
+      "profanityFilter": True or False, # [Optional] If set to `true`, the server will attempt to filter out
+          # profanities, replacing all but the initial character in each filtered word
+          # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+          # won't be filtered out.
+      "sampleRate": 42, # [Required] Sample rate in Hertz of the audio data sent in all
+          # `RecognitionAudio` messages. Valid values are: 8000-48000.
+          # 16000 is optimal. For best results, set the sampling rate of the audio
+          # source to 16000 Hz. If that's not possible, use the native sample rate of
+          # the audio source (instead of re-sampling).
+    },
+  }
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a
+      # network API call.
+    "metadata": { # Service-specific metadata associated with the operation.  It typically
+        # contains progress information and common metadata such as create time.
+        # Some services might not provide such metadata.  Any method that returns a
+        # long-running operation should document the metadata type, if any.
+      "a_key": "", # Properties of the object. Contains field @type with type URL.
+    },
+    "done": True or False, # If the value is `false`, it means the operation is still in progress.
+        # If true, the operation is completed, and either `error` or `response` is
+        # available.
+    "response": { # The normal response of the operation in case of success.  If the original
+        # method returns no data on success, such as `Delete`, the response is
+        # `google.protobuf.Empty`.  If the original method is standard
+        # `Get`/`Create`/`Update`, the response should be the resource.  For other
+        # methods, the response should have the type `XxxResponse`, where `Xxx`
+        # is the original method name.  For example, if the original method name
+        # is `TakeSnapshot()`, the inferred response type is
+        # `TakeSnapshotResponse`.
+      "a_key": "", # Properties of the object. Contains field @type with type URL.
+    },
+    "name": "A String", # The server-assigned name, which is only unique within the same service that
+        # originally returns it. If you use the default HTTP mapping, the
+        # `name` should have the format of `operations/some/unique/name`.
+    "error": { # The `Status` type defines a logical error model that is suitable for different # The error result of the operation in case of failure.
+        # programming environments, including REST APIs and RPC APIs. It is used by
+        # [gRPC](https://github.com/grpc). The error model is designed to be:
+        #
+        # - Simple to use and understand for most users
+        # - Flexible enough to meet unexpected needs
+        #
+        # # Overview
+        #
+        # The `Status` message contains three pieces of data: error code, error message,
+        # and error details. The error code should be an enum value of
+        # google.rpc.Code, but it may accept additional error codes if needed.  The
+        # error message should be a developer-facing English message that helps
+        # developers *understand* and *resolve* the error. If a localized user-facing
+        # error message is needed, put the localized message in the error details or
+        # localize it in the client. The optional error details may contain arbitrary
+        # information about the error. There is a predefined set of error detail types
+        # in the package `google.rpc` which can be used for common error conditions.
+        #
+        # # Language mapping
+        #
+        # The `Status` message is the logical representation of the error model, but it
+        # is not necessarily the actual wire format. When the `Status` message is
+        # exposed in different client libraries and different wire protocols, it can be
+        # mapped differently. For example, it will likely be mapped to some exceptions
+        # in Java, but more likely mapped to some error codes in C.
+        #
+        # # Other uses
+        #
+        # The error model and the `Status` message can be used in a variety of
+        # environments, either with or without APIs, to provide a
+        # consistent developer experience across different environments.
+        #
+        # Example uses of this error model include:
+        #
+        # - Partial errors. If a service needs to return partial errors to the client,
+        #     it may embed the `Status` in the normal response to indicate the partial
+        #     errors.
+        #
+        # - Workflow errors. A typical workflow has multiple steps. Each step may
+        #     have a `Status` message for error reporting purpose.
+        #
+        # - Batch operations. If a client uses batch request and batch response, the
+        #     `Status` message should be used directly inside batch response, one for
+        #     each error sub-response.
+        #
+        # - Asynchronous operations. If an API call embeds asynchronous operation
+        #     results in its response, the status of those operations should be
+        #     represented directly using the `Status` message.
+        #
+        # - Logging. If some API errors are stored in logs, the message `Status` could
+        #     be used directly after any stripping needed for security/privacy reasons.
+      "message": "A String", # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+      "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+      "details": [ # A list of messages that carry the error details.  There will be a
+          # common set of message types for APIs to use.
+        {
+          "a_key": "", # Properties of the object. Contains field @type with type URL.
+        },
+      ],
+    },
+  }</pre>
+</div>
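+
+<p>Below is a minimal sketch of an <code>asyncrecognize</code> call, assuming
+the client construction shown above; the local file <code>audio.raw</code> and
+the polling loop via the separately documented <code>operations()</code>
+resource are illustrative assumptions, not part of the generated reference:</p>
+<pre>
+import base64
+import time
+
+from googleapiclient.discovery import build
+
+service = build('speech', 'v1beta1')
+
+# JSON requests carry the audio bytes base64-encoded (see `content` above).
+with open('audio.raw', 'rb') as f:  # hypothetical raw LINEAR16 recording
+    audio_content = base64.b64encode(f.read()).decode('utf-8')
+
+body = {
+    'config': {
+        'encoding': 'LINEAR16',  # raw 16-bit signed little-endian samples
+        'sampleRate': 16000,     # 16000 Hz is optimal, per the field notes
+        'languageCode': 'en-US',
+    },
+    'audio': {'content': audio_content},
+}
+
+operation = service.speech().asyncrecognize(body=body).execute()
+
+# The method returns a long-running Operation: poll until `done` is true,
+# then read either `response` (an AsyncRecognizeResponse) or `error`.
+while not operation.get('done'):
+    time.sleep(5)
+    operation = service.operations().get(name=operation['name']).execute()
+print(operation.get('response') or operation.get('error'))
+</pre>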
+
+<div class="method">
+    <code class="details" id="syncrecognize">syncrecognize(body, x__xgafv=None)</code>
+  <pre>Perform synchronous speech recognition: receive results after all audio
+has been sent and processed.
+
+Args:
+  body: object, The request body. (required)
+    The object takes the form of:
+
+{ # `SyncRecognizeRequest` is the top-level message sent by the client for
+      # the `SyncRecognize` method.
+    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # [Required] The audio data to be recognized.
+        # Either `content` or `uri` must be supplied. Supplying both or neither
+        # returns google.rpc.Code.INVALID_ARGUMENT. See
+        # [audio limits](https://cloud.google.com/speech/limits#content).
+      "content": "A String", # The audio data bytes encoded as specified in
+          # `RecognitionConfig`. Note: as with all bytes fields, protocol buffers use a
+          # pure binary representation, whereas JSON representations use base64.
+      "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
+          # `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
+          # supported, which must be specified in the following format:
+          # `gs://bucket_name/object_name` (other URI formats return
+          # google.rpc.Code.INVALID_ARGUMENT). For more information, see
+          # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+    },
+    "config": { # The `RecognitionConfig` message provides information to the recognizer # [Required] The `config` message provides information to the recognizer
+        # that specifies how to process the request.
+      "languageCode": "A String", # [Optional] The language of the supplied audio as a BCP-47 language tag.
+          # Example: "en-GB"  https://www.rfc-editor.org/rfc/bcp/bcp47.txt
+          # If omitted, defaults to "en-US". See
+          # [Language Support](https://cloud.google.com/speech/docs/best-practices#language_support)
+          # for a list of the currently supported language codes.
+      "speechContext": { # Provides "hints" to the speech recognizer to favor specific words and phrases # [Optional] A means to provide context to assist the speech recognition.
+          # in the results.
+        "phrases": [ # [Optional] A list of strings containing words and phrases "hints" so that
+            # the speech recognition is more likely to recognize them. This can be used
+            # to improve the accuracy for specific words and phrases, for example, if
+            # specific commands are typically spoken by the user. This can also be used
+            # to add additional words to the vocabulary of the recognizer. See
+            # [usage limits](https://cloud.google.com/speech/limits#content).
+          "A String",
+        ],
+      },
+      "encoding": "A String", # [Required] Encoding of audio data sent in all `RecognitionAudio` messages.
+      "maxAlternatives": 42, # [Optional] Maximum number of recognition hypotheses to be returned.
+          # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+          # within each `SpeechRecognitionResult`.
+          # The server may return fewer than `max_alternatives`.
+          # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+          # `1`. If omitted, defaults to `1`.
+      "profanityFilter": True or False, # [Optional] If set to `true`, the server will attempt to filter out
+          # profanities, replacing all but the initial character in each filtered word
+          # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+          # won't be filtered out.
+      "sampleRate": 42, # [Required] Sample rate in Hertz of the audio data sent in all
+          # `RecognitionAudio` messages. Valid values are: 8000-48000.
+          # 16000 is optimal. For best results, set the sampling rate of the audio
+          # source to 16000 Hz. If that's not possible, use the native sample rate of
+          # the audio source (instead of re-sampling).
+    },
+  }
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # `SyncRecognizeResponse` is the only message returned to the client by
+      # `SyncRecognize`. It contains the result as zero or more sequential
+      # `SpeechRecognitionResult` messages.
+    "results": [ # [Output-only] Sequential list of transcription results corresponding to
+        # sequential portions of audio.
+      { # A speech recognition result corresponding to a portion of the audio.
+        "alternatives": [ # [Output-only] May contain one or more recognition hypotheses (up to the
+            # maximum specified in `max_alternatives`).
+          { # Alternative hypotheses (a.k.a. n-best list).
+            "confidence": 3.14, # [Output-only] The confidence estimate between 0.0 and 1.0. A higher number
+                # means the system is more confident that the recognition is correct.
+                # This field is typically provided only for the top hypothesis, and only for
+                # `is_final=true` results.
+                # The default of 0.0 is a sentinel value indicating confidence was not set.
+            "transcript": "A String", # [Output-only] Transcript text representing the words that the user spoke.
+          },
+        ],
+      },
+    ],
+  }</pre>
+</div>
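+
+<p>Below is a minimal sketch of a <code>syncrecognize</code> call that reads
+audio from Cloud Storage, again assuming the client construction shown above;
+the <code>gs://</code> URI is a placeholder for an object you own:</p>
+<pre>
+from googleapiclient.discovery import build
+
+service = build('speech', 'v1beta1')
+
+body = {
+    'config': {
+        'encoding': 'FLAC',    # FLAC-encoded audio
+        'sampleRate': 16000,
+        'maxAlternatives': 2,  # request up to 2 hypotheses per result
+    },
+    'audio': {'uri': 'gs://my-bucket/my-audio.flac'},  # hypothetical object
+}
+
+response = service.speech().syncrecognize(body=body).execute()
+
+# Results are sequential; each carries up to `maxAlternatives` hypotheses.
+for result in response.get('results', []):
+    for alternative in result['alternatives']:
+        print(alternative.get('confidence'), alternative['transcript'])
+</pre>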
+
+</body></html>
\ No newline at end of file