docs: update docs (#916)

* fix: re-run script

* test: fix noxfile
diff --git a/docs/dyn/dialogflow_v2.projects.agent.environments.users.sessions.html b/docs/dyn/dialogflow_v2.projects.agent.environments.users.sessions.html
index f99a5d8..fa9ae76 100644
--- a/docs/dyn/dialogflow_v2.projects.agent.environments.users.sessions.html
+++ b/docs/dyn/dialogflow_v2.projects.agent.environments.users.sessions.html
@@ -143,24 +143,195 @@
     The object takes the form of:
 
 { # The request to detect user's intent.
+    "outputAudioConfig": { # Instructs the speech synthesizer on how to generate the output audio content. # Instructs the speech synthesizer how to generate the output
+        # audio. If this field is not set and agent-level speech synthesizer is not
+        # configured, no output audio is generated.
+        # If this audio config is supplied in a request, it overrides all existing
+        # text-to-speech settings applied to the agent.
+      "audioEncoding": "A String", # Required. Audio encoding of the synthesized audio content.
+      "synthesizeSpeechConfig": { # Configuration of how speech should be synthesized. # Configuration of how speech should be synthesized.
+        "volumeGainDb": 3.14, # Optional. Volume gain (in dB) of the normal native volume supported by the
+            # specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
+            # 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
+            # will play at approximately half the amplitude of the normal native signal
+            # amplitude. A value of +6.0 (dB) will play at approximately twice the
+            # amplitude of the normal native signal amplitude. We strongly recommend not
+            # to exceed +10 (dB) as there's usually no effective increase in loudness for
+            # any value greater than that.
+        "pitch": 3.14, # Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
+            # semitones from the original pitch. -20 means decrease 20 semitones from the
+            # original pitch.
+        "voice": { # Description of which voice to use for speech synthesis. # Optional. The desired voice of the synthesized audio.
+          "name": "A String", # Optional. The name of the voice. If not set, the service will choose a
+              # voice based on the other parameters such as language_code and
+              # ssml_gender.
+          "ssmlGender": "A String", # Optional. The preferred gender of the voice. If not set, the service will
+              # choose a voice based on the other parameters such as language_code and
+              # name. Note that this is only a preference, not requirement. If a
+              # voice of the appropriate gender is not available, the synthesizer should
+              # substitute a voice with a different gender rather than failing the request.
+        },
+        "speakingRate": 3.14, # Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
+            # native speed supported by the specific voice. 2.0 is twice as fast, and
+            # 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
+            # other values < 0.25 or > 4.0 will return an error.
+        "effectsProfileId": [ # Optional. An identifier which selects 'audio effects' profiles that are
+            # applied on (post synthesized) text to speech. Effects are applied on top of
+            # each other in the order they are given.
+          "A String",
+        ],
+      },
+      "sampleRateHertz": 42, # The synthesis sample rate (in hertz) for this audio. If not
+          # provided, then the synthesizer will use the default sample rate based on
+          # the audio encoding. If this is different from the voice's natural sample
+          # rate, then the synthesizer will honor this request by converting to the
+          # desired sample rate (which might result in worse audio quality).
+    },
+    "inputAudio": "A String", # The natural language speech audio to be processed. This field
+        # should be populated iff `query_input` is set to an input audio config.
+        # A single request can contain up to 1 minute of speech audio data.
+    "outputAudioConfigMask": "A String", # Mask for output_audio_config indicating which settings in this
+        # request-level config should override speech synthesizer settings defined at
+        # agent-level.
+        # 
+        # If unspecified or empty, output_audio_config replaces the agent-level
+        # config in its entirety.
+    "queryInput": { # Represents the query input. It can contain either: # Required. The input specification. It can be set to:
+        # 
+        # 1.  an audio config
+        #     which instructs the speech recognizer how to process the speech audio,
+        # 
+        # 2.  a conversational query in the form of text, or
+        # 
+        # 3.  an event that specifies which intent to trigger.
+        #
+        # 1.  An audio config which
+        #     instructs the speech recognizer how to process the speech audio.
+        #
+        # 2.  A conversational query in the form of text, or
+        #
+        # 3.  An event that specifies which intent to trigger.
+      "event": { # Events allow for matching intents by event name instead of the natural # The event to be processed.
+          # language input. For instance, input `<event: { name: "welcome_event",
+          # parameters: { name: "Sam" } }>` can trigger a personalized welcome response.
+          # The parameter `name` may be used by the agent in the response:
+          # `"Hello #welcome_event.name! What can I do for you today?"`.
+        "languageCode": "A String", # Required. The language of this query. See [Language
+            # Support](https://cloud.google.com/dialogflow/docs/reference/language)
+            # for a list of the currently supported language codes. Note that queries in
+            # the same session do not necessarily need to specify the same language.
+        "name": "A String", # Required. The unique identifier of the event.
+        "parameters": { # The collection of parameters associated with the event.
+            #
+            # Depending on your protocol or client library language, this is a
+            # map, associative array, symbol table, dictionary, or JSON object
+            # composed of a collection of (MapKey, MapValue) pairs:
+            #
+            # -   MapKey type: string
+            # -   MapKey value: parameter name
+            # -   MapValue type:
+            #     -   If parameter's entity type is a composite entity: map
+            #     -   Else: string or number, depending on parameter value type
+            # -   MapValue value:
+            #     -   If parameter's entity type is a composite entity:
+            #         map from composite entity property names to property values
+            #     -   Else: parameter value
+          "a_key": "", # Properties of the object.
+        },
+      },
+      "text": { # Represents the natural language text to be processed. # The natural language text to be processed.
+        "languageCode": "A String", # Required. The language of this conversational query. See [Language
+            # Support](https://cloud.google.com/dialogflow/docs/reference/language)
+            # for a list of the currently supported language codes. Note that queries in
+            # the same session do not necessarily need to specify the same language.
+        "text": "A String", # Required. The UTF-8 encoded natural language text to be processed.
+            # Text length must not exceed 256 characters.
+      },
+      "audioConfig": { # Instructs the speech recognizer how to process the audio content. # Instructs the speech recognizer how to process the speech audio.
+        "audioEncoding": "A String", # Required. Audio encoding of the audio content to process.
+        "singleUtterance": True or False, # If `false` (default), recognition does not cease until the
+            # client closes the stream.
+            # If `true`, the recognizer will detect a single spoken utterance in input
+            # audio. Recognition ceases when it detects the audio's voice has
+            # stopped or paused. In this case, once a detected intent is received, the
+            # client should close the stream and start a new request with a new stream as
+            # needed.
+            # Note: This setting is relevant only for streaming methods.
+            # Note: When specified, InputAudioConfig.single_utterance takes precedence
+            # over StreamingDetectIntentRequest.single_utterance.
+        "languageCode": "A String", # Required. The language of the supplied audio. Dialogflow does not do
+            # translations. See [Language
+            # Support](https://cloud.google.com/dialogflow/docs/reference/language)
+            # for a list of the currently supported language codes. Note that queries in
+            # the same session do not necessarily need to specify the same language.
+        "speechContexts": [ # Context information to assist speech recognition.
+            #
+            # See [the Cloud Speech
+            # documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
+            # for more details.
+          { # Hints for the speech recognizer to help with recognition in a specific
+              # conversation state.
+            "phrases": [ # Optional. A list of strings containing words and phrases that the speech
+                # recognizer should recognize with higher likelihood.
+                #
+                # This list can be used to:
+                # * improve accuracy for words and phrases you expect the user to say,
+                #   e.g. typical commands for your Dialogflow agent
+                # * add additional words to the speech recognizer vocabulary
+                # * ...
+                #
+                # See the [Cloud Speech
+                # documentation](https://cloud.google.com/speech-to-text/quotas) for usage
+                # limits.
+              "A String",
+            ],
+            "boost": 3.14, # Optional. Boost for this context compared to other contexts:
+                #
+                # * If the boost is positive, Dialogflow will increase the probability that
+                #   the phrases in this context are recognized over similar sounding phrases.
+                # * If the boost is unspecified or non-positive, Dialogflow will not apply
+                #   any boost.
+                #
+                # Dialogflow recommends that you use boosts in the range (0, 20] and that you
+                # find a value that fits your use case with binary search.
+          },
+        ],
+        "phraseHints": [ # A list of strings containing words and phrases that the speech
+            # recognizer should recognize with higher likelihood.
+            #
+            # See [the Cloud Speech
+            # documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
+            # for more details.
+            #
+            # This field is deprecated. Please use [speech_contexts]() instead. If you
+            # specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
+            # treat the [phrase_hints]() as a single additional [SpeechContext]().
+          "A String",
+        ],
+        "enableWordInfo": True or False, # If `true`, Dialogflow returns SpeechWordInfo in
+            # StreamingRecognitionResult with information about the recognized speech
+            # words, e.g. start and end time offsets. If false or unspecified, Speech
+            # doesn't return any word-level information.
+        "sampleRateHertz": 42, # Required. Sample rate (in Hertz) of the audio content sent in the query.
+            # Refer to
+            # [Cloud Speech API
+            # documentation](https://cloud.google.com/speech-to-text/docs/basics) for
+            # more details.
+        "model": "A String", # Which Speech model to select for the given request. Select the
+            # model best suited to your domain to get best results. If a model is not
+            # explicitly specified, then we auto-select a model based on the parameters
+            # in the InputAudioConfig.
+            # If enhanced speech model is enabled for the agent and an enhanced
+            # version of the specified model for the language does not exist, then the
+            # speech is recognized using the standard version of the specified model.
+            # Refer to
+            # [Cloud Speech API
+            # documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
+            # for more details.
+        "modelVariant": "A String", # Which variant of the Speech model to use.
+      },
+    },
     "queryParams": { # Represents the parameters of the conversational query. # The parameters of this query.
-      "payload": { # This field can be used to pass custom data to your webhook.
-          # Arbitrary JSON objects are supported.
-          # If supplied, the value is used to populate the
-          # `WebhookRequest.original_detect_intent_request.payload`
-          # field sent to your webhook.
-        "a_key": "", # Properties of the object.
-      },
-      "geoLocation": { # An object representing a latitude/longitude pair. This is expressed as a pair # The geo location of this conversational query.
-          # of doubles representing degrees latitude and degrees longitude. Unless
-          # specified otherwise, this must conform to the
-          # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-          # standard</a>. Values must be within normalized ranges.
-        "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-        "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-      },
-      "resetContexts": True or False, # Specifies whether to delete all contexts in the current session
-          # before the new ones are activated.
       "contexts": [ # The collection of contexts to be activated before this query is
           # executed.
         { # Represents a context.
@@ -264,193 +435,22 @@
           ],
         },
       ],
-    },
-    "outputAudioConfig": { # Instructs the speech synthesizer on how to generate the output audio content. # Instructs the speech synthesizer how to generate the output
-        # audio. If this field is not set and agent-level speech synthesizer is not
-        # configured, no output audio is generated.
-        # If this audio config is supplied in a request, it overrides all existing
-        # text-to-speech settings applied to the agent.
-      "sampleRateHertz": 42, # The synthesis sample rate (in hertz) for this audio. If not
-          # provided, then the synthesizer will use the default sample rate based on
-          # the audio encoding. If this is different from the voice's natural sample
-          # rate, then the synthesizer will honor this request by converting to the
-          # desired sample rate (which might result in worse audio quality).
-      "audioEncoding": "A String", # Required. Audio encoding of the synthesized audio content.
-      "synthesizeSpeechConfig": { # Configuration of how speech should be synthesized. # Configuration of how speech should be synthesized.
-        "effectsProfileId": [ # Optional. An identifier which selects 'audio effects' profiles that are
-            # applied on (post synthesized) text to speech. Effects are applied on top of
-            # each other in the order they are given.
-          "A String",
-        ],
-        "volumeGainDb": 3.14, # Optional. Volume gain (in dB) of the normal native volume supported by the
-            # specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
-            # 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
-            # will play at approximately half the amplitude of the normal native signal
-            # amplitude. A value of +6.0 (dB) will play at approximately twice the
-            # amplitude of the normal native signal amplitude. We strongly recommend not
-            # to exceed +10 (dB) as there's usually no effective increase in loudness for
-            # any value greater than that.
-        "pitch": 3.14, # Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
-            # semitones from the original pitch. -20 means decrease 20 semitones from the
-            # original pitch.
-        "voice": { # Description of which voice to use for speech synthesis. # Optional. The desired voice of the synthesized audio.
-          "name": "A String", # Optional. The name of the voice. If not set, the service will choose a
-              # voice based on the other parameters such as language_code and
-              # ssml_gender.
-          "ssmlGender": "A String", # Optional. The preferred gender of the voice. If not set, the service will
-              # choose a voice based on the other parameters such as language_code and
-              # name. Note that this is only a preference, not requirement. If a
-              # voice of the appropriate gender is not available, the synthesizer should
-              # substitute a voice with a different gender rather than failing the request.
-        },
-        "speakingRate": 3.14, # Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
-            # native speed supported by the specific voice. 2.0 is twice as fast, and
-            # 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
-            # other values < 0.25 or > 4.0 will return an error.
+      "resetContexts": True or False, # Specifies whether to delete all contexts in the current session
+          # before the new ones are activated.
+      "geoLocation": { # An object representing a latitude/longitude pair. This is expressed as a pair # The geo location of this conversational query.
+          # of doubles representing degrees latitude and degrees longitude. Unless
+          # specified otherwise, this must conform to the
+          # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+          # standard</a>. Values must be within normalized ranges.
+        "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+        "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
       },
-    },
-    "inputAudio": "A String", # The natural language speech audio to be processed. This field
-        # should be populated iff `query_input` is set to an input audio config.
-        # A single request can contain up to 1 minute of speech audio data.
-    "outputAudioConfigMask": "A String", # Mask for output_audio_config indicating which settings in this
-        # request-level config should override speech synthesizer settings defined at
-        # agent-level.
-        # 
-        # If unspecified or empty, output_audio_config replaces the agent-level
-        # config in its entirety.
-    "queryInput": { # Represents the query input. It can contain either: # Required. The input specification. It can be set to:
-        # 
-        # 1.  an audio config
-        #     which instructs the speech recognizer how to process the speech audio,
-        # 
-        # 2.  a conversational query in the form of text, or
-        # 
-        # 3.  an event that specifies which intent to trigger.
-        #
-        # 1.  An audio config which
-        #     instructs the speech recognizer how to process the speech audio.
-        #
-        # 2.  A conversational query in the form of text,.
-        #
-        # 3.  An event that specifies which intent to trigger.
-      "audioConfig": { # Instructs the speech recognizer how to process the audio content. # Instructs the speech recognizer how to process the speech audio.
-        "singleUtterance": True or False, # If `false` (default), recognition does not cease until the
-            # client closes the stream.
-            # If `true`, the recognizer will detect a single spoken utterance in input
-            # audio. Recognition ceases when it detects the audio's voice has
-            # stopped or paused. In this case, once a detected intent is received, the
-            # client should close the stream and start a new request with a new stream as
-            # needed.
-            # Note: This setting is relevant only for streaming methods.
-            # Note: When specified, InputAudioConfig.single_utterance takes precedence
-            # over StreamingDetectIntentRequest.single_utterance.
-        "languageCode": "A String", # Required. The language of the supplied audio. Dialogflow does not do
-            # translations. See [Language
-            # Support](https://cloud.google.com/dialogflow/docs/reference/language)
-            # for a list of the currently supported language codes. Note that queries in
-            # the same session do not necessarily need to specify the same language.
-        "phraseHints": [ # A list of strings containing words and phrases that the speech
-            # recognizer should recognize with higher likelihood.
-            #
-            # See [the Cloud Speech
-            # documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
-            # for more details.
-            #
-            # This field is deprecated. Please use [speech_contexts]() instead. If you
-            # specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
-            # treat the [phrase_hints]() as a single additional [SpeechContext]().
-          "A String",
-        ],
-        "speechContexts": [ # Context information to assist speech recognition.
-            #
-            # See [the Cloud Speech
-            # documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
-            # for more details.
-          { # Hints for the speech recognizer to help with recognition in a specific
-              # conversation state.
-            "boost": 3.14, # Optional. Boost for this context compared to other contexts:
-                #
-                # * If the boost is positive, Dialogflow will increase the probability that
-                #   the phrases in this context are recognized over similar sounding phrases.
-                # * If the boost is unspecified or non-positive, Dialogflow will not apply
-                #   any boost.
-                #
-                # Dialogflow recommends that you use boosts in the range (0, 20] and that you
-                # find a value that fits your use case with binary search.
-            "phrases": [ # Optional. A list of strings containing words and phrases that the speech
-                # recognizer should recognize with higher likelihood.
-                #
-                # This list can be used to:
-                # * improve accuracy for words and phrases you expect the user to say,
-                #   e.g. typical commands for your Dialogflow agent
-                # * add additional words to the speech recognizer vocabulary
-                # * ...
-                #
-                # See the [Cloud Speech
-                # documentation](https://cloud.google.com/speech-to-text/quotas) for usage
-                # limits.
-              "A String",
-            ],
-          },
-        ],
-        "enableWordInfo": True or False, # If `true`, Dialogflow returns SpeechWordInfo in
-            # StreamingRecognitionResult with information about the recognized speech
-            # words, e.g. start and end time offsets. If false or unspecified, Speech
-            # doesn't return any word-level information.
-        "model": "A String", # Which Speech model to select for the given request. Select the
-            # model best suited to your domain to get best results. If a model is not
-            # explicitly specified, then we auto-select a model based on the parameters
-            # in the InputAudioConfig.
-            # If enhanced speech model is enabled for the agent and an enhanced
-            # version of the specified model for the language does not exist, then the
-            # speech is recognized using the standard version of the specified model.
-            # Refer to
-            # [Cloud Speech API
-            # documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
-            # for more details.
-        "sampleRateHertz": 42, # Required. Sample rate (in Hertz) of the audio content sent in the query.
-            # Refer to
-            # [Cloud Speech API
-            # documentation](https://cloud.google.com/speech-to-text/docs/basics) for
-            # more details.
-        "modelVariant": "A String", # Which variant of the Speech model to use.
-        "audioEncoding": "A String", # Required. Audio encoding of the audio content to process.
-      },
-      "event": { # Events allow for matching intents by event name instead of the natural # The event to be processed.
-          # language input. For instance, input `<event: { name: "welcome_event",
-          # parameters: { name: "Sam" } }>` can trigger a personalized welcome response.
-          # The parameter `name` may be used by the agent in the response:
-          # `"Hello #welcome_event.name! What can I do for you today?"`.
-        "languageCode": "A String", # Required. The language of this query. See [Language
-            # Support](https://cloud.google.com/dialogflow/docs/reference/language)
-            # for a list of the currently supported language codes. Note that queries in
-            # the same session do not necessarily need to specify the same language.
-        "name": "A String", # Required. The unique identifier of the event.
-        "parameters": { # The collection of parameters associated with the event.
-            #
-            # Depending on your protocol or client library language, this is a
-            # map, associative array, symbol table, dictionary, or JSON object
-            # composed of a collection of (MapKey, MapValue) pairs:
-            #
-            # -   MapKey type: string
-            # -   MapKey value: parameter name
-            # -   MapValue type:
-            #     -   If parameter's entity type is a composite entity: map
-            #     -   Else: string or number, depending on parameter value type
-            # -   MapValue value:
-            #     -   If parameter's entity type is a composite entity:
-            #         map from composite entity property names to property values
-            #     -   Else: parameter value
-          "a_key": "", # Properties of the object.
-        },
-      },
-      "text": { # Represents the natural language text to be processed. # The natural language text to be processed.
-        "text": "A String", # Required. The UTF-8 encoded natural language text to be processed.
-            # Text length must not exceed 256 characters.
-        "languageCode": "A String", # Required. The language of this conversational query. See [Language
-            # Support](https://cloud.google.com/dialogflow/docs/reference/language)
-            # for a list of the currently supported language codes. Note that queries in
-            # the same session do not necessarily need to specify the same language.
+      "payload": { # This field can be used to pass custom data to your webhook.
+          # Arbitrary JSON objects are supported.
+          # If supplied, the value is used to populate the
+          # `WebhookRequest.original_detect_intent_request.payload`
+          # field sent to your webhook.
+        "a_key": "", # Properties of the object.
       },
     },
   }
@@ -467,18 +467,8 @@
     "outputAudioConfig": { # Instructs the speech synthesizer on how to generate the output audio content. # The config used by the speech synthesizer to generate the output audio.
         # If this audio config is supplied in a request, it overrides all existing
         # text-to-speech settings applied to the agent.
-      "sampleRateHertz": 42, # The synthesis sample rate (in hertz) for this audio. If not
-          # provided, then the synthesizer will use the default sample rate based on
-          # the audio encoding. If this is different from the voice's natural sample
-          # rate, then the synthesizer will honor this request by converting to the
-          # desired sample rate (which might result in worse audio quality).
       "audioEncoding": "A String", # Required. Audio encoding of the synthesized audio content.
       "synthesizeSpeechConfig": { # Configuration of how speech should be synthesized. # Configuration of how speech should be synthesized.
-        "effectsProfileId": [ # Optional. An identifier which selects 'audio effects' profiles that are
-            # applied on (post synthesized) text to speech. Effects are applied on top of
-            # each other in the order they are given.
-          "A String",
-        ],
         "volumeGainDb": 3.14, # Optional. Volume gain (in dB) of the normal native volume supported by the
             # specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
             # 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
@@ -504,10 +494,238 @@
             # native speed supported by the specific voice. 2.0 is twice as fast, and
             # 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any
             # other values < 0.25 or > 4.0 will return an error.
+        "effectsProfileId": [ # Optional. An identifier which selects 'audio effects' profiles that are
+            # applied on (post synthesized) text to speech. Effects are applied on top of
+            # each other in the order they are given.
+          "A String",
+        ],
       },
+      "sampleRateHertz": 42, # The synthesis sample rate (in hertz) for this audio. If not
+          # provided, then the synthesizer will use the default sample rate based on
+          # the audio encoding. If this is different from the voice's natural sample
+          # rate, then the synthesizer will honor this request by converting to the
+          # desired sample rate (which might result in worse audio quality).
     },
     "queryResult": { # Represents the result of conversational query or event processing. # The selected results of the conversational query or event processing.
         # See `alternative_query_results` for additional potential results.
+      "fulfillmentMessages": [ # The collection of rich messages to present to the user.
+        { # A rich response message.
+            # Corresponds to the intent `Response` field in the Dialogflow console.
+            # For more information, see
+            # [Rich response
+            # messages](https://cloud.google.com/dialogflow/docs/intents-rich-messages).
+          "mediaContent": { # The media content card for Actions on Google. # The media content card for Actions on Google.
+            "mediaType": "A String", # Optional. What type of media is the content (ie "audio").
+            "mediaObjects": [ # Required. List of media objects.
+              { # Response media object for media content card.
+                "name": "A String", # Required. Name of media card.
+                "description": "A String", # Optional. Description of media card.
+                "contentUrl": "A String", # Required. Url where the media is stored.
+                "icon": { # The image response message. # Optional. Icon to display above media content.
+                  "imageUri": "A String", # Optional. The public URI to an image file.
+                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                      # e.g., screen readers.
+                },
+                "largeImage": { # The image response message. # Optional. Image to display above media content.
+                  "imageUri": "A String", # Optional. The public URI to an image file.
+                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                      # e.g., screen readers.
+                },
+              },
+            ],
+          },
+          "image": { # The image response message. # The image response.
+            "imageUri": "A String", # Optional. The public URI to an image file.
+            "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                # e.g., screen readers.
+          },
+          "payload": { # A custom platform-specific response.
+            "a_key": "", # Properties of the object.
+          },
+          "text": { # The text response message. # The text response.
+            "text": [ # Optional. The collection of the agent's responses.
+              "A String",
+            ],
+          },
+          "platform": "A String", # Optional. The platform that this message is intended for.
+          "suggestions": { # The collection of suggestions. # The suggestion chips for Actions on Google.
+            "suggestions": [ # Required. The list of suggested replies.
+              { # The suggestion chip message that the user can tap to quickly post a reply
+                  # to the conversation.
+                "title": "A String", # Required. The text shown the in the suggestion chip.
+              },
+            ],
+          },
+          "listSelect": { # The card for presenting a list of options to select from. # The list card response for Actions on Google.
+            "subtitle": "A String", # Optional. Subtitle of the list.
+            "items": [ # Required. List items.
+              { # An item in the list.
+                "title": "A String", # Required. The title of the list item.
+                "image": { # The image response message. # Optional. The image to display.
+                  "imageUri": "A String", # Optional. The public URI to an image file.
+                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                      # e.g., screen readers.
+                },
+                "description": "A String", # Optional. The main text describing the item.
+                "info": { # Additional info about the select item for when it is triggered in a # Required. Additional information about this option.
+                    # dialog.
+                  "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
+                      # item in dialog.
+                    "A String",
+                  ],
+                  "key": "A String", # Required. A unique key that will be sent back to the agent if this
+                      # response is given.
+                },
+              },
+            ],
+            "title": "A String", # Optional. The overall title of the list.
+          },
+          "quickReplies": { # The quick replies response message. # The quick replies response.
+            "title": "A String", # Optional. The title of the collection of quick replies.
+            "quickReplies": [ # Optional. The collection of quick replies.
+              "A String",
+            ],
+          },
+          "card": { # The card response message. # The card response.
+            "imageUri": "A String", # Optional. The public URI to an image file for the card.
+            "title": "A String", # Optional. The title of the card.
+            "buttons": [ # Optional. The collection of card buttons.
+              { # Contains information about a button.
+                "text": "A String", # Optional. The text to show on the button.
+                "postback": "A String", # Optional. The text to send back to the Dialogflow API or a URI to
+                    # open.
+              },
+            ],
+            "subtitle": "A String", # Optional. The subtitle of the card.
+          },
+          "basicCard": { # The basic card message. Useful for displaying information. # The basic card response for Actions on Google.
+            "title": "A String", # Optional. The title of the card.
+            "image": { # The image response message. # Optional. The image for the card.
+              "imageUri": "A String", # Optional. The public URI to an image file.
+              "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                  # e.g., screen readers.
+            },
+            "formattedText": "A String", # Required, unless image is present. The body text of the card.
+            "buttons": [ # Optional. The collection of card buttons.
+              { # The button object that appears at the bottom of a card.
+                "title": "A String", # Required. The title of the button.
+                "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
+                  "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
+                },
+              },
+            ],
+            "subtitle": "A String", # Optional. The subtitle of the card.
+          },
+          "tableCard": { # Table card for Actions on Google. # Table card for Actions on Google.
+            "title": "A String", # Required. Title of the card.
+            "rows": [ # Optional. Rows in this table of data.
+              { # Row of TableCard.
+                "dividerAfter": True or False, # Optional. Whether to add a visual divider after this row.
+                "cells": [ # Optional. List of cells that make up this row.
+                  { # Cell of TableCardRow.
+                    "text": "A String", # Required. Text in this cell.
+                  },
+                ],
+              },
+            ],
+            "subtitle": "A String", # Optional. Subtitle to the title.
+            "columnProperties": [ # Optional. Display properties for the columns in this table.
+              { # Column properties for TableCard.
+                "header": "A String", # Required. Column heading.
+                "horizontalAlignment": "A String", # Optional. Defines text alignment for all cells in this column.
+              },
+            ],
+            "image": { # The image response message. # Optional. Image which should be displayed on the card.
+              "imageUri": "A String", # Optional. The public URI to an image file.
+              "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                  # e.g., screen readers.
+            },
+            "buttons": [ # Optional. List of buttons for the card.
+              { # The button object that appears at the bottom of a card.
+                "title": "A String", # Required. The title of the button.
+                "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
+                  "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
+                },
+              },
+            ],
+          },
+          "carouselSelect": { # The card for presenting a carousel of options to select from. # The carousel card response for Actions on Google.
+            "items": [ # Required. Carousel items.
+              { # An item in the carousel.
+                "description": "A String", # Optional. The body text of the card.
+                "info": { # Additional info about the select item for when it is triggered in a # Required. Additional info about the option item.
+                    # dialog.
+                  "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
+                      # item in dialog.
+                    "A String",
+                  ],
+                  "key": "A String", # Required. A unique key that will be sent back to the agent if this
+                      # response is given.
+                },
+                "title": "A String", # Required. Title of the carousel item.
+                "image": { # The image response message. # Optional. The image to display.
+                  "imageUri": "A String", # Optional. The public URI to an image file.
+                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                      # e.g., screen readers.
+                },
+              },
+            ],
+          },
+          "linkOutSuggestion": { # The suggestion chip message that allows the user to jump out to the app # The link out suggestion chip for Actions on Google.
+              # or website associated with this agent.
+            "destinationName": "A String", # Required. The name of the app or site this chip is linking to.
+            "uri": "A String", # Required. The URI of the app or site to open when the user taps the
+                # suggestion chip.
+          },
+          "browseCarouselCard": { # Browse Carousel Card for Actions on Google. # Browse carousel card for Actions on Google.
+              # https://developers.google.com/actions/assistant/responses#browsing_carousel
+            "items": [ # Required. List of items in the Browse Carousel Card. Minimum of two
+                # items, maximum of ten.
+              { # Browsing carousel tile
+                "openUriAction": { # Actions on Google action to open a given url. # Required. Action to present to the user.
+                  "urlTypeHint": "A String", # Optional. Specifies the type of viewer that is used when opening
+                      # the URL. Defaults to opening via web browser.
+                  "url": "A String", # Required. URL
+                },
+                "footer": "A String", # Optional. Text that appears at the bottom of the Browse Carousel
+                    # Card. Maximum of one line of text.
+                "title": "A String", # Required. Title of the carousel item. Maximum of two lines of text.
+                "image": { # The image response message. # Optional. Hero image for the carousel item.
+                  "imageUri": "A String", # Optional. The public URI to an image file.
+                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                      # e.g., screen readers.
+                },
+                "description": "A String", # Optional. Description of the carousel item. Maximum of four lines of
+                    # text.
+              },
+            ],
+            "imageDisplayOptions": "A String", # Optional. Settings for displaying the image. Applies to every image in
+                # items.
+          },
+          "simpleResponses": { # The collection of simple response candidates. # The voice and text-only responses for Actions on Google.
+              # This message in `QueryResult.fulfillment_messages` and
+              # `WebhookResponse.fulfillment_messages` should contain only one
+              # `SimpleResponse`.
+            "simpleResponses": [ # Required. The list of simple responses.
+              { # The simple response message containing speech or text.
+                "textToSpeech": "A String", # One of text_to_speech or ssml must be provided. The plain text of the
+                    # speech output. Mutually exclusive with ssml.
+                "ssml": "A String", # One of text_to_speech or ssml must be provided. Structured spoken
+                    # response to the user in the SSML format. Mutually exclusive with
+                    # text_to_speech.
+                "displayText": "A String", # Optional. The text to display.
+              },
+            ],
+          },
+        },
+      ],
+      "webhookPayload": { # If the query was fulfilled by a webhook call, this field is set to the
+          # value of the `payload` field returned in the webhook response.
+        "a_key": "", # Properties of the object.
+      },
+      "action": "A String", # The action name from the matched intent.
+      "webhookSource": "A String", # If the query was fulfilled by a webhook call, this field is set to the
+          # value of the `source` field returned in the webhook response.
       "fulfillmentText": "A String", # The text to be pronounced to the user or shown on the screen.
           # Note: This is a legacy field, `fulfillment_messages` should be preferred.
       "parameters": { # The collection of extracted parameters.
@@ -656,6 +874,12 @@
             # copied from the messages in PLATFORM_UNSPECIFIED (i.e. default platform).
           "A String",
         ],
+        "action": "A String", # Optional. The name of the action associated with the intent.
+            # Note: The action name must not contain whitespaces.
+        "name": "A String", # Optional. The unique identifier of this intent.
+            # Required for Intents.UpdateIntent and Intents.BatchUpdateIntents
+            # methods.
+            # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
         "messages": [ # Optional. The collection of rich messages corresponding to the
             # `Response` field in the Dialogflow console.
           { # A rich response message.
@@ -663,8 +887,49 @@
               # For more information, see
               # [Rich response
               # messages](https://cloud.google.com/dialogflow/docs/intents-rich-messages).
+            "mediaContent": { # The media content card for Actions on Google. # The media content card for Actions on Google.
+              "mediaType": "A String", # Optional. What type of media is the content (ie "audio").
+              "mediaObjects": [ # Required. List of media objects.
+                { # Response media object for media content card.
+                  "name": "A String", # Required. Name of media card.
+                  "description": "A String", # Optional. Description of media card.
+                  "contentUrl": "A String", # Required. Url where the media is stored.
+                  "icon": { # The image response message. # Optional. Icon to display above media content.
+                    "imageUri": "A String", # Optional. The public URI to an image file.
+                    "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                        # e.g., screen readers.
+                  },
+                  "largeImage": { # The image response message. # Optional. Image to display above media content.
+                    "imageUri": "A String", # Optional. The public URI to an image file.
+                    "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                        # e.g., screen readers.
+                  },
+                },
+              ],
+            },
+            "image": { # The image response message. # The image response.
+              "imageUri": "A String", # Optional. The public URI to an image file.
+              "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                  # e.g., screen readers.
+            },
+            "payload": { # A custom platform-specific response.
+              "a_key": "", # Properties of the object.
+            },
+            "text": { # The text response message. # The text response.
+              "text": [ # Optional. The collection of the agent's responses.
+                "A String",
+              ],
+            },
+            "platform": "A String", # Optional. The platform that this message is intended for.
+            "suggestions": { # The collection of suggestions. # The suggestion chips for Actions on Google.
+              "suggestions": [ # Required. The list of suggested replies.
+                { # The suggestion chip message that the user can tap to quickly post a reply
+                    # to the conversation.
+                  "title": "A String", # Required. The text shown the in the suggestion chip.
+                },
+              ],
+            },
             "listSelect": { # The card for presenting a list of options to select from. # The list card response for Actions on Google.
-              "title": "A String", # Optional. The overall title of the list.
               "subtitle": "A String", # Optional. Subtitle of the list.
               "items": [ # Required. List items.
                 { # An item in the list.
@@ -686,6 +951,7 @@
                   },
                 },
               ],
+              "title": "A String", # Optional. The overall title of the list.
             },
             "quickReplies": { # The quick replies response message. # The quick replies response.
               "title": "A String", # Optional. The title of the collection of quick replies.
@@ -694,6 +960,7 @@
               ],
             },
             "card": { # The card response message. # The card response.
+              "imageUri": "A String", # Optional. The public URI to an image file for the card.
               "title": "A String", # Optional. The title of the card.
               "buttons": [ # Optional. The collection of card buttons.
                 { # Contains information about a button.
@@ -703,7 +970,6 @@
                 },
               ],
               "subtitle": "A String", # Optional. The subtitle of the card.
-              "imageUri": "A String", # Optional. The public URI to an image file for the card.
             },
             "basicCard": { # The basic card message. Useful for displaying information. # The basic card response for Actions on Google.
               "title": "A String", # Optional. The title of the card.
@@ -715,10 +981,10 @@
               "formattedText": "A String", # Required, unless image is present. The body text of the card.
               "buttons": [ # Optional. The collection of card buttons.
                 { # The button object that appears at the bottom of a card.
+                  "title": "A String", # Required. The title of the button.
                   "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
                     "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
                   },
-                  "title": "A String", # Required. The title of the button.
                 },
               ],
               "subtitle": "A String", # Optional. The subtitle of the card.
@@ -749,22 +1015,16 @@
               },
               "buttons": [ # Optional. List of buttons for the card.
                 { # The button object that appears at the bottom of a card.
+                  "title": "A String", # Required. The title of the button.
                   "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
                     "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
                   },
-                  "title": "A String", # Required. The title of the button.
                 },
               ],
             },
             "carouselSelect": { # The card for presenting a carousel of options to select from. # The carousel card response for Actions on Google.
               "items": [ # Required. Carousel items.
                 { # An item in the carousel.
-                  "title": "A String", # Required. Title of the carousel item.
-                  "image": { # The image response message. # Optional. The image to display.
-                    "imageUri": "A String", # Optional. The public URI to an image file.
-                    "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                        # e.g., screen readers.
-                  },
                   "description": "A String", # Optional. The body text of the card.
                   "info": { # Additional info about the select item for when it is triggered in a # Required. Additional info about the option item.
                       # dialog.
@@ -775,6 +1035,12 @@
                     "key": "A String", # Required. A unique key that will be sent back to the agent if this
                         # response is given.
                   },
+                  "title": "A String", # Required. Title of the carousel item.
+                  "image": { # The image response message. # Optional. The image to display.
+                    "imageUri": "A String", # Optional. The public URI to an image file.
+                    "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
+                        # e.g., screen readers.
+                  },
                 },
               ],
             },
@@ -789,8 +1055,6 @@
               "items": [ # Required. List of items in the Browse Carousel Card. Minimum of two
                   # items, maximum of ten.
                 { # Browsing carousel tile
-                  "description": "A String", # Optional. Description of the carousel item. Maximum of four lines of
-                      # text.
                   "openUriAction": { # Actions on Google action to open a given url. # Required. Action to present to the user.
                     "urlTypeHint": "A String", # Optional. Specifies the type of viewer that is used when opening
                         # the URL. Defaults to opening via web browser.
@@ -804,6 +1068,8 @@
                     "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
                         # e.g., screen readers.
                   },
+                  "description": "A String", # Optional. Description of the carousel item. Maximum of four lines of
+                      # text.
                 },
               ],
               "imageDisplayOptions": "A String", # Optional. Settings for displaying the image. Applies to every image in
@@ -815,71 +1081,23 @@
                 # `SimpleResponse`.
               "simpleResponses": [ # Required. The list of simple responses.
                 { # The simple response message containing speech or text.
-                  "displayText": "A String", # Optional. The text to display.
                   "textToSpeech": "A String", # One of text_to_speech or ssml must be provided. The plain text of the
                       # speech output. Mutually exclusive with ssml.
                   "ssml": "A String", # One of text_to_speech or ssml must be provided. Structured spoken
                       # response to the user in the SSML format. Mutually exclusive with
                       # text_to_speech.
+                  "displayText": "A String", # Optional. The text to display.
                 },
               ],
             },
-            "mediaContent": { # The media content card for Actions on Google. # The media content card for Actions on Google.
-              "mediaType": "A String", # Optional. What type of media is the content (ie "audio").
-              "mediaObjects": [ # Required. List of media objects.
-                { # Response media object for media content card.
-                  "icon": { # The image response message. # Optional. Icon to display above media content.
-                    "imageUri": "A String", # Optional. The public URI to an image file.
-                    "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                        # e.g., screen readers.
-                  },
-                  "largeImage": { # The image response message. # Optional. Image to display above media content.
-                    "imageUri": "A String", # Optional. The public URI to an image file.
-                    "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                        # e.g., screen readers.
-                  },
-                  "name": "A String", # Required. Name of media card.
-                  "description": "A String", # Optional. Description of media card.
-                  "contentUrl": "A String", # Required. Url where the media is stored.
-                },
-              ],
-            },
-            "image": { # The image response message. # The image response.
-              "imageUri": "A String", # Optional. The public URI to an image file.
-              "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                  # e.g., screen readers.
-            },
-            "payload": { # A custom platform-specific response.
-              "a_key": "", # Properties of the object.
-            },
-            "text": { # The text response message. # The text response.
-              "text": [ # Optional. The collection of the agent's responses.
-                "A String",
-              ],
-            },
-            "suggestions": { # The collection of suggestions. # The suggestion chips for Actions on Google.
-              "suggestions": [ # Required. The list of suggested replies.
-                { # The suggestion chip message that the user can tap to quickly post a reply
-                    # to the conversation.
-                  "title": "A String", # Required. The text shown the in the suggestion chip.
-                },
-              ],
-            },
-            "platform": "A String", # Optional. The platform that this message is intended for.
           },
         ],
-        "name": "A String", # Optional. The unique identifier of this intent.
-            # Required for Intents.UpdateIntent and Intents.BatchUpdateIntents
-            # methods.
-            # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
-        "action": "A String", # Optional. The name of the action associated with the intent.
-            # Note: The action name must not contain whitespaces.
+        "webhookState": "A String", # Optional. Indicates whether webhooks are enabled for the intent.
         "inputContextNames": [ # Optional. The list of context names required for this intent to be
             # triggered.
             # Format: `projects/<Project ID>/agent/sessions/-/contexts/<Context ID>`.
           "A String",
         ],
-        "webhookState": "A String", # Optional. Indicates whether webhooks are enabled for the intent.
         "followupIntentInfo": [ # Read-only. Information about all followup intents that have this intent as
             # a direct or indirect parent. We populate this field only in the output.
           { # Represents a single followup intent in the chain.
@@ -889,12 +1107,12 @@
                 # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
           },
         ],
-        "displayName": "A String", # Required. The name of this intent.
         "rootFollowupIntentName": "A String", # Read-only. The unique identifier of the root intent in the chain of
             # followup intents. It identifies the correct followup intents chain for
             # this intent. We populate this field only in the output.
             #
             # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
+        "displayName": "A String", # Required. The name of this intent.
         "mlDisabled": True or False, # Optional. Indicates whether Machine Learning is disabled for the intent.
             # Note: If `ml_disabled` setting is set to true, then this intent is not
             # taken into account during inference in `ML ONLY` match mode. Also,
@@ -903,6 +1121,11 @@
         "trainingPhrases": [ # Optional. The collection of examples that the agent is
             # trained on.
           { # Represents an example that the agent is trained on.
+            "name": "A String", # Output only. The unique identifier of this training phrase.
+            "timesAddedCount": 42, # Optional. Indicates how many times this example was added to
+                # the intent. Each time a developer adds an existing sample by editing an
+                # intent or training, this counter is increased.
+            "type": "A String", # Required. The type of the training phrase.
             "parts": [ # Required. The ordered list of training phrase parts.
                 # The parts are concatenated in order to form the training phrase.
                 #
@@ -923,6 +1146,9 @@
                 #     and the `entity_type`, `alias`, and `user_defined` fields are all
                 #     set.
               { # Represents a part of a training phrase.
+                "text": "A String", # Required. The text for this part.
+                "entityType": "A String", # Optional. The entity type name prefixed with `@`.
+                    # This field is required for annotated parts of the training phrase.
                 "alias": "A String", # Optional. The parameter name for the value extracted from the
                     # annotated part of the example.
                     # This field is required for annotated parts of the training phrase.
@@ -930,31 +1156,14 @@
                     # This field is set to true when the Dialogflow Console is used to
                     # manually annotate the part. When creating an annotated part with the
                     # API, you must set this to true.
-                "text": "A String", # Required. The text for this part.
-                "entityType": "A String", # Optional. The entity type name prefixed with `@`.
-                    # This field is required for annotated parts of the training phrase.
               },
             ],
-            "name": "A String", # Output only. The unique identifier of this training phrase.
-            "timesAddedCount": 42, # Optional. Indicates how many times this example was added to
-                # the intent. Each time a developer adds an existing sample by editing an
-                # intent or training, this counter is increased.
-            "type": "A String", # Required. The type of the training phrase.
           },
         ],
         "resetContexts": True or False, # Optional. Indicates whether to delete all contexts in the current
             # session when this intent is matched.
         "parameters": [ # Optional. The collection of parameters associated with the intent.
           { # Represents intent parameters.
-            "mandatory": True or False, # Optional. Indicates whether the parameter is required. That is,
-                # whether the intent cannot be completed without collecting the parameter
-                # value.
-            "defaultValue": "A String", # Optional. The default value to use when the `value` yields an empty
-                # result.
-                # Default values can be extracted from contexts by using the following
-                # syntax: `#context_name.parameter_name`.
-            "name": "A String", # The unique identifier of this parameter.
-            "isList": True or False, # Optional. Indicates whether the parameter represents a list of values.
             "value": "A String", # Optional. The definition of the parameter value. It can be:
                 #
                 # - a constant string,
@@ -970,6 +1179,15 @@
                 # user in order to collect a value for the parameter.
               "A String",
             ],
+            "mandatory": True or False, # Optional. Indicates whether the parameter is required. That is,
+                # whether the intent cannot be completed without collecting the parameter
+                # value.
+            "defaultValue": "A String", # Optional. The default value to use when the `value` yields an empty
+                # result.
+                # Default values can be extracted from contexts by using the following
+                # syntax: `#context_name.parameter_name`.
+            "name": "A String", # The unique identifier of this parameter.
+            "isList": True or False, # Optional. Indicates whether the parameter represents a list of values.
           },
         ],
       },
@@ -1022,224 +1240,6 @@
           },
         },
       ],
-      "fulfillmentMessages": [ # The collection of rich messages to present to the user.
-        { # A rich response message.
-            # Corresponds to the intent `Response` field in the Dialogflow console.
-            # For more information, see
-            # [Rich response
-            # messages](https://cloud.google.com/dialogflow/docs/intents-rich-messages).
-          "listSelect": { # The card for presenting a list of options to select from. # The list card response for Actions on Google.
-            "title": "A String", # Optional. The overall title of the list.
-            "subtitle": "A String", # Optional. Subtitle of the list.
-            "items": [ # Required. List items.
-              { # An item in the list.
-                "title": "A String", # Required. The title of the list item.
-                "image": { # The image response message. # Optional. The image to display.
-                  "imageUri": "A String", # Optional. The public URI to an image file.
-                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                      # e.g., screen readers.
-                },
-                "description": "A String", # Optional. The main text describing the item.
-                "info": { # Additional info about the select item for when it is triggered in a # Required. Additional information about this option.
-                    # dialog.
-                  "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
-                      # item in dialog.
-                    "A String",
-                  ],
-                  "key": "A String", # Required. A unique key that will be sent back to the agent if this
-                      # response is given.
-                },
-              },
-            ],
-          },
-          "quickReplies": { # The quick replies response message. # The quick replies response.
-            "title": "A String", # Optional. The title of the collection of quick replies.
-            "quickReplies": [ # Optional. The collection of quick replies.
-              "A String",
-            ],
-          },
-          "card": { # The card response message. # The card response.
-            "title": "A String", # Optional. The title of the card.
-            "buttons": [ # Optional. The collection of card buttons.
-              { # Contains information about a button.
-                "text": "A String", # Optional. The text to show on the button.
-                "postback": "A String", # Optional. The text to send back to the Dialogflow API or a URI to
-                    # open.
-              },
-            ],
-            "subtitle": "A String", # Optional. The subtitle of the card.
-            "imageUri": "A String", # Optional. The public URI to an image file for the card.
-          },
-          "basicCard": { # The basic card message. Useful for displaying information. # The basic card response for Actions on Google.
-            "title": "A String", # Optional. The title of the card.
-            "image": { # The image response message. # Optional. The image for the card.
-              "imageUri": "A String", # Optional. The public URI to an image file.
-              "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                  # e.g., screen readers.
-            },
-            "formattedText": "A String", # Required, unless image is present. The body text of the card.
-            "buttons": [ # Optional. The collection of card buttons.
-              { # The button object that appears at the bottom of a card.
-                "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
-                  "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
-                },
-                "title": "A String", # Required. The title of the button.
-              },
-            ],
-            "subtitle": "A String", # Optional. The subtitle of the card.
-          },
-          "tableCard": { # Table card for Actions on Google. # Table card for Actions on Google.
-            "title": "A String", # Required. Title of the card.
-            "rows": [ # Optional. Rows in this table of data.
-              { # Row of TableCard.
-                "dividerAfter": True or False, # Optional. Whether to add a visual divider after this row.
-                "cells": [ # Optional. List of cells that make up this row.
-                  { # Cell of TableCardRow.
-                    "text": "A String", # Required. Text in this cell.
-                  },
-                ],
-              },
-            ],
-            "subtitle": "A String", # Optional. Subtitle to the title.
-            "columnProperties": [ # Optional. Display properties for the columns in this table.
-              { # Column properties for TableCard.
-                "header": "A String", # Required. Column heading.
-                "horizontalAlignment": "A String", # Optional. Defines text alignment for all cells in this column.
-              },
-            ],
-            "image": { # The image response message. # Optional. Image which should be displayed on the card.
-              "imageUri": "A String", # Optional. The public URI to an image file.
-              "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                  # e.g., screen readers.
-            },
-            "buttons": [ # Optional. List of buttons for the card.
-              { # The button object that appears at the bottom of a card.
-                "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
-                  "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
-                },
-                "title": "A String", # Required. The title of the button.
-              },
-            ],
-          },
-          "carouselSelect": { # The card for presenting a carousel of options to select from. # The carousel card response for Actions on Google.
-            "items": [ # Required. Carousel items.
-              { # An item in the carousel.
-                "title": "A String", # Required. Title of the carousel item.
-                "image": { # The image response message. # Optional. The image to display.
-                  "imageUri": "A String", # Optional. The public URI to an image file.
-                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                      # e.g., screen readers.
-                },
-                "description": "A String", # Optional. The body text of the card.
-                "info": { # Additional info about the select item for when it is triggered in a # Required. Additional info about the option item.
-                    # dialog.
-                  "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
-                      # item in dialog.
-                    "A String",
-                  ],
-                  "key": "A String", # Required. A unique key that will be sent back to the agent if this
-                      # response is given.
-                },
-              },
-            ],
-          },
-          "linkOutSuggestion": { # The suggestion chip message that allows the user to jump out to the app # The link out suggestion chip for Actions on Google.
-              # or website associated with this agent.
-            "destinationName": "A String", # Required. The name of the app or site this chip is linking to.
-            "uri": "A String", # Required. The URI of the app or site to open when the user taps the
-                # suggestion chip.
-          },
-          "browseCarouselCard": { # Browse Carousel Card for Actions on Google. # Browse carousel card for Actions on Google.
-              # https://developers.google.com/actions/assistant/responses#browsing_carousel
-            "items": [ # Required. List of items in the Browse Carousel Card. Minimum of two
-                # items, maximum of ten.
-              { # Browsing carousel tile
-                "description": "A String", # Optional. Description of the carousel item. Maximum of four lines of
-                    # text.
-                "openUriAction": { # Actions on Google action to open a given url. # Required. Action to present to the user.
-                  "urlTypeHint": "A String", # Optional. Specifies the type of viewer that is used when opening
-                      # the URL. Defaults to opening via web browser.
-                  "url": "A String", # Required. URL
-                },
-                "footer": "A String", # Optional. Text that appears at the bottom of the Browse Carousel
-                    # Card. Maximum of one line of text.
-                "title": "A String", # Required. Title of the carousel item. Maximum of two lines of text.
-                "image": { # The image response message. # Optional. Hero image for the carousel item.
-                  "imageUri": "A String", # Optional. The public URI to an image file.
-                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                      # e.g., screen readers.
-                },
-              },
-            ],
-            "imageDisplayOptions": "A String", # Optional. Settings for displaying the image. Applies to every image in
-                # items.
-          },
-          "simpleResponses": { # The collection of simple response candidates. # The voice and text-only responses for Actions on Google.
-              # This message in `QueryResult.fulfillment_messages` and
-              # `WebhookResponse.fulfillment_messages` should contain only one
-              # `SimpleResponse`.
-            "simpleResponses": [ # Required. The list of simple responses.
-              { # The simple response message containing speech or text.
-                "displayText": "A String", # Optional. The text to display.
-                "textToSpeech": "A String", # One of text_to_speech or ssml must be provided. The plain text of the
-                    # speech output. Mutually exclusive with ssml.
-                "ssml": "A String", # One of text_to_speech or ssml must be provided. Structured spoken
-                    # response to the user in the SSML format. Mutually exclusive with
-                    # text_to_speech.
-              },
-            ],
-          },
-          "mediaContent": { # The media content card for Actions on Google. # The media content card for Actions on Google.
-            "mediaType": "A String", # Optional. What type of media is the content (ie "audio").
-            "mediaObjects": [ # Required. List of media objects.
-              { # Response media object for media content card.
-                "icon": { # The image response message. # Optional. Icon to display above media content.
-                  "imageUri": "A String", # Optional. The public URI to an image file.
-                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                      # e.g., screen readers.
-                },
-                "largeImage": { # The image response message. # Optional. Image to display above media content.
-                  "imageUri": "A String", # Optional. The public URI to an image file.
-                  "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                      # e.g., screen readers.
-                },
-                "name": "A String", # Required. Name of media card.
-                "description": "A String", # Optional. Description of media card.
-                "contentUrl": "A String", # Required. Url where the media is stored.
-              },
-            ],
-          },
-          "image": { # The image response message. # The image response.
-            "imageUri": "A String", # Optional. The public URI to an image file.
-            "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
-                # e.g., screen readers.
-          },
-          "payload": { # A custom platform-specific response.
-            "a_key": "", # Properties of the object.
-          },
-          "text": { # The text response message. # The text response.
-            "text": [ # Optional. The collection of the agent's responses.
-              "A String",
-            ],
-          },
-          "suggestions": { # The collection of suggestions. # The suggestion chips for Actions on Google.
-            "suggestions": [ # Required. The list of suggested replies.
-              { # The suggestion chip message that the user can tap to quickly post a reply
-                  # to the conversation.
-                "title": "A String", # Required. The text shown the in the suggestion chip.
-              },
-            ],
-          },
-          "platform": "A String", # Optional. The platform that this message is intended for.
-        },
-      ],
-      "webhookPayload": { # If the query was fulfilled by a webhook call, this field is set to the
-          # value of the `payload` field returned in the webhook response.
-        "a_key": "", # Properties of the object.
-      },
-      "action": "A String", # The action name from the matched intent.
-      "webhookSource": "A String", # If the query was fulfilled by a webhook call, this field is set to the
-          # value of the `source` field returned in the webhook response.
     },
     "outputAudio": "A String", # The audio data bytes encoded as specified in the request.
         # Note: The output audio is generated based on the values of default platform
@@ -1247,6 +1247,10 @@
         # multiple default text responses exist, they will be concatenated when
         # generating audio. If no default platform text responses exist, the
         # generated audio content will be empty.
+        #
+        # In some scenarios, multiple output audio fields may be present in the
+        # response structure. In these cases, only the top-most-level audio output
+        # has content.
     "webhookStatus": { # The `Status` type defines a logical error model that is suitable for # Specifies the status of the webhook request.
         # different programming environments, including REST APIs and RPC APIs. It is
         # used by [gRPC](https://github.com/grpc). Each `Status` message contains