1<html><body>
2<style>
3
4body, h1, h2, h3, div, span, p, pre, a {
5 margin: 0;
6 padding: 0;
7 border: 0;
8 font-weight: inherit;
9 font-style: inherit;
10 font-size: 100%;
11 font-family: inherit;
12 vertical-align: baseline;
13}
14
15body {
16 font-size: 13px;
17 padding: 1em;
18}
19
20h1 {
21 font-size: 26px;
22 margin-bottom: 1em;
23}
24
25h2 {
26 font-size: 24px;
27 margin-bottom: 1em;
28}
29
30h3 {
31 font-size: 20px;
32 margin-bottom: 1em;
33 margin-top: 1em;
34}
35
36pre, code {
37 line-height: 1.5;
38 font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
39}
40
41pre {
42 margin-top: 0.5em;
43}
44
45h1, h2, h3, p {
46 font-family: Arial, sans serif;
47}
48
49h1, h2, h3 {
50 border-bottom: solid #CCC 1px;
51}
52
53.toc_element {
54 margin-top: 0.5em;
55}
56
57.firstline {
58 margin-left: 2em;
59}
60
61.method {
62 margin-top: 1em;
63 border: solid 1px #CCC;
64 padding: 1em;
65 background: #EEE;
66}
67
68.details {
69 font-weight: bold;
70 font-size: 14px;
71}
72
73</style>
74
75<h1><a href="dialogflow_v2.html">Dialogflow API</a> . <a href="dialogflow_v2.projects.html">projects</a> . <a href="dialogflow_v2.projects.agent.html">agent</a> . <a href="dialogflow_v2.projects.agent.sessions.html">sessions</a></h1>
76<h2>Instance Methods</h2>
77<p class="toc_element">
78 <code><a href="dialogflow_v2.projects.agent.sessions.contexts.html">contexts()</a></code>
79</p>
80<p class="firstline">Returns the contexts Resource.</p>
81
82<p class="toc_element">
83 <code><a href="dialogflow_v2.projects.agent.sessions.entityTypes.html">entityTypes()</a></code>
84</p>
85<p class="firstline">Returns the entityTypes Resource.</p>
86
87<p class="toc_element">
88 <code><a href="#deleteContexts">deleteContexts(parent, x__xgafv=None)</a></code></p>
89<p class="firstline">Deletes all active contexts in the specified session.</p>
90<p class="toc_element">
91 <code><a href="#detectIntent">detectIntent(session, body, x__xgafv=None)</a></code></p>
92<p class="firstline">Processes a natural language query and returns structured, actionable data as a result.</p>
93<h3>Method Details</h3>
94<div class="method">
95 <code class="details" id="deleteContexts">deleteContexts(parent, x__xgafv=None)</code>
96 <pre>Deletes all active contexts in the specified session.
97
98Args:
99 parent: string, Required. The name of the session to delete all contexts from. Format:
100`projects/<Project ID>/agent/sessions/<Session ID>`. (required)
101 x__xgafv: string, V1 error format.
102 Allowed values
103 1 - v1 error format
104 2 - v2 error format
105
106Returns:
107 An object of the form:
108
109 { # A generic empty message that you can re-use to avoid defining duplicated
110 # empty messages in your APIs. A typical example is to use it as the request
111 # or the response type of an API method. For instance:
112 #
113 # service Foo {
114 # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
115 # }
116 #
117 # The JSON representation for `Empty` is empty JSON object `{}`.
118 }</pre>
119</div>
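<p>A minimal usage sketch (not part of the generated reference above): it calls deleteContexts with the google-api-python-client library, assuming application-default credentials are configured; the project and session IDs are placeholders.</p>
<pre>
from googleapiclient.discovery import build

# Build the Dialogflow API client (assumes default credentials are available).
dialogflow = build('dialogflow', 'v2')

# Placeholder resource name in the documented session format.
session_path = 'projects/my-project/agent/sessions/my-session-id'

# Delete all active contexts in the session; a successful call returns {}.
empty = dialogflow.projects().agent().sessions().deleteContexts(
    parent=session_path).execute()
print(empty)
</pre>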
120
121<div class="method">
122 <code class="details" id="detectIntent">detectIntent(session, body, x__xgafv=None)</code>
123 <pre>Processes a natural language query and returns structured, actionable data
124as a result. This method is not idempotent, because it may cause contexts
125and session entity types to be updated, which in turn might affect
126results of future queries.
127
128Args:
129 session: string, Required. The name of the session this query is sent to. Format:
130`projects/<Project ID>/agent/sessions/<Session ID>`. It's up to the API
131caller to choose an appropriate session ID. It can be a random number or
132some type of user identifier (preferably hashed). The length of the session
133ID must not exceed 36 bytes. (required)
134 body: object, The request body. (required)
135 The object takes the form of:
136
137{ # The request to detect user's intent.
138 "outputAudioConfig": { # Instructs the speech synthesizer on how to generate the output audio content. # Optional. Instructs the speech synthesizer how to generate the output
139 # audio. If this field is not set and agent-level speech synthesizer is not
140 # configured, no output audio is generated.
141 "sampleRateHertz": 42, # Optional. The synthesis sample rate (in hertz) for this audio. If not
142 # provided, then the synthesizer will use the default sample rate based on
143 # the audio encoding. If this is different from the voice's natural sample
144 # rate, then the synthesizer will honor this request by converting to the
145 # desired sample rate (which might result in worse audio quality).
146 "audioEncoding": "A String", # Required. Audio encoding of the synthesized audio content.
147 "synthesizeSpeechConfig": { # Configuration of how speech should be synthesized. # Optional. Configuration of how speech should be synthesized.
148 "effectsProfileId": [ # Optional. An identifier which selects 'audio effects' profiles that are
149 # applied on (post synthesized) text to speech. Effects are applied on top of
150 # each other in the order they are given.
151 "A String",
152 ],
153 "voice": { # Description of which voice to use for speech synthesis. # Optional. The desired voice of the synthesized audio.
154 "ssmlGender": "A String", # Optional. The preferred gender of the voice. If not set, the service will
155 # choose a voice based on the other parameters such as language_code and
156 # name. Note that this is only a preference, not a requirement. If a
157 # voice of the appropriate gender is not available, the synthesizer should
158 # substitute a voice with a different gender rather than failing the request.
159 "name": "A String", # Optional. The name of the voice. If not set, the service will choose a
160 # voice based on the other parameters such as language_code and gender.
161 },
162 "speakingRate": 3.14, # Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
163 # native speed supported by the specific voice. 2.0 is twice as fast, and
164 # 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
165 # other values < 0.25 or > 4.0 will return an error.
166 "volumeGainDb": 3.14, # Optional. Volume gain (in dB) of the normal native volume supported by the
167 # specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
168 # 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
169 # will play at approximately half the amplitude of the normal native signal
170 # amplitude. A value of +6.0 (dB) will play at approximately twice the
171 # amplitude of the normal native signal amplitude. We strongly recommend not
172 # to exceed +10 (dB) as there's usually no effective increase in loudness for
173 # any value greater than that.
174 "pitch": 3.14, # Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
175 # semitones from the original pitch. -20 means decrease 20 semitones from the
176 # original pitch.
177 },
178 },
179 "inputAudio": "A String", # Optional. The natural language speech audio to be processed. This field
180 # should be populated iff `query_input` is set to an input audio config.
181 # A single request can contain up to 1 minute of speech audio data.
182 "queryInput": { # Represents the query input. It can contain either: # Required. The input specification. It can be set to:
183 #
184 # 1. an audio config
185 # which instructs the speech recognizer how to process the speech audio,
186 #
187 # 2. a conversational query in the form of text, or
188 #
189 # 3. an event that specifies which intent to trigger.
190 #
191 # 1. An audio config which
192 # instructs the speech recognizer how to process the speech audio.
193 #
194 # 2. A conversational query in the form of text.
195 #
196 # 3. An event that specifies which intent to trigger.
197 "text": { # Represents the natural language text to be processed. # The natural language text to be processed.
198 "text": "A String", # Required. The UTF-8 encoded natural language text to be processed.
199 # Text length must not exceed 256 characters.
200 "languageCode": "A String", # Required. The language of this conversational query. See [Language
201 # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
202 # for a list of the currently supported language codes. Note that queries in
203 # the same session do not necessarily need to specify the same language.
204 },
205 "event": { # Events allow for matching intents by event name instead of the natural # The event to be processed.
206 # language input. For instance, input `<event: { name: "welcome_event",
207 # parameters: { name: "Sam" } }>` can trigger a personalized welcome response.
208 # The parameter `name` may be used by the agent in the response:
209 # `"Hello #welcome_event.name! What can I do for you today?"`.
210 "languageCode": "A String", # Required. The language of this query. See [Language
211 # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
212 # for a list of the currently supported language codes. Note that queries in
213 # the same session do not necessarily need to specify the same language.
214 "name": "A String", # Required. The unique identifier of the event.
215 "parameters": { # Optional. The collection of parameters associated with the event.
216 "a_key": "", # Properties of the object.
217 },
218 },
219 "audioConfig": { # Instructs the speech recognizer how to process the audio content. # Instructs the speech recognizer how to process the speech audio.
220 "phraseHints": [ # Optional. A list of strings containing words and phrases that the speech
221 # recognizer should recognize with higher likelihood.
222 #
223 # See [the Cloud Speech
224 # documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
225 # for more details.
226 "A String",
227 ],
228 "languageCode": "A String", # Required. The language of the supplied audio. Dialogflow does not do
229 # translations. See [Language
230 # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
231 # for a list of the currently supported language codes. Note that queries in
232 # the same session do not necessarily need to specify the same language.
233 "audioEncoding": "A String", # Required. Audio encoding of the audio content to process.
234 "sampleRateHertz": 42, # Required. Sample rate (in Hertz) of the audio content sent in the query.
235 # Refer to
236 # [Cloud Speech API
237 # documentation](https://cloud.google.com/speech-to-text/docs/basics) for
238 # more details.
239 "modelVariant": "A String", # Optional. Which variant of the Speech model to use.
240 },
241 },
242 "queryParams": { # Represents the parameters of the conversational query. # Optional. The parameters of this query.
243 "geoLocation": { # An object representing a latitude/longitude pair. This is expressed as a pair # Optional. The geo location of this conversational query.
244 # of doubles representing degrees latitude and degrees longitude. Unless
245 # specified otherwise, this must conform to the
246 # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
247 # standard</a>. Values must be within normalized ranges.
248 "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
249 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
250 },
251 "contexts": [ # Optional. The collection of contexts to be activated before this query is
252 # executed.
253 { # Represents a context.
254 "parameters": { # Optional. The collection of parameters associated with this context.
255 # Refer to [this
256 # doc](https://cloud.google.com/dialogflow-enterprise/docs/intents-actions-parameters)
257 # for syntax.
258 "a_key": "", # Properties of the object.
259 },
260 "name": "A String", # Required. The unique identifier of the context. Format:
261 # `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`.
262 #
263 # The `Context ID` is always converted to lowercase, may only contain
264 # characters in [a-zA-Z0-9_-%] and may be at most 250 bytes long.
265 "lifespanCount": 42, # Optional. The number of conversational query requests after which the
266 # context expires. If set to `0` (the default) the context expires
267 # immediately. Contexts expire automatically after 20 minutes if there
268 # are no matching queries.
269 },
270 ],
271 "sentimentAnalysisRequestConfig": { # Configures the types of sentiment analysis to perform. # Optional. Configures the type of sentiment analysis to perform. If not
272 # provided, sentiment analysis is not performed.
273 "analyzeQueryTextSentiment": True or False, # Optional. Instructs the service to perform sentiment analysis on
274 # `query_text`. If not provided, sentiment analysis is not performed on
275 # `query_text`.
276 },
277 "resetContexts": True or False, # Optional. Specifies whether to delete all contexts in the current session
278 # before the new ones are activated.
279 "timeZone": "A String", # Optional. The time zone of this conversational query from the
280 # [time zone database](https://www.iana.org/time-zones), e.g.,
281 # America/New_York, Europe/Paris. If not provided, the time zone specified in
282 # agent settings is used.
283 "payload": { # Optional. This field can be used to pass custom data into the webhook
284 # associated with the agent. Arbitrary JSON objects are supported.
285 "a_key": "", # Properties of the object.
286 },
287 "sessionEntityTypes": [ # Optional. Additional session entity types to replace or extend developer
288 # entity types with. The entity synonyms apply to all languages and persist
289 # for the session of this query.
290 { # Represents a session entity type.
291 #
292 # Extends or replaces a developer entity type at the user session level (we
293 # refer to the entity types defined at the agent level as "developer entity
294 # types").
295 #
296 # Note: session entity types apply to all queries, regardless of the language.
297 "entities": [ # Required. The collection of entities associated with this session entity
298 # type.
299 { # An **entity entry** for an associated entity type.
300 "synonyms": [ # Required. A collection of value synonyms. For example, if the entity type
301 # is *vegetable*, and `value` is *scallions*, a synonym could be *green
302 # onions*.
303 #
304 # For `KIND_LIST` entity types:
305 #
306 # * This collection must contain exactly one synonym equal to `value`.
307 "A String",
308 ],
309 "value": "A String", # Required. The primary value associated with this entity entry.
310 # For example, if the entity type is *vegetable*, the value could be
311 # *scallions*.
312 #
313 # For `KIND_MAP` entity types:
314 #
315 # * A canonical value to be used in place of synonyms.
316 #
317 # For `KIND_LIST` entity types:
318 #
319 # * A string that can contain references to other entity types (with or
320 # without aliases).
321 },
322 ],
323 "name": "A String", # Required. The unique identifier of this session entity type. Format:
324 # `projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type
325 # Display Name>`.
326 #
327 # `<Entity Type Display Name>` must be the display name of an existing entity
328 # type in the same agent that will be overridden or supplemented.
329 "entityOverrideMode": "A String", # Required. Indicates whether the additional data should override or
330 # supplement the developer entity type definition.
331 },
332 ],
333 },
334 }
335
336 x__xgafv: string, V1 error format.
337 Allowed values
338 1 - v1 error format
339 2 - v2 error format
340
341Returns:
342 An object of the form:
343
344 { # The message returned from the DetectIntent method.
345 "outputAudioConfig": { # Instructs the speech synthesizer on how to generate the output audio content. # The config used by the speech synthesizer to generate the output audio.
346 "sampleRateHertz": 42, # Optional. The synthesis sample rate (in hertz) for this audio. If not
347 # provided, then the synthesizer will use the default sample rate based on
348 # the audio encoding. If this is different from the voice's natural sample
349 # rate, then the synthesizer will honor this request by converting to the
350 # desired sample rate (which might result in worse audio quality).
351 "audioEncoding": "A String", # Required. Audio encoding of the synthesized audio content.
352 "synthesizeSpeechConfig": { # Configuration of how speech should be synthesized. # Optional. Configuration of how speech should be synthesized.
353 "effectsProfileId": [ # Optional. An identifier which selects 'audio effects' profiles that are
354 # applied on (post synthesized) text to speech. Effects are applied on top of
355 # each other in the order they are given.
356 "A String",
357 ],
358 "voice": { # Description of which voice to use for speech synthesis. # Optional. The desired voice of the synthesized audio.
359 "ssmlGender": "A String", # Optional. The preferred gender of the voice. If not set, the service will
360 # choose a voice based on the other parameters such as language_code and
361 # name. Note that this is only a preference, not a requirement. If a
362 # voice of the appropriate gender is not available, the synthesizer should
363 # substitute a voice with a different gender rather than failing the request.
364 "name": "A String", # Optional. The name of the voice. If not set, the service will choose a
365 # voice based on the other parameters such as language_code and gender.
366 },
367 "speakingRate": 3.14, # Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
368 # native speed supported by the specific voice. 2.0 is twice as fast, and
369 # 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
370 # other values < 0.25 or > 4.0 will return an error.
371 "volumeGainDb": 3.14, # Optional. Volume gain (in dB) of the normal native volume supported by the
372 # specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
373 # 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
374 # will play at approximately half the amplitude of the normal native signal
375 # amplitude. A value of +6.0 (dB) will play at approximately twice the
376 # amplitude of the normal native signal amplitude. We strongly recommend not
377 # to exceed +10 (dB) as there's usually no effective increase in loudness for
378 # any value greater than that.
379 "pitch": 3.14, # Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
380 # semitones from the original pitch. -20 means decrease 20 semitones from the
381 # original pitch.
382 },
383 },
384 "outputAudio": "A String", # The audio data bytes encoded as specified in the request.
385 # Note: The output audio is generated based on the values of default platform
386 # text responses found in the `query_result.fulfillment_messages` field. If
387 # multiple default text responses exist, they will be concatenated when
388 # generating audio. If no default platform text responses exist, the
389 # generated audio content will be empty.
390 "webhookStatus": { # The `Status` type defines a logical error model that is suitable for # Specifies the status of the webhook request.
391 # different programming environments, including REST APIs and RPC APIs. It is
392 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
393 # three pieces of data: error code, error message, and error details.
394 #
395 # You can find out more about this error model and how to work with it in the
396 # [API Design Guide](https://cloud.google.com/apis/design/errors).
397 "message": "A String", # A developer-facing error message, which should be in English. Any
398 # user-facing error message should be localized and sent in the
399 # google.rpc.Status.details field, or localized by the client.
400 "code": 42, # The status code, which should be an enum value of google.rpc.Code.
401 "details": [ # A list of messages that carry the error details. There is a common set of
402 # message types for APIs to use.
403 {
404 "a_key": "", # Properties of the object. Contains field @type with type URL.
405 },
406 ],
407 },
408 "queryResult": { # Represents the result of conversational query or event processing. # The selected results of the conversational query or event processing.
409 # See `alternative_query_results` for additional potential results.
410 "sentimentAnalysisResult": { # The result of sentiment analysis as configured by # The sentiment analysis result, which depends on the
411 # `sentiment_analysis_request_config` specified in the request.
412 # `sentiment_analysis_request_config`.
413 "queryTextSentiment": { # The sentiment, such as positive/negative feeling or association, for a unit # The sentiment analysis result for `query_text`.
414 # of analysis, such as the query text.
415 "score": 3.14, # Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
416 # sentiment).
417 "magnitude": 3.14, # A non-negative number in the [0, +inf) range, which represents the absolute
418 # magnitude of sentiment, regardless of score (positive or negative).
419 },
420 },
421 "fulfillmentText": "A String", # The text to be pronounced to the user or shown on the screen.
422 # Note: This is a legacy field, `fulfillment_messages` should be preferred.
423 "allRequiredParamsPresent": True or False, # This field is set to:
424 #
425 # - `false` if the matched intent has required parameters and not all of
426 # the required parameter values have been collected.
427 # - `true` if all required parameter values have been collected, or if the
428 # matched intent doesn't contain any required parameters.
429 "parameters": { # The collection of extracted parameters.
430 "a_key": "", # Properties of the object.
431 },
432 "languageCode": "A String", # The language that was triggered during intent detection.
433 # See [Language
434 # Support](https://cloud.google.com/dialogflow-enterprise/docs/reference/language)
435 # for a list of the currently supported language codes.
436 "speechRecognitionConfidence": 3.14, # The Speech recognition confidence between 0.0 and 1.0. A higher number
437 # indicates an estimated greater likelihood that the recognized words are
438 # correct. The default of 0.0 is a sentinel value indicating that confidence
439 # was not set.
440 #
441 # This field is not guaranteed to be accurate or set. In particular this
442 # field isn't set for StreamingDetectIntent since the streaming endpoint has
443 # separate confidence estimates per portion of the audio in
444 # StreamingRecognitionResult.
445 "intentDetectionConfidence": 3.14, # The intent detection confidence. Values range from 0.0
446 # (completely uncertain) to 1.0 (completely certain).
447 # If there are multiple `knowledge_answers` messages, this value is set to
448 # the greatest `knowledgeAnswers.match_confidence` value in the list.
449 "action": "A String", # The action name from the matched intent.
450 "intent": { # Represents an intent. # The intent that matched the conversational query. Some, but not
451 # all, fields are filled in this message, including but not limited to:
452 # `name`, `display_name` and `webhook_state`.
453 # Intents convert a number of user expressions or patterns into an action. An
454 # action is an extraction of a user command or sentence semantics.
455 "isFallback": True or False, # Optional. Indicates whether this is a fallback intent.
456 "mlDisabled": True or False, # Optional. Indicates whether Machine Learning is disabled for the intent.
457 # Note: If the `ml_disabled` setting is set to true, then this intent is not
458 # taken into account during inference in `ML ONLY` match mode. Also,
459 # auto-markup in the UI is turned off.
460 "displayName": "A String", # Required. The name of this intent.
461 "name": "A String", # The unique identifier of this intent.
462 # Required for Intents.UpdateIntent and Intents.BatchUpdateIntents
463 # methods.
464 # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
465 "parameters": [ # Optional. The collection of parameters associated with the intent.
466 { # Represents intent parameters.
467 "displayName": "A String", # Required. The name of the parameter.
468 "name": "A String", # The unique identifier of this parameter.
469 "defaultValue": "A String", # Optional. The default value to use when the `value` yields an empty
470 # result.
471 # Default values can be extracted from contexts by using the following
472 # syntax: `#context_name.parameter_name`.
473 "entityTypeDisplayName": "A String", # Optional. The name of the entity type, prefixed with `@`, that
474 # describes values of the parameter. If the parameter is
475 # required, this must be provided.
476 "value": "A String", # Optional. The definition of the parameter value. It can be:
477 # - a constant string,
478 # - a parameter value defined as `$parameter_name`,
479 # - an original parameter value defined as `$parameter_name.original`,
480 # - a parameter value from some context defined as
481 # `#context_name.parameter_name`.
482 "prompts": [ # Optional. The collection of prompts that the agent can present to the
483 # user in order to collect value for the parameter.
484 "A String",
485 ],
486 "isList": True or False, # Optional. Indicates whether the parameter represents a list of values.
487 "mandatory": True or False, # Optional. Indicates whether the parameter is required. That is,
488 # whether the intent cannot be completed without collecting the parameter
489 # value.
490 },
491 ],
492 "trainingPhrases": [ # Optional. The collection of examples that the agent is
493 # trained on.
494 { # Represents an example that the agent is trained on.
495 "parts": [ # Required. The ordered list of training phrase parts.
496 # The parts are concatenated in order to form the training phrase.
497 #
498 # Note: The API does not automatically annotate training phrases like the
499 # Dialogflow Console does.
500 #
501 # Note: Do not forget to include whitespace at part boundaries,
502 # so the training phrase is well formatted when the parts are concatenated.
503 #
504 # If the training phrase does not need to be annotated with parameters,
505 # you just need a single part with only the Part.text field set.
506 #
507 # If you want to annotate the training phrase, you must create multiple
508 # parts, where the fields of each part are populated in one of two ways:
509 #
510 # - `Part.text` is set to a part of the phrase that has no parameters.
511 # - `Part.text` is set to a part of the phrase that you want to annotate,
512 # and the `entity_type`, `alias`, and `user_defined` fields are all
513 # set.
514 { # Represents a part of a training phrase.
515 "alias": "A String", # Optional. The parameter name for the value extracted from the
516 # annotated part of the example.
517 # This field is required for annotated parts of the training phrase.
518 "entityType": "A String", # Optional. The entity type name prefixed with `@`.
519 # This field is required for annotated parts of the training phrase.
520 "text": "A String", # Required. The text for this part.
521 "userDefined": True or False, # Optional. Indicates whether the text was manually annotated.
522 # This field is set to true when the Dialogflow Console is used to
523 # manually annotate the part. When creating an annotated part with the
524 # API, you must set this to true.
525 },
526 ],
527 "type": "A String", # Required. The type of the training phrase.
528 "name": "A String", # Output only. The unique identifier of this training phrase.
529 "timesAddedCount": 42, # Optional. Indicates how many times this example was added to
530 # the intent. Each time a developer adds an existing sample by editing an
531 # intent or training, this counter is increased.
532 },
533 ],
534 "followupIntentInfo": [ # Read-only. Information about all followup intents that have this intent as
535 # a direct or indirect parent. We populate this field only in the output.
536 { # Represents a single followup intent in the chain.
537 "followupIntentName": "A String", # The unique identifier of the followup intent.
538 # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
539 "parentFollowupIntentName": "A String", # The unique identifier of the followup intent's parent.
540 # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
541 },
542 ],
543 "webhookState": "A String", # Optional. Indicates whether webhooks are enabled for the intent.
544 "resetContexts": True or False, # Optional. Indicates whether to delete all contexts in the current
545 # session when this intent is matched.
546 "messages": [ # Optional. The collection of rich messages corresponding to the
547 # `Response` field in the Dialogflow console.
548 { # Corresponds to the `Response` field in the Dialogflow console.
549 "simpleResponses": { # The collection of simple response candidates. # The voice and text-only responses for Actions on Google.
550 # This message in `QueryResult.fulfillment_messages` and
551 # `WebhookResponse.fulfillment_messages` should contain only one
552 # `SimpleResponse`.
553 "simpleResponses": [ # Required. The list of simple responses.
554 { # The simple response message containing speech or text.
555 "textToSpeech": "A String", # One of text_to_speech or ssml must be provided. The plain text of the
556 # speech output. Mutually exclusive with ssml.
557 "displayText": "A String", # Optional. The text to display.
558 "ssml": "A String", # One of text_to_speech or ssml must be provided. Structured spoken
559 # response to the user in the SSML format. Mutually exclusive with
560 # text_to_speech.
561 },
562 ],
563 },
564 "quickReplies": { # The quick replies response message. # The quick replies response.
565 "quickReplies": [ # Optional. The collection of quick replies.
566 "A String",
567 ],
568 "title": "A String", # Optional. The title of the collection of quick replies.
569 },
570 "platform": "A String", # Optional. The platform that this message is intended for.
571 "text": { # The text response message. # The text response.
572 "text": [ # Optional. The collection of the agent's responses.
573 "A String",
574 ],
575 },
576 "image": { # The image response message. # The image response.
577 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
578 # e.g., screen readers.
579 "imageUri": "A String", # Optional. The public URI to an image file.
580 },
581 "suggestions": { # The collection of suggestions. # The suggestion chips for Actions on Google.
582 "suggestions": [ # Required. The list of suggested replies.
583 { # The suggestion chip message that the user can tap to quickly post a reply
584 # to the conversation.
585 "title": "A String", # Required. The text shown in the suggestion chip.
586 },
587 ],
588 },
589 "linkOutSuggestion": { # The suggestion chip message that allows the user to jump out to the app # The link out suggestion chip for Actions on Google.
590 # or website associated with this agent.
591 "uri": "A String", # Required. The URI of the app or site to open when the user taps the
592 # suggestion chip.
593 "destinationName": "A String", # Required. The name of the app or site this chip is linking to.
594 },
595 "basicCard": { # The basic card message. Useful for displaying information. # The basic card response for Actions on Google.
596 "buttons": [ # Optional. The collection of card buttons.
597 { # The button object that appears at the bottom of a card.
598 "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
599 "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
600 },
601 "title": "A String", # Required. The title of the button.
602 },
603 ],
604 "subtitle": "A String", # Optional. The subtitle of the card.
605 "image": { # The image response message. # Optional. The image for the card.
606 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
607 # e.g., screen readers.
608 "imageUri": "A String", # Optional. The public URI to an image file.
609 },
610 "formattedText": "A String", # Required, unless image is present. The body text of the card.
611 "title": "A String", # Optional. The title of the card.
612 },
613 "carouselSelect": { # The card for presenting a carousel of options to select from. # The carousel card response for Actions on Google.
614 "items": [ # Required. Carousel items.
615 { # An item in the carousel.
616 "info": { # Additional info about the select item for when it is triggered in a # Required. Additional info about the option item.
617 # dialog.
618 "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
619 # item in dialog.
620 "A String",
621 ],
622 "key": "A String", # Required. A unique key that will be sent back to the agent if this
623 # response is given.
624 },
625 "image": { # The image response message. # Optional. The image to display.
626 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
627 # e.g., screen readers.
628 "imageUri": "A String", # Optional. The public URI to an image file.
629 },
630 "description": "A String", # Optional. The body text of the card.
631 "title": "A String", # Required. Title of the carousel item.
632 },
633 ],
634 },
635 "listSelect": { # The card for presenting a list of options to select from. # The list card response for Actions on Google.
636 "items": [ # Required. List items.
637 { # An item in the list.
638 "info": { # Additional info about the select item for when it is triggered in a # Required. Additional information about this option.
639 # dialog.
640 "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
641 # item in dialog.
642 "A String",
643 ],
644 "key": "A String", # Required. A unique key that will be sent back to the agent if this
645 # response is given.
646 },
647 "image": { # The image response message. # Optional. The image to display.
648 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
649 # e.g., screen readers.
650 "imageUri": "A String", # Optional. The public URI to an image file.
651 },
652 "description": "A String", # Optional. The main text describing the item.
653 "title": "A String", # Required. The title of the list item.
654 },
655 ],
656 "title": "A String", # Optional. The overall title of the list.
657 },
658 "payload": { # Returns a response containing a custom, platform-specific payload.
659 # See the Intent.Message.Platform type for a description of the
660 # structure that may be required for your platform.
661 "a_key": "", # Properties of the object.
662 },
663 "card": { # The card response message. # The card response.
664 "buttons": [ # Optional. The collection of card buttons.
665 { # Optional. Contains information about a button.
666 "text": "A String", # Optional. The text to show on the button.
667 "postback": "A String", # Optional. The text to send back to the Dialogflow API or a URI to
668 # open.
669 },
670 ],
671 "title": "A String", # Optional. The title of the card.
672 "subtitle": "A String", # Optional. The subtitle of the card.
673 "imageUri": "A String", # Optional. The public URI to an image file for the card.
674 },
675 },
676 ],
677 "parentFollowupIntentName": "A String", # Read-only after creation. The unique identifier of the parent intent in the
678 # chain of followup intents. You can set this field when creating an intent,
679 # for example with CreateIntent or BatchUpdateIntents, in order to
680 # make this intent a followup intent.
681 #
682 # It identifies the parent followup intent.
683 # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
684 "defaultResponsePlatforms": [ # Optional. The list of platforms for which the first responses will be
685 # copied from the messages in PLATFORM_UNSPECIFIED (i.e. default platform).
686 "A String",
687 ],
688 "priority": 42, # Optional. The priority of this intent. Higher numbers represent higher
689 # priorities. If this is zero or unspecified, we use the default
690 # priority 500000.
691 #
692 # Negative numbers mean that the intent is disabled.
693 "rootFollowupIntentName": "A String", # Read-only. The unique identifier of the root intent in the chain of
694 # followup intents. It identifies the correct followup intents chain for
695 # this intent. We populate this field only in the output.
696 #
697 # Format: `projects/<Project ID>/agent/intents/<Intent ID>`.
698 "inputContextNames": [ # Optional. The list of context names required for this intent to be
699 # triggered.
700 # Format: `projects/<Project ID>/agent/sessions/-/contexts/<Context ID>`.
701 "A String",
702 ],
703 "action": "A String", # Optional. The name of the action associated with the intent.
704 # Note: The action name must not contain whitespaces.
705 "outputContexts": [ # Optional. The collection of contexts that are activated when the intent
706 # is matched. Context messages in this collection should not set the
707 # parameters field. Setting the `lifespan_count` to 0 will reset the context
708 # when the intent is matched.
709 # Format: `projects/<Project ID>/agent/sessions/-/contexts/<Context ID>`.
710 { # Represents a context.
711 "parameters": { # Optional. The collection of parameters associated with this context.
712 # Refer to [this
713 # doc](https://cloud.google.com/dialogflow-enterprise/docs/intents-actions-parameters)
714 # for syntax.
715 "a_key": "", # Properties of the object.
716 },
717 "name": "A String", # Required. The unique identifier of the context. Format:
718 # `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`.
719 #
720 # The `Context ID` is always converted to lowercase, may only contain
721 # characters in [a-zA-Z0-9_-%] and may be at most 250 bytes long.
722 "lifespanCount": 42, # Optional. The number of conversational query requests after which the
723 # context expires. If set to `0` (the default) the context expires
724 # immediately. Contexts expire automatically after 20 minutes if there
725 # are no matching queries.
726 },
727 ],
728 "events": [ # Optional. The collection of event names that trigger the intent.
729 # If the collection of input contexts is not empty, all of the contexts must
730 # be present in the active user session for an event to trigger this intent.
731 "A String",
732 ],
733 },
734 "fulfillmentMessages": [ # The collection of rich messages to present to the user.
735 { # Corresponds to the `Response` field in the Dialogflow console.
736 "simpleResponses": { # The collection of simple response candidates. # The voice and text-only responses for Actions on Google.
737 # This message in `QueryResult.fulfillment_messages` and
738 # `WebhookResponse.fulfillment_messages` should contain only one
739 # `SimpleResponse`.
740 "simpleResponses": [ # Required. The list of simple responses.
741 { # The simple response message containing speech or text.
742 "textToSpeech": "A String", # One of text_to_speech or ssml must be provided. The plain text of the
743 # speech output. Mutually exclusive with ssml.
744 "displayText": "A String", # Optional. The text to display.
745 "ssml": "A String", # One of text_to_speech or ssml must be provided. Structured spoken
746 # response to the user in the SSML format. Mutually exclusive with
747 # text_to_speech.
748 },
749 ],
750 },
751 "quickReplies": { # The quick replies response message. # The quick replies response.
752 "quickReplies": [ # Optional. The collection of quick replies.
753 "A String",
754 ],
755 "title": "A String", # Optional. The title of the collection of quick replies.
756 },
757 "platform": "A String", # Optional. The platform that this message is intended for.
758 "text": { # The text response message. # The text response.
759 "text": [ # Optional. The collection of the agent's responses.
760 "A String",
761 ],
762 },
763 "image": { # The image response message. # The image response.
764 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
765 # e.g., screen readers.
766 "imageUri": "A String", # Optional. The public URI to an image file.
767 },
768 "suggestions": { # The collection of suggestions. # The suggestion chips for Actions on Google.
769 "suggestions": [ # Required. The list of suggested replies.
770 { # The suggestion chip message that the user can tap to quickly post a reply
771 # to the conversation.
772 "title": "A String", # Required. The text shown in the suggestion chip.
773 },
774 ],
775 },
776 "linkOutSuggestion": { # The suggestion chip message that allows the user to jump out to the app # The link out suggestion chip for Actions on Google.
777 # or website associated with this agent.
778 "uri": "A String", # Required. The URI of the app or site to open when the user taps the
779 # suggestion chip.
780 "destinationName": "A String", # Required. The name of the app or site this chip is linking to.
781 },
782 "basicCard": { # The basic card message. Useful for displaying information. # The basic card response for Actions on Google.
783 "buttons": [ # Optional. The collection of card buttons.
784 { # The button object that appears at the bottom of a card.
785 "openUriAction": { # Opens the given URI. # Required. Action to take when a user taps on the button.
786 "uri": "A String", # Required. The HTTP or HTTPS scheme URI.
787 },
788 "title": "A String", # Required. The title of the button.
789 },
790 ],
791 "subtitle": "A String", # Optional. The subtitle of the card.
792 "image": { # The image response message. # Optional. The image for the card.
793 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
794 # e.g., screen readers.
795 "imageUri": "A String", # Optional. The public URI to an image file.
796 },
797 "formattedText": "A String", # Required, unless image is present. The body text of the card.
798 "title": "A String", # Optional. The title of the card.
799 },
800 "carouselSelect": { # The card for presenting a carousel of options to select from. # The carousel card response for Actions on Google.
801 "items": [ # Required. Carousel items.
802 { # An item in the carousel.
803 "info": { # Additional info about the select item for when it is triggered in a # Required. Additional info about the option item.
804 # dialog.
805 "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
806 # item in dialog.
807 "A String",
808 ],
809 "key": "A String", # Required. A unique key that will be sent back to the agent if this
810 # response is given.
811 },
812 "image": { # The image response message. # Optional. The image to display.
813 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
814 # e.g., screen readers.
815 "imageUri": "A String", # Optional. The public URI to an image file.
816 },
817 "description": "A String", # Optional. The body text of the card.
818 "title": "A String", # Required. Title of the carousel item.
819 },
820 ],
821 },
822 "listSelect": { # The card for presenting a list of options to select from. # The list card response for Actions on Google.
823 "items": [ # Required. List items.
824 { # An item in the list.
825 "info": { # Additional info about the select item for when it is triggered in a # Required. Additional information about this option.
826 # dialog.
827 "synonyms": [ # Optional. A list of synonyms that can also be used to trigger this
828 # item in dialog.
829 "A String",
830 ],
831 "key": "A String", # Required. A unique key that will be sent back to the agent if this
832 # response is given.
833 },
834 "image": { # The image response message. # Optional. The image to display.
835 "accessibilityText": "A String", # Optional. A text description of the image to be used for accessibility,
836 # e.g., screen readers.
837 "imageUri": "A String", # Optional. The public URI to an image file.
838 },
839 "description": "A String", # Optional. The main text describing the item.
840 "title": "A String", # Required. The title of the list item.
841 },
842 ],
843 "title": "A String", # Optional. The overall title of the list.
844 },
845 "payload": { # Returns a response containing a custom, platform-specific payload.
846 # See the Intent.Message.Platform type for a description of the
847 # structure that may be required for your platform.
848 "a_key": "", # Properties of the object.
849 },
850 "card": { # The card response message. # The card response.
851 "buttons": [ # Optional. The collection of card buttons.
852 { # Optional. Contains information about a button.
853 "text": "A String", # Optional. The text to show on the button.
854 "postback": "A String", # Optional. The text to send back to the Dialogflow API or a URI to
855 # open.
856 },
857 ],
858 "title": "A String", # Optional. The title of the card.
859 "subtitle": "A String", # Optional. The subtitle of the card.
860 "imageUri": "A String", # Optional. The public URI to an image file for the card.
861 },
862 },
863 ],
864 "diagnosticInfo": { # The free-form diagnostic info. For example, this field could contain
865 # webhook call latency. The string keys of the Struct's fields map can change
866 # without notice.
867 "a_key": "", # Properties of the object.
868 },
869 "queryText": "A String", # The original conversational query text:
870 #
871 # - If natural language text was provided as input, `query_text` contains
872 # a copy of the input.
873 # - If natural language speech audio was provided as input, `query_text`
874 # contains the speech recognition result. If the speech recognizer produced
875 # multiple alternatives, a particular one is picked.
876 # - If automatic spell correction is enabled, `query_text` will contain the
877 # corrected user input.
878 "webhookSource": "A String", # If the query was fulfilled by a webhook call, this field is set to the
879 # value of the `source` field returned in the webhook response.
880 "outputContexts": [ # The collection of output contexts. If applicable,
881 # `output_contexts.parameters` contains entries with name
882 # `<parameter name>.original` containing the original parameter values
883 # before the query.
884 { # Represents a context.
885 "parameters": { # Optional. The collection of parameters associated with this context.
886 # Refer to [this
887 # doc](https://cloud.google.com/dialogflow-enterprise/docs/intents-actions-parameters)
888 # for syntax.
889 "a_key": "", # Properties of the object.
890 },
891 "name": "A String", # Required. The unique identifier of the context. Format:
892 # `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`.
893 #
894 # The `Context ID` is always converted to lowercase, may only contain
895 # characters in [a-zA-Z0-9_-%] and may be at most 250 bytes long.
896 "lifespanCount": 42, # Optional. The number of conversational query requests after which the
897 # context expires. If set to `0` (the default) the context expires
898 # immediately. Contexts expire automatically after 20 minutes if there
899 # are no matching queries.
900 },
901 ],
902 "webhookPayload": { # If the query was fulfilled by a webhook call, this field is set to the
903 # value of the `payload` field returned in the webhook response.
904 "a_key": "", # Properties of the object.
905 },
906 },
907 "responseId": "A String", # The unique identifier of the response. It can be used to
908 # locate a response in the training example set or for reporting issues.
909 }</pre>
910</div>
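<p>A minimal usage sketch (not part of the generated reference above): it sends a text query through detectIntent with the google-api-python-client library, assuming application-default credentials. The project ID, session ID, query text, and language code are placeholder assumptions.</p>
<pre>
from googleapiclient.discovery import build

# Build the Dialogflow API client (assumes default credentials are available).
dialogflow = build('dialogflow', 'v2')

session_path = 'projects/my-project/agent/sessions/my-session-id'  # placeholders

# Only queryInput is required here; see the request body schema above for the
# remaining optional fields (audio config, query parameters, output audio, ...).
body = {
    'queryInput': {
        'text': {
            'text': 'I want to book a room for tomorrow',
            'languageCode': 'en-US',
        }
    }
}

response = dialogflow.projects().agent().sessions().detectIntent(
    session=session_path, body=body).execute()

# Inspect the matched intent and the text response chosen by the agent.
query_result = response.get('queryResult', {})
print(query_result.get('intent', {}).get('displayName'))
print(query_result.get('fulfillmentText'))
</pre>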
911
912</body></html>