<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1p1beta1.html">Cloud Vision API</a> . <a href="vision_v1p1beta1.files.html">files</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Service that performs image detection and annotation for a batch of files.</p>
<p class="toc_element">
  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run asynchronous image detection and annotation for a list of generic files.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="annotate">annotate(body=None, x__xgafv=None)</code>
  <pre>Service that performs image detection and annotation for a batch of files.
Currently, only "application/pdf", "image/tiff" and "image/gif" are supported.

This service extracts at most 5 frames (for GIF) or pages (for PDF or TIFF)
from each file provided and performs detection and annotation on each image
extracted. Customers can specify which 5 pages or frames to process via
AnnotateFileRequest.pages.

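Example (illustrative sketch, not part of the generated reference): calling this
method with the Google API Python client. The credentials setup, the local file
"sample.pdf", and the choice of DOCUMENT_TEXT_DETECTION are assumptions made for
the sake of the example.

  import base64
  from googleapiclient.discovery import build

  service = build('vision', 'v1p1beta1')

  # Inline the file bytes; as noted below, `content` is base64 in the JSON form.
  with open('sample.pdf', 'rb') as f:
    content = base64.b64encode(f.read()).decode('utf-8')

  body = {
    'requests': [{
      'inputConfig': {'content': content, 'mimeType': 'application/pdf'},
      'features': [{'type': 'DOCUMENT_TEXT_DETECTION'}],
      'pages': [1, 2, -1],  # first, second and last page
    }],
  }
  response = service.files().annotate(body=body).execute()

  # Walk the response: one entry per AnnotateFileRequest, then one per page/frame.
  for file_resp in response.get('responses', []):
    for image_resp in file_resp.get('responses', []):
      print(image_resp.get('fullTextAnnotation', {}).get('text', ''))
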
Args:
  body: object, The request body.
    The object takes the form of:

98{ # A list of requests to annotate files using the BatchAnnotateFiles API.
    "requests": [ # Required. The list of file annotation requests. Right now we support only one
        # AnnotateFileRequest in BatchAnnotateFilesRequest.
101 { # A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
102 "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
103 "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
104 "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
105 # of doubles representing degrees latitude and degrees longitude. Unless
106 # specified otherwise, this must conform to the
                # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
108 # standard&lt;/a&gt;. Values must be within normalized ranges.
              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
110 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
111 },
112 "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
113 # of doubles representing degrees latitude and degrees longitude. Unless
114 # specified otherwise, this must conform to the
                # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
116 # standard&lt;/a&gt;. Values must be within normalized ranges.
              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
118 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
119 },
120 },
121 "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
122 # yields the best results since it enables automatic language detection. For
123 # languages based on the Latin alphabet, setting `language_hints` is not
124 # needed. In rare cases, when the language of the text in the image is known,
125 # setting a hint will help get better results (although it will be a
126 # significant hindrance if the hint is wrong). Text detection returns an
127 # error if one or more of the specified languages is not one of the
              # [supported languages](https://cloud.google.com/vision/docs/languages).
            "A String",
130 ],
131 "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
132 "filter": "A String", # The filtering expression. This can be used to restrict search results based
133 # on Product labels. We currently support an AND of OR of key-value
134 # expressions, where each expression within an OR must have the same key. An
135 # '=' should be used to connect the key and value.
136 #
137 # For example, "(color = red OR color = blue) AND brand = Google" is
138 # acceptable, but "(color = red OR brand = Google)" is not acceptable.
139 # "color: red" is not acceptable because it uses a ':' instead of an '='.
140 "productCategories": [ # The list of product categories to search in. Currently, we only consider
                # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
142 # "packagedgoods-v1", or "general-v1" should be specified. The legacy
143 # categories "homegoods", "apparel", and "toys" are still supported but will
144 # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
145 # or "toys-v2" for better product search accuracy. It is recommended to
146 # migrate existing products to these categories as well.
              "A String",
148 ],
149 "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
150 #
151 # Format is:
152 # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
153 "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                # If it is not specified, system discretion will be applied.
              "normalizedVertices": [ # The bounding polygon normalized vertices.
156 { # A vertex represents a 2D point in the image.
157 # NOTE: the normalized vertex coordinates are relative to the original image
158 # and range from 0 to 1.
159 "y": 3.14, # Y coordinate.
160 "x": 3.14, # X coordinate.
161 },
162 ],
163 "vertices": [ # The bounding polygon vertices.
164 { # A vertex represents a 2D point in the image.
165 # NOTE: the vertex coordinates are in the same scale as the original image.
166 "y": 42, # Y coordinate.
167 "x": 42, # X coordinate.
168 },
169 ],
170 },
171 },
172 "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
173 "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
174 # of the image. For example, if the desired aspect ratio is 4/3, the
175 # corresponding float value should be 1.33333. If not specified, the
176 # best possible crop is returned. The number of provided aspect ratios is
177 # limited to a maximum of 16; any aspect ratios provided after the 16th are
178 # ignored.
179 3.14,
180 ],
181 },
182 "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
183 "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
184 },
185 },
186 "pages": [ # Pages of the file to perform image annotation.
187 #
            # Pages start from 1; we assume the first page of the file is page 1.
189 # At most 5 pages are supported per request. Pages can be negative.
190 #
191 # Page 1 means the first page.
192 # Page 2 means the second page.
193 # Page -1 means the last page.
194 # Page -2 means the second to the last page.
195 #
196 # If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
197 #
198 # If this field is empty, by default the service performs image annotation
199 # for the first 5 pages of the file.
200 42,
201 ],
202 "inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
          "content": "A String", # File content, represented as a stream of bytes.
204 # Note: As with all `bytes` fields, protobuffers use a pure binary
205 # representation, whereas JSON representations use base64.
206 #
207 # Currently, this field only works for BatchAnnotateFiles requests. It does
208 # not work for AsyncBatchAnnotateFiles requests.
          "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
210 # "image/gif" are supported. Wildcards are not supported.
          "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
212 "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
213 # Google Cloud Storage object. Wildcards are not currently supported.
214 },
215 },
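        # Illustrative alternative (bucket and object names are assumptions, not part of
        # this reference): instead of inlining bytes via "content", point at a Cloud
        # Storage object, e.g.
        #   "inputConfig": { "gcsSource": { "uri": "gs://my-bucket/sample.pdf" },
        #                    "mimeType": "application/pdf" },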
216 "features": [ # Required. Requested features.
217 { # The type of Google Cloud Vision API detection to perform, and the maximum
218 # number of results to return for that type. Multiple `Feature` objects can
219 # be specified in the `features` list.
220 "model": "A String", # Model to use for the feature.
221 # Supported values: "builtin/stable" (the default if unset) and
222 # "builtin/latest".
223 "type": "A String", # The feature type.
224 "maxResults": 42, # Maximum number of results of this type. Does not apply to
225 # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
226 },
227 ],
228 },
229 ],
    "parent": "A String", # Optional. Target project and location to make a call.
231 #
232 # Format: `projects/{project-id}/locations/{location-id}`.
233 #
234 # If no parent is specified, a region will be chosen automatically.
235 #
236 # Supported location-ids:
237 # `us`: USA country only,
        # `asia`: East Asia areas, like Japan, Taiwan,
239 # `eu`: The European Union.
240 #
241 # Example: `projects/project-A/locations/eu`.
    }
243
244 x__xgafv: string, V1 error format.
245 Allowed values
246 1 - v1 error format
247 2 - v2 error format
248
249Returns:
250 An object of the form:
251
252 { # A list of file annotation responses.
253 "responses": [ # The list of file annotation responses, each response corresponding to each
254 # AnnotateFileRequest in BatchAnnotateFilesRequest.
255 { # Response to a single file annotation request. A file may contain one or more
256 # images, which individually have their own responses.
257 "totalPages": 42, # This field gives the total number of pages in the file.
        "inputConfig": { # The desired input location and metadata. # Information about the file for which this response is generated.
259 "content": "A String", # File content, represented as a stream of bytes.
260 # Note: As with all `bytes` fields, protobuffers use a pure binary
261 # representation, whereas JSON representations use base64.
262 #
263 # Currently, this field only works for BatchAnnotateFiles requests. It does
264 # not work for AsyncBatchAnnotateFiles requests.
265 "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
266 # "image/gif" are supported. Wildcards are not supported.
267 "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
268 "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
269 # Google Cloud Storage object. Wildcards are not currently supported.
270 },
271 },
272 "responses": [ # Individual responses to images found within the file. This field will be
273 # empty if the `error` field is set.
          { # Response to an image annotation request.
275 "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
276 # methods over safe-search verticals (for example, adult, spoof, medical,
277 # violence).
278 "medical": "A String", # Likelihood that this is a medical image.
            "violence": "A String", # Likelihood that this image contains violent content.
            "spoof": "A String", # Spoof likelihood. The likelihood that a modification
                # was made to the image's canonical version to make it appear
                # funny or offensive.
            "adult": "A String", # Represents the adult content likelihood for the image. Adult content may
284 # contain elements such as nudity, pornographic images or cartoons, or
285 # sexual activities.
286 "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
287 # include (but is not limited to) skimpy or sheer clothing, strategically
288 # covered nudity, lewd or provocative poses, or close-ups of sensitive
289 # body areas.
290 },
291 "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
292 { # Set of detected entity features.
293 "confidence": 3.14, # **Deprecated. Use `score` instead.**
294 # The accuracy of the entity detection in an image.
295 # For example, for an image in which the "Eiffel Tower" entity is detected,
296 # this field represents the confidence that there is a tower in the query
297 # image. Range [0, 1].
298 "description": "A String", # Entity textual description, expressed in its `locale` language.
299 "locale": "A String", # The language code for the locale in which the entity textual
300 # `description` is expressed.
301 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
302 # image. For example, the relevancy of "tower" is likely higher to an image
303 # containing the detected "Eiffel Tower" than to an image containing a
304 # detected distant towering building, even though the confidence that
305 # there is a tower in each image may be the same. Range [0, 1].
306 "mid": "A String", # Opaque entity ID. Some IDs may be available in
307 # [Google Knowledge Graph Search
308 # API](https://developers.google.com/knowledge-graph/).
309 "locations": [ # The location information for the detected entity. Multiple
310 # `LocationInfo` elements can be present because one location may
311 # indicate the location of the scene in the image, and another location
312 # may indicate the location of the place where the image was taken.
313 # Location information is usually present for landmarks.
314 { # Detected entity location information.
315 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
316 # of doubles representing degrees latitude and degrees longitude. Unless
317 # specified otherwise, this must conform to the
                    # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
319 # standard&lt;/a&gt;. Values must be within normalized ranges.
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
321 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
322 },
323 },
324 ],
325 "score": 3.14, # Overall score of the result. Range [0, 1].
326 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
327 # for `LABEL_DETECTION` features.
328 "normalizedVertices": [ # The bounding polygon normalized vertices.
329 { # A vertex represents a 2D point in the image.
330 # NOTE: the normalized vertex coordinates are relative to the original image
331 # and range from 0 to 1.
332 "y": 3.14, # Y coordinate.
333 "x": 3.14, # X coordinate.
334 },
335 ],
336 "vertices": [ # The bounding polygon vertices.
337 { # A vertex represents a 2D point in the image.
338 # NOTE: the vertex coordinates are in the same scale as the original image.
339 "y": 42, # Y coordinate.
340 "x": 42, # X coordinate.
341 },
342 ],
343 },
344 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
346 { # A `Property` consists of a user-supplied name/value pair.
347 "uint64Value": "A String", # Value of numeric properties.
348 "name": "A String", # Name of the property.
349 "value": "A String", # Value of the property.
350 },
351 ],
352 },
353 ],
354 "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
355 "fullMatchingImages": [ # Fully matching images from the Internet.
356 # Can include resized copies of the query image.
357 { # Metadata for online images.
358 "url": "A String", # The result image URL.
359 "score": 3.14, # (Deprecated) Overall relevancy score for the image.
360 },
361 ],
362 "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
363 { # Metadata for web pages.
364 "url": "A String", # The result web page URL.
365 "pageTitle": "A String", # Title for the web page, may contain HTML markups.
366 "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
367 "partialMatchingImages": [ # Partial matching images on the page.
368 # Those images are similar enough to share some key-point features. For
369 # example an original image will likely have partial matching for its
370 # crops.
371 { # Metadata for online images.
372 "url": "A String", # The result image URL.
373 "score": 3.14, # (Deprecated) Overall relevancy score for the image.
374 },
375 ],
376 "fullMatchingImages": [ # Fully matching images on the page.
377 # Can include resized copies of the query image.
378 { # Metadata for online images.
379 "url": "A String", # The result image URL.
380 "score": 3.14, # (Deprecated) Overall relevancy score for the image.
381 },
382 ],
383 },
384 ],
385 "visuallySimilarImages": [ # The visually similar image results.
386 { # Metadata for online images.
387 "url": "A String", # The result image URL.
388 "score": 3.14, # (Deprecated) Overall relevancy score for the image.
389 },
390 ],
391 "partialMatchingImages": [ # Partial matching images from the Internet.
392 # Those images are similar enough to share some key-point features. For
393 # example an original image will likely have partial matching for its crops.
394 { # Metadata for online images.
395 "url": "A String", # The result image URL.
396 "score": 3.14, # (Deprecated) Overall relevancy score for the image.
397 },
398 ],
399 "webEntities": [ # Deduced entities from similar images on the Internet.
400 { # Entity deduced from similar images on the Internet.
401 "entityId": "A String", # Opaque entity ID.
402 "score": 3.14, # Overall relevancy score for the entity.
403 # Not normalized and not comparable across different image queries.
404 "description": "A String", # Canonical description of the entity, in English.
405 },
406 ],
407 "bestGuessLabels": [ # The service's best guess as to the topic of the request image.
408 # Inferred from similar images on the open web.
409 { # Label to provide extra metadata for the web detection.
410 "languageCode": "A String", # The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
411 # For more information, see
412 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
413 "label": "A String", # Label for extra metadata.
414 },
415 ],
416 },
417 "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
418 # This will be sorted descending by confidence score.
419 { # Set of detected objects with bounding boxes.
420 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
421 # information, see
422 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
423 "score": 3.14, # Score of the result. Range [0, 1].
424 "mid": "A String", # Object ID that should align with EntityAnnotation mid.
425 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
426 "normalizedVertices": [ # The bounding polygon normalized vertices.
427 { # A vertex represents a 2D point in the image.
428 # NOTE: the normalized vertex coordinates are relative to the original image
429 # and range from 0 to 1.
430 "y": 3.14, # Y coordinate.
431 "x": 3.14, # X coordinate.
432 },
433 ],
434 "vertices": [ # The bounding polygon vertices.
435 { # A vertex represents a 2D point in the image.
436 # NOTE: the vertex coordinates are in the same scale as the original image.
437 "y": 42, # Y coordinate.
438 "x": 42, # X coordinate.
439 },
440 ],
441 },
442 "name": "A String", # Object name, expressed in its `language_code` language.
443 },
444 ],
445 "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
446 # completed successfully.
447 # This annotation provides the structural hierarchy for the OCR detected
448 # text.
449 # The hierarchy of an OCR extracted text structure is like this:
                # TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
                # Each structural component, starting from Page, may further have its own
                # properties. Properties describe detected languages, breaks, etc. Please refer
                # to the TextAnnotation.TextProperty message definition below for more
                # detail. (A traversal sketch follows this structure below.)
455 "text": "A String", # UTF-8 text detected on the pages.
456 "pages": [ # List of pages detected by OCR.
457 { # Detected page from OCR.
458 "width": 42, # Page width. For PDFs the unit is points. For images (including
459 # TIFFs) the unit is pixels.
460 "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
461 "property": { # Additional information detected on the structural component. # Additional information detected on the page.
462 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
463 "isPrefix": True or False, # True if break prepends the element.
464 "type": "A String", # Detected break type.
465 },
466 "detectedLanguages": [ # A list of detected languages together with confidence.
467 { # Detected language for a structural component.
468 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
469 # information, see
470 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
471 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
472 },
473 ],
474 },
475 "blocks": [ # List of blocks of text, images etc on this page.
476 { # Logical element on the page.
477 "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the block.
478 # The vertices are in the order of top-left, top-right, bottom-right,
479 # bottom-left. When a rotation of the bounding box is detected the rotation
480 # is represented as around the top-left corner as defined when the text is
481 # read in the 'natural' orientation.
482 # For example:
483 #
484 # * when the text is horizontal it might look like:
485 #
486 # 0----1
487 # | |
488 # 3----2
489 #
490 # * when it's rotated 180 degrees around the top-left corner it becomes:
491 #
492 # 2----3
493 # | |
494 # 1----0
495 #
496 # and the vertex order will still be (0, 1, 2, 3).
497 "normalizedVertices": [ # The bounding polygon normalized vertices.
498 { # A vertex represents a 2D point in the image.
499 # NOTE: the normalized vertex coordinates are relative to the original image
500 # and range from 0 to 1.
501 "y": 3.14, # Y coordinate.
502 "x": 3.14, # X coordinate.
503 },
504 ],
505 "vertices": [ # The bounding polygon vertices.
506 { # A vertex represents a 2D point in the image.
507 # NOTE: the vertex coordinates are in the same scale as the original image.
508 "y": 42, # Y coordinate.
509 "x": 42, # X coordinate.
510 },
511 ],
512 },
513 "blockType": "A String", # Detected block type (text, image etc) for this block.
514 "property": { # Additional information detected on the structural component. # Additional information detected for the block.
515 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
516 "isPrefix": True or False, # True if break prepends the element.
517 "type": "A String", # Detected break type.
518 },
519 "detectedLanguages": [ # A list of detected languages together with confidence.
520 { # Detected language for a structural component.
521 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
522 # information, see
523 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
524 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
525 },
526 ],
527 },
528 "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
529 "paragraphs": [ # List of paragraphs in this block (if this blocks is of type text).
530 { # Structural unit of text representing a number of words in certain order.
531 "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
532 # The vertices are in the order of top-left, top-right, bottom-right,
533 # bottom-left. When a rotation of the bounding box is detected the rotation
534 # is represented as around the top-left corner as defined when the text is
535 # read in the 'natural' orientation.
536 # For example:
537 # * when the text is horizontal it might look like:
538 # 0----1
539 # | |
540 # 3----2
541 # * when it's rotated 180 degrees around the top-left corner it becomes:
542 # 2----3
543 # | |
544 # 1----0
545 # and the vertex order will still be (0, 1, 2, 3).
546 "normalizedVertices": [ # The bounding polygon normalized vertices.
547 { # A vertex represents a 2D point in the image.
548 # NOTE: the normalized vertex coordinates are relative to the original image
549 # and range from 0 to 1.
550 "y": 3.14, # Y coordinate.
551 "x": 3.14, # X coordinate.
552 },
553 ],
554 "vertices": [ # The bounding polygon vertices.
555 { # A vertex represents a 2D point in the image.
556 # NOTE: the vertex coordinates are in the same scale as the original image.
557 "y": 42, # Y coordinate.
558 "x": 42, # X coordinate.
559 },
560 ],
561 },
562 "confidence": 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
563 "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
564 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
565 "isPrefix": True or False, # True if break prepends the element.
566 "type": "A String", # Detected break type.
567 },
568 "detectedLanguages": [ # A list of detected languages together with confidence.
569 { # Detected language for a structural component.
570 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
571 # information, see
572 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
573 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
574 },
575 ],
576 },
                      "words": [ # List of all words in this paragraph.
                        { # A word representation.
                          "symbols": [ # List of symbols in the word.
580 # The order of the symbols follows the natural reading order.
581 { # A single symbol representation.
582 "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
583 # The vertices are in the order of top-left, top-right, bottom-right,
584 # bottom-left. When a rotation of the bounding box is detected the rotation
585 # is represented as around the top-left corner as defined when the text is
586 # read in the 'natural' orientation.
587 # For example:
588 # * when the text is horizontal it might look like:
589 # 0----1
590 # | |
591 # 3----2
592 # * when it's rotated 180 degrees around the top-left corner it becomes:
593 # 2----3
594 # | |
595 # 1----0
                                # and the vertex order will still be (0, 1, 2, 3).
                              "normalizedVertices": [ # The bounding polygon normalized vertices.
598 { # A vertex represents a 2D point in the image.
599 # NOTE: the normalized vertex coordinates are relative to the original image
600 # and range from 0 to 1.
601 "y": 3.14, # Y coordinate.
602 "x": 3.14, # X coordinate.
603 },
604 ],
605 "vertices": [ # The bounding polygon vertices.
606 { # A vertex represents a 2D point in the image.
607 # NOTE: the vertex coordinates are in the same scale as the original image.
608 "y": 42, # Y coordinate.
609 "x": 42, # X coordinate.
610 },
611 ],
612 },
613 "text": "A String", # The actual UTF-8 representation of the symbol.
614 "confidence": 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
615 "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
616 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
617 "isPrefix": True or False, # True if break prepends the element.
618 "type": "A String", # Detected break type.
619 },
620 "detectedLanguages": [ # A list of detected languages together with confidence.
621 { # Detected language for a structural component.
622 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
623 # information, see
624 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
625 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
626 },
627 ],
628 },
629 },
630 ],
                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
632 # The vertices are in the order of top-left, top-right, bottom-right,
633 # bottom-left. When a rotation of the bounding box is detected the rotation
634 # is represented as around the top-left corner as defined when the text is
635 # read in the 'natural' orientation.
636 # For example:
637 # * when the text is horizontal it might look like:
638 # 0----1
639 # | |
640 # 3----2
641 # * when it's rotated 180 degrees around the top-left corner it becomes:
642 # 2----3
643 # | |
644 # 1----0
645 # and the vertex order will still be (0, 1, 2, 3).
646 "normalizedVertices": [ # The bounding polygon normalized vertices.
647 { # A vertex represents a 2D point in the image.
648 # NOTE: the normalized vertex coordinates are relative to the original image
649 # and range from 0 to 1.
650 "y": 3.14, # Y coordinate.
651 "x": 3.14, # X coordinate.
652 },
653 ],
654 "vertices": [ # The bounding polygon vertices.
655 { # A vertex represents a 2D point in the image.
656 # NOTE: the vertex coordinates are in the same scale as the original image.
657 "y": 42, # Y coordinate.
658 "x": 42, # X coordinate.
659 },
660 ],
661 },
                          "confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
663 "property": { # Additional information detected on the structural component. # Additional information detected for the word.
664 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
665 "isPrefix": True or False, # True if break prepends the element.
666 "type": "A String", # Detected break type.
667 },
668 "detectedLanguages": [ # A list of detected languages together with confidence.
669 { # Detected language for a structural component.
670 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
671 # information, see
672 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
673 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
674 },
675 ],
676 },
677 },
678 ],
679 },
680 ],
681 },
682 ],
683 "height": 42, # Page height. For PDFs the unit is points. For images (including
684 # TIFFs) the unit is pixels.
685 },
686 ],
687 },
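        # Illustrative traversal sketch (not part of the schema): reassemble the words
        # in the hierarchy above, assuming `annotation` holds this fullTextAnnotation dict.
        #
        #   for page in annotation.get('pages', []):
        #     for block in page.get('blocks', []):
        #       for paragraph in block.get('paragraphs', []):
        #         for word in paragraph.get('words', []):
        #           print(''.join(s['text'] for s in word.get('symbols', [])))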
688 "labelAnnotations": [ # If present, label detection has completed successfully.
689 { # Set of detected entity features.
690 "confidence": 3.14, # **Deprecated. Use `score` instead.**
691 # The accuracy of the entity detection in an image.
692 # For example, for an image in which the "Eiffel Tower" entity is detected,
693 # this field represents the confidence that there is a tower in the query
694 # image. Range [0, 1].
695 "description": "A String", # Entity textual description, expressed in its `locale` language.
696 "locale": "A String", # The language code for the locale in which the entity textual
697 # `description` is expressed.
698 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
699 # image. For example, the relevancy of "tower" is likely higher to an image
700 # containing the detected "Eiffel Tower" than to an image containing a
701 # detected distant towering building, even though the confidence that
702 # there is a tower in each image may be the same. Range [0, 1].
703 "mid": "A String", # Opaque entity ID. Some IDs may be available in
704 # [Google Knowledge Graph Search
705 # API](https://developers.google.com/knowledge-graph/).
706 "locations": [ # The location information for the detected entity. Multiple
707 # `LocationInfo` elements can be present because one location may
708 # indicate the location of the scene in the image, and another location
709 # may indicate the location of the place where the image was taken.
710 # Location information is usually present for landmarks.
711 { # Detected entity location information.
712 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
713 # of doubles representing degrees latitude and degrees longitude. Unless
714 # specified otherwise, this must conform to the
                    # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
716 # standard&lt;/a&gt;. Values must be within normalized ranges.
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
718 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
719 },
720 },
721 ],
722 "score": 3.14, # Overall score of the result. Range [0, 1].
723 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
724 # for `LABEL_DETECTION` features.
725 "normalizedVertices": [ # The bounding polygon normalized vertices.
726 { # A vertex represents a 2D point in the image.
727 # NOTE: the normalized vertex coordinates are relative to the original image
728 # and range from 0 to 1.
729 "y": 3.14, # Y coordinate.
730 "x": 3.14, # X coordinate.
731 },
732 ],
733 "vertices": [ # The bounding polygon vertices.
734 { # A vertex represents a 2D point in the image.
735 # NOTE: the vertex coordinates are in the same scale as the original image.
736 "y": 42, # Y coordinate.
737 "x": 42, # X coordinate.
738 },
739 ],
740 },
741 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
743 { # A `Property` consists of a user-supplied name/value pair.
744 "uint64Value": "A String", # Value of numeric properties.
745 "name": "A String", # Name of the property.
746 "value": "A String", # Value of the property.
747 },
748 ],
749 },
750 ],
751 "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
752 "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
753 "colors": [ # RGB color values with their score and pixel fraction.
754 { # Color information consists of RGB channels, score, and the fraction of
755 # the image that the color occupies in the image.
756 "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
757 # for simplicity of conversion to/from color representations in various
758 # languages over compactness; for example, the fields of this representation
759 # can be trivially provided to the constructor of "java.awt.Color" in Java; it
760 # can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
761 # method in iOS; and, with just a little work, it can be easily formatted into
762 # a CSS "rgba()" string in JavaScript, as well.
763 #
764 # Note: this proto does not carry information about the absolute color space
765 # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
766 # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
767 # space.
768 #
769 # Example (Java):
770 #
771 # import com.google.type.Color;
772 #
773 # // ...
774 # public static java.awt.Color fromProto(Color protocolor) {
775 # float alpha = protocolor.hasAlpha()
776 # ? protocolor.getAlpha().getValue()
777 # : 1.0;
778 #
779 # return new java.awt.Color(
780 # protocolor.getRed(),
781 # protocolor.getGreen(),
782 # protocolor.getBlue(),
783 # alpha);
784 # }
785 #
786 # public static Color toProto(java.awt.Color color) {
787 # float red = (float) color.getRed();
788 # float green = (float) color.getGreen();
789 # float blue = (float) color.getBlue();
790 # float denominator = 255.0;
791 # Color.Builder resultBuilder =
792 # Color
793 # .newBuilder()
794 # .setRed(red / denominator)
795 # .setGreen(green / denominator)
796 # .setBlue(blue / denominator);
797 # int alpha = color.getAlpha();
798 # if (alpha != 255) {
                #     resultBuilder.setAlpha(
800 # FloatValue
801 # .newBuilder()
802 # .setValue(((float) alpha) / denominator)
803 # .build());
804 # }
805 # return resultBuilder.build();
806 # }
807 # // ...
808 #
809 # Example (iOS / Obj-C):
810 #
811 # // ...
812 # static UIColor* fromProto(Color* protocolor) {
813 # float red = [protocolor red];
814 # float green = [protocolor green];
815 # float blue = [protocolor blue];
816 # FloatValue* alpha_wrapper = [protocolor alpha];
817 # float alpha = 1.0;
818 # if (alpha_wrapper != nil) {
819 # alpha = [alpha_wrapper value];
820 # }
821 # return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
822 # }
823 #
824 # static Color* toProto(UIColor* color) {
825 # CGFloat red, green, blue, alpha;
                # if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
                #   return nil;
828 # }
829 # Color* result = [[Color alloc] init];
830 # [result setRed:red];
831 # [result setGreen:green];
832 # [result setBlue:blue];
                # if (alpha &lt;= 0.9999) {
                #   [result setAlpha:floatWrapperWithValue(alpha)];
835 # }
836 # [result autorelease];
837 # return result;
838 # }
839 # // ...
840 #
841 # Example (JavaScript):
842 #
843 # // ...
844 #
845 # var protoToCssColor = function(rgb_color) {
846 # var redFrac = rgb_color.red || 0.0;
847 # var greenFrac = rgb_color.green || 0.0;
848 # var blueFrac = rgb_color.blue || 0.0;
849 # var red = Math.floor(redFrac * 255);
850 # var green = Math.floor(greenFrac * 255);
851 # var blue = Math.floor(blueFrac * 255);
852 #
853 # if (!('alpha' in rgb_color)) {
854 # return rgbToCssColor_(red, green, blue);
855 # }
856 #
857 # var alphaFrac = rgb_color.alpha.value || 0.0;
858 # var rgbParams = [red, green, blue].join(',');
859 # return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
860 # };
861 #
862 # var rgbToCssColor_ = function(red, green, blue) {
                # var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
                # var hexString = rgbNumber.toString(16);
865 # var missingZeros = 6 - hexString.length;
866 # var resultBuilder = ['#'];
                # for (var i = 0; i &lt; missingZeros; i++) {
                #   resultBuilder.push('0');
869 # }
870 # resultBuilder.push(hexString);
871 # return resultBuilder.join('');
872 # };
873 #
874 # // ...
875 "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
876 "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
877 # the final pixel color is defined by the equation:
878 #
879 # pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
880 #
881 # This means that a value of 1.0 corresponds to a solid color, whereas
882 # a value of 0.0 corresponds to a completely transparent color. This
883 # uses a wrapper message rather than a simple float scalar so that it is
884 # possible to distinguish between a default value and the value being unset.
885 # If omitted, this color object is to be rendered as a solid color
886 # (as if the alpha value had been explicitly given with a value of 1.0).
887 "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
888 "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
889 },
890 "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
891 # Value in range [0, 1].
892 "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
893 },
894 ],
895 },
896 },
897 "faceAnnotations": [ # If present, face detection has completed successfully.
898 { # A face annotation object contains the results of face detection.
899 "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
900 # pointing relative to the vertical plane perpendicular to the image. Range
901 # [-180,180].
902 "sorrowLikelihood": "A String", # Sorrow likelihood.
903 "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
904 "underExposedLikelihood": "A String", # Under-exposed likelihood.
905 "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
906 "joyLikelihood": "A String", # Joy likelihood.
907 "landmarks": [ # Detected face landmarks.
908 { # A face-specific landmark (for example, a face feature).
909 "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
910 # A valid Position must have both x and y coordinates.
911 # The position coordinates are in the same scale as the original image.
912 "y": 3.14, # Y coordinate.
913 "x": 3.14, # X coordinate.
914 "z": 3.14, # Z coordinate (or depth).
915 },
916 "type": "A String", # Face landmark type.
917 },
918 ],
919 "surpriseLikelihood": "A String", # Surprise likelihood.
920 "blurredLikelihood": "A String", # Blurred likelihood.
921 "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
922 # pointing relative to the image's horizontal plane. Range [-180,180].
923 "angerLikelihood": "A String", # Anger likelihood.
924 "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
925 # are in the original image's scale.
926 # The bounding box is computed to "frame" the face in accordance with human
927 # expectations. It is based on the landmarker results.
928 # Note that one or more x and/or y coordinates may not be generated in the
929 # `BoundingPoly` (the polygon will be unbounded) if only a partial face
930 # appears in the image to be annotated.
931 "normalizedVertices": [ # The bounding polygon normalized vertices.
932 { # A vertex represents a 2D point in the image.
933 # NOTE: the normalized vertex coordinates are relative to the original image
934 # and range from 0 to 1.
935 "y": 3.14, # Y coordinate.
936 "x": 3.14, # X coordinate.
937 },
938 ],
939 "vertices": [ # The bounding polygon vertices.
940 { # A vertex represents a 2D point in the image.
941 # NOTE: the vertex coordinates are in the same scale as the original image.
942 "y": 42, # Y coordinate.
943 "x": 42, # X coordinate.
944 },
945 ],
946 },
947 "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
948 # of the face relative to the image vertical about the axis perpendicular to
949 # the face. Range [-180,180].
950 "headwearLikelihood": "A String", # Headwear likelihood.
951 "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
952 # `boundingPoly`, and encloses only the skin part of the face. Typically, it
953 # is used to eliminate the face from any image analysis that detects the
954 # "amount of skin" visible in an image. It is not based on the
955 # landmarker results, only on the initial face detection, hence
                # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
              "normalizedVertices": [ # The bounding polygon normalized vertices.
958 { # A vertex represents a 2D point in the image.
959 # NOTE: the normalized vertex coordinates are relative to the original image
960 # and range from 0 to 1.
961 "y": 3.14, # Y coordinate.
962 "x": 3.14, # X coordinate.
963 },
964 ],
965 "vertices": [ # The bounding polygon vertices.
966 { # A vertex represents a 2D point in the image.
967 # NOTE: the vertex coordinates are in the same scale as the original image.
968 "y": 42, # Y coordinate.
969 "x": 42, # X coordinate.
970 },
971 ],
972 },
973 },
974 ],
975 "productSearchResults": { # Results for a product search request. # If present, product search has completed successfully.
976 "productGroupedResults": [ # List of results grouped by products detected in the query image. Each entry
977 # corresponds to one bounding polygon in the query image, and contains the
978 # matching products specific to that region. There may be duplicate product
979 # matches in the union of all the per-product results.
980 { # Information about the products similar to a single product in a query
981 # image.
              "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
983 { # Prediction for what the object in the bounding box is.
984 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
985 # information, see
986 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
987 "score": 3.14, # Score of the result. Range [0, 1].
988 "mid": "A String", # Object ID that should align with EntityAnnotation mid.
989 "name": "A String", # Object name, expressed in its `language_code` language.
990 },
991 ],
              "results": [ # List of results, one for each product match.
993 { # Information about a product.
994 "image": "A String", # The resource name of the image from the product that is the closest match
995 # to the query.
996 "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
997 # 1 (full confidence).
998 "product": { # A Product contains ReferenceImages. # The Product.
999 "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
1000 # constraints can be specified based on the product_labels.
1001 #
1002 # Note that integer values can be provided as strings, e.g. "1199". Only
1003 # strings with integer values can match a range-based restriction which is
1004 # to be supported soon.
1005 #
1006 # Multiple values can be assigned to the same key. One product may have up to
                        # 500 product_labels.
1008 #
1009 # Notice that the total number of distinct product_labels over all products
1010 # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
1011 # will refuse to work for that ProductSet.
                      { # A product label represented as a key-value pair.
1013 "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
1014 # exceed 128 bytes.
1015 "value": "A String", # The value of the label attached to the product. Cannot be empty and
1016 # cannot exceed 128 bytes.
1017 },
1018 ],
1019 "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
1020 # 4096 characters long.
                    "name": "A String", # The resource name of the product.
1022 #
1023 # Format is:
1024 # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
1025 #
1026 # This field is ignored when creating a product.
                    "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
1028 # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
1029 # "homegoods", "apparel", and "toys" are still supported, but these should
1030 # not be used for new products.
1031 "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
1032 # characters long.
                  },
1034 },
1035 ],
1036 "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
1037 "normalizedVertices": [ # The bounding polygon normalized vertices.
1038 { # A vertex represents a 2D point in the image.
1039 # NOTE: the normalized vertex coordinates are relative to the original image
1040 # and range from 0 to 1.
1041 "y": 3.14, # Y coordinate.
1042 "x": 3.14, # X coordinate.
1043 },
1044 ],
1045 "vertices": [ # The bounding polygon vertices.
1046 { # A vertex represents a 2D point in the image.
1047 # NOTE: the vertex coordinates are in the same scale as the original image.
1048 "y": 42, # Y coordinate.
1049 "x": 42, # X coordinate.
1050 },
1051 ],
1052 },
1053 },
1054 ],
1055 "results": [ # List of results, one for each product match.
1056 { # Information about a product.
1057 "image": "A String", # The resource name of the image from the product that is the closest match
1058 # to the query.
1059 "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
1060 # 1 (full confidence).
1061 "product": { # A Product contains ReferenceImages. # The Product.
1062 "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
1063 # constraints can be specified based on the product_labels.
1064 #
1065 # Note that integer values can be provided as strings, e.g. "1199". Only
1066 # strings with integer values can match a range-based restriction which is
1067 # to be supported soon.
1068 #
1069 # Multiple values can be assigned to the same key. One product may have up to
                      # 500 product_labels.
1071 #
1072 # Notice that the total number of distinct product_labels over all products
1073 # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
1074 # will refuse to work for that ProductSet.
                    { # A product label represented as a key-value pair.
1076 "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
1077 # exceed 128 bytes.
1078 "value": "A String", # The value of the label attached to the product. Cannot be empty and
1079 # cannot exceed 128 bytes.
1080 },
1081 ],
1082 "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
1083 # 4096 characters long.
                  "name": "A String", # The resource name of the product.
1085 #
1086 # Format is:
1087 # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
1088 #
1089 # This field is ignored when creating a product.
                  "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
1091 # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
1092 # "homegoods", "apparel", and "toys" are still supported, but these should
1093 # not be used for new products.
1094 "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
1095 # characters long.
                },
1097 },
1098 ],
1099 "indexTime": "A String", # Timestamp of the index which provided these results. Products added to the
1100 # product set and products removed from the product set after this time are
1101 # not reflected in the current results.
1102 },
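            # Illustrative sketch (assumes `image_resp` holds one of the `responses`
            # entries above, as in the example near the top of this method): list the
            # matched products with their confidence scores.
            #
            #   for r in image_resp.get('productSearchResults', {}).get('results', []):
            #     print(r['product']['displayName'], r['score'])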
1103 "logoAnnotations": [ # If present, logo detection has completed successfully.
1104 { # Set of detected entity features.
1105 "confidence": 3.14, # **Deprecated. Use `score` instead.**
1106 # The accuracy of the entity detection in an image.
1107 # For example, for an image in which the "Eiffel Tower" entity is detected,
1108 # this field represents the confidence that there is a tower in the query
1109 # image. Range [0, 1].
1110 "description": "A String", # Entity textual description, expressed in its `locale` language.
1111 "locale": "A String", # The language code for the locale in which the entity textual
1112 # `description` is expressed.
1113 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1114 # image. For example, the relevancy of "tower" is likely higher to an image
1115 # containing the detected "Eiffel Tower" than to an image containing a
1116 # detected distant towering building, even though the confidence that
1117 # there is a tower in each image may be the same. Range [0, 1].
1118 "mid": "A String", # Opaque entity ID. Some IDs may be available in
1119 # [Google Knowledge Graph Search
1120 # API](https://developers.google.com/knowledge-graph/).
1121 "locations": [ # The location information for the detected entity. Multiple
1122 # `LocationInfo` elements can be present because one location may
1123 # indicate the location of the scene in the image, and another location
1124 # may indicate the location of the place where the image was taken.
1125 # Location information is usually present for landmarks.
1126 { # Detected entity location information.
1127 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1128 # of doubles representing degrees latitude and degrees longitude. Unless
1129 # specified otherwise, this must conform to the
                    # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
1131 # standard&lt;/a&gt;. Values must be within normalized ranges.
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1133 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1134 },
1135 },
1136 ],
1137 "score": 3.14, # Overall score of the result. Range [0, 1].
1138 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1139 # for `LABEL_DETECTION` features.
1140 "normalizedVertices": [ # The bounding polygon normalized vertices.
1141 { # A vertex represents a 2D point in the image.
1142 # NOTE: the normalized vertex coordinates are relative to the original image
1143 # and range from 0 to 1.
1144 "y": 3.14, # Y coordinate.
1145 "x": 3.14, # X coordinate.
1146 },
1147 ],
1148 "vertices": [ # The bounding polygon vertices.
1149 { # A vertex represents a 2D point in the image.
1150 # NOTE: the vertex coordinates are in the same scale as the original image.
1151 "y": 42, # Y coordinate.
1152 "x": 42, # X coordinate.
1153 },
1154 ],
1155 },
1156 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
1157            # fields, such as a score or string that qualifies the entity.
1158 { # A `Property` consists of a user-supplied name/value pair.
1159 "uint64Value": "A String", # Value of numeric properties.
1160 "name": "A String", # Name of the property.
1161 "value": "A String", # Value of the property.
1162 },
1163 ],
1164 },
1165 ],
1166 "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
1167 { # Set of detected entity features.
1168 "confidence": 3.14, # **Deprecated. Use `score` instead.**
1169 # The accuracy of the entity detection in an image.
1170 # For example, for an image in which the "Eiffel Tower" entity is detected,
1171 # this field represents the confidence that there is a tower in the query
1172 # image. Range [0, 1].
1173 "description": "A String", # Entity textual description, expressed in its `locale` language.
1174 "locale": "A String", # The language code for the locale in which the entity textual
1175 # `description` is expressed.
1176 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1177 # image. For example, the relevancy of "tower" is likely higher to an image
1178 # containing the detected "Eiffel Tower" than to an image containing a
1179 # detected distant towering building, even though the confidence that
1180 # there is a tower in each image may be the same. Range [0, 1].
1181 "mid": "A String", # Opaque entity ID. Some IDs may be available in
1182 # [Google Knowledge Graph Search
1183 # API](https://developers.google.com/knowledge-graph/).
1184 "locations": [ # The location information for the detected entity. Multiple
1185 # `LocationInfo` elements can be present because one location may
1186 # indicate the location of the scene in the image, and another location
1187 # may indicate the location of the place where the image was taken.
1188 # Location information is usually present for landmarks.
1189 { # Detected entity location information.
1190 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1191 # of doubles representing degrees latitude and degrees longitude. Unless
1192 # specified otherwise, this must conform to the
1193                # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
1194 # standard&lt;/a&gt;. Values must be within normalized ranges.
1195              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1196 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1197 },
1198 },
1199 ],
1200 "score": 3.14, # Overall score of the result. Range [0, 1].
1201 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1202 # for `LABEL_DETECTION` features.
1203 "normalizedVertices": [ # The bounding polygon normalized vertices.
1204 { # A vertex represents a 2D point in the image.
1205 # NOTE: the normalized vertex coordinates are relative to the original image
1206 # and range from 0 to 1.
1207 "y": 3.14, # Y coordinate.
1208 "x": 3.14, # X coordinate.
1209 },
1210 ],
1211 "vertices": [ # The bounding polygon vertices.
1212 { # A vertex represents a 2D point in the image.
1213 # NOTE: the vertex coordinates are in the same scale as the original image.
1214 "y": 42, # Y coordinate.
1215 "x": 42, # X coordinate.
1216 },
1217 ],
1218 },
1219 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
1220            # fields, such as a score or string that qualifies the entity.
1221 { # A `Property` consists of a user-supplied name/value pair.
1222 "uint64Value": "A String", # Value of numeric properties.
1223 "name": "A String", # Name of the property.
1224 "value": "A String", # Value of the property.
1225 },
1226 ],
1227 },
1228 ],
1229 "context": { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
1230 # comes from.
1231 # information about the source of that image.
1232 "pageNumber": 42, # If the file was a PDF or TIFF, this field gives the page number within
1233 # the file used to produce the image.
1234 "uri": "A String", # The URI of the file used to produce the image.
1235 },
1236 "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
1237 # Note that filled-in image annotations are guaranteed to be
1238 # correct, even when `error` is set.
1239 # different programming environments, including REST APIs and RPC APIs. It is
1240 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1241 # three pieces of data: error code, error message, and error details.
1242 #
1243 # You can find out more about this error model and how to work with it in the
1244 # [API Design Guide](https://cloud.google.com/apis/design/errors).
1245 "message": "A String", # A developer-facing error message, which should be in English. Any
1246 # user-facing error message should be localized and sent in the
1247 # google.rpc.Status.details field, or localized by the client.
1248 "code": 42, # The status code, which should be an enum value of google.rpc.Code.
1249 "details": [ # A list of messages that carry the error details. There is a common set of
1250 # message types for APIs to use.
1251 {
1252 "a_key": "", # Properties of the object. Contains field @type with type URL.
1253 },
1254 ],
1255 },
1256 "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
1257 "cropHints": [ # Crop hint results.
1258 { # Single crop hint that is used to generate a new crop when serving an image.
1259 "confidence": 3.14, # Confidence of this being a salient region. Range [0, 1].
1260            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
1261 # box are in the original image's scale.
1262 "normalizedVertices": [ # The bounding polygon normalized vertices.
1263 { # A vertex represents a 2D point in the image.
1264 # NOTE: the normalized vertex coordinates are relative to the original image
1265 # and range from 0 to 1.
1266 "y": 3.14, # Y coordinate.
1267 "x": 3.14, # X coordinate.
1268 },
1269 ],
1270 "vertices": [ # The bounding polygon vertices.
1271 { # A vertex represents a 2D point in the image.
1272 # NOTE: the vertex coordinates are in the same scale as the original image.
1273 "y": 42, # Y coordinate.
1274 "x": 42, # X coordinate.
1275 },
1276 ],
1277 },
1278            "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
1279 # image.
1280          },
1281 ],
1282 },
1283 },
1284 ],
1285      "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the failed request. The
1286 # `responses` field will not be set in this case.
1287 # different programming environments, including REST APIs and RPC APIs. It is
1288 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1289 # three pieces of data: error code, error message, and error details.
1290 #
1291 # You can find out more about this error model and how to work with it in the
1292 # [API Design Guide](https://cloud.google.com/apis/design/errors).
1293 "message": "A String", # A developer-facing error message, which should be in English. Any
1294 # user-facing error message should be localized and sent in the
1295 # google.rpc.Status.details field, or localized by the client.
1296 "code": 42, # The status code, which should be an enum value of google.rpc.Code.
1297 "details": [ # A list of messages that carry the error details. There is a common set of
1298 # message types for APIs to use.
1299 {
1300 "a_key": "", # Properties of the object. Contains field @type with type URL.
1301 },
1302 ],
1303    },
1304 },
1305 ],
1306 }</pre>
1307</div>
1308
1309<div class="method">
1310  <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</code>
1311  <pre>Run asynchronous image detection and annotation for a list of generic
1312files, such as PDF files, which may contain multiple pages and multiple
1313images per page. Progress and results can be retrieved through the
1314`google.longrunning.Operations` interface.
1315`Operation.metadata` contains `OperationMetadata` (metadata).
1316`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
1317
1318Args:
1319  body: object, The request body.
1320    The object takes the form of:
1321
1322{ # Multiple async file annotation requests are batched into a single service
1323 # call.
1324  "requests": [ # Required. Individual async file annotation requests for this batch.
1325    { # An offline file annotation request.
1326 "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
1327 "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
1328 "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
1329 # of doubles representing degrees latitude and degrees longitude. Unless
1330 # specified otherwise, this must conform to the
1331            # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
1332 # standard&lt;/a&gt;. Values must be within normalized ranges.
1333          "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1334 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1335 },
1336 "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
1337 # of doubles representing degrees latitude and degrees longitude. Unless
1338 # specified otherwise, this must conform to the
1339            # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
1340 # standard&lt;/a&gt;. Values must be within normalized ranges.
1341          "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1342 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1343 },
1344 },
1345 "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
1346 # yields the best results since it enables automatic language detection. For
1347 # languages based on the Latin alphabet, setting `language_hints` is not
1348 # needed. In rare cases, when the language of the text in the image is known,
1349 # setting a hint will help get better results (although it will be a
1350 # significant hindrance if the hint is wrong). Text detection returns an
1351 # error if one or more of the specified languages is not one of the
1352          # [supported languages](https://cloud.google.com/vision/docs/languages).
1353        "A String",
1354 ],
1355 "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
1356 "filter": "A String", # The filtering expression. This can be used to restrict search results based
1357 # on Product labels. We currently support an AND of OR of key-value
1358 # expressions, where each expression within an OR must have the same key. An
1359 # '=' should be used to connect the key and value.
1360 #
1361 # For example, "(color = red OR color = blue) AND brand = Google" is
1362 # acceptable, but "(color = red OR brand = Google)" is not acceptable.
1363 # "color: red" is not acceptable because it uses a ':' instead of an '='.
1364 "productCategories": [ # The list of product categories to search in. Currently, we only consider
1365          # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
1366 # "packagedgoods-v1", or "general-v1" should be specified. The legacy
1367 # categories "homegoods", "apparel", and "toys" are still supported but will
1368 # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
1369 # or "toys-v2" for better product search accuracy. It is recommended to
1370 # migrate existing products to these categories as well.
1371          "A String",
1372 ],
1373 "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
1374 #
1375 # Format is:
1376 # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
1377 "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
1378          # If it is not specified, system discretion will be applied.
1379          "normalizedVertices": [ # The bounding polygon normalized vertices.
1380 { # A vertex represents a 2D point in the image.
1381 # NOTE: the normalized vertex coordinates are relative to the original image
1382 # and range from 0 to 1.
1383 "y": 3.14, # Y coordinate.
1384 "x": 3.14, # X coordinate.
1385 },
1386 ],
1387 "vertices": [ # The bounding polygon vertices.
1388 { # A vertex represents a 2D point in the image.
1389 # NOTE: the vertex coordinates are in the same scale as the original image.
1390 "y": 42, # Y coordinate.
1391 "x": 42, # X coordinate.
1392 },
1393 ],
1394 },
1395 },
1396 "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
1397 "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
1398 # of the image. For example, if the desired aspect ratio is 4/3, the
1399 # corresponding float value should be 1.33333. If not specified, the
1400 # best possible crop is returned. The number of provided aspect ratios is
1401 # limited to a maximum of 16; any aspect ratios provided after the 16th are
1402 # ignored.
1403 3.14,
1404 ],
1405 },
1406 "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
1407 "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
1408 },
1409 },
1410 "outputConfig": { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
1411 "batchSize": 42, # The max number of response protos to put into each output JSON file on
1412 # Google Cloud Storage.
1413 # The valid range is [1, 100]. If not specified, the default value is 20.
1414 #
1415 # For example, for one pdf file with 100 pages, 100 response protos will
1416 # be generated. If `batch_size` = 20, then 5 json files each
1417 # containing 20 response protos will be written under the prefix
1418 # `gcs_destination`.`uri`.
1419 #
1420 # Currently, batch_size only applies to GcsDestination, with potential future
1421 # support for other output configurations.
1422 "gcsDestination": { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
1423 "uri": "A String", # Google Cloud Storage URI prefix where the results will be stored. Results
1424 # will be in JSON format and preceded by its corresponding input URI prefix.
1425        # This field can represent either a gcs file prefix or a gcs directory. In
1426 # either case, the uri should be unique because in order to get all of the
1427 # output files, you will need to do a wildcard gcs search on the uri prefix
1428 # you provide.
1429 #
1430 # Examples:
1431 #
1432 # * File Prefix: gs://bucket-name/here/filenameprefix The output files
1433 # will be created in gs://bucket-name/here/ and the names of the
1434 # output files will begin with "filenameprefix".
1435 #
1436 # * Directory Prefix: gs://bucket-name/some/location/ The output files
1437 # will be created in gs://bucket-name/some/location/ and the names of the
1438 # output files could be anything because there was no filename prefix
1439 # specified.
1440 #
1441        # If there are multiple outputs, each response is still an AnnotateFileResponse, each of
1442        # which contains some subset of the full list of AnnotateImageResponse messages.
1443 # Multiple outputs can happen if, for example, the output JSON is too large
1444 # and overflows into multiple sharded files.
1445 },
1446 },
1447 "inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
1448        "content": "A String", # File content, represented as a stream of bytes.
1449        # Note: As with all `bytes` fields, protocol buffers use a pure binary
1450 # representation, whereas JSON representations use base64.
1451 #
1452 # Currently, this field only works for BatchAnnotateFiles requests. It does
1453 # not work for AsyncBatchAnnotateFiles requests.
1454        "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
1455 # "image/gif" are supported. Wildcards are not supported.
1456        "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
1457 "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
1458 # Google Cloud Storage object. Wildcards are not currently supported.
1459 },
1460 },
1461 "features": [ # Required. Requested features.
1462 { # The type of Google Cloud Vision API detection to perform, and the maximum
1463 # number of results to return for that type. Multiple `Feature` objects can
1464 # be specified in the `features` list.
1465 "model": "A String", # Model to use for the feature.
1466 # Supported values: "builtin/stable" (the default if unset) and
1467 # "builtin/latest".
1468 "type": "A String", # The feature type.
1469 "maxResults": 42, # Maximum number of results of this type. Does not apply to
1470 # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
1471 },
1472 ],
1473 },
1474 ],
1475  "parent": "A String", # Optional. Target project and location to make a call.
1476 #
1477 # Format: `projects/{project-id}/locations/{location-id}`.
1478 #
1479 # If no parent is specified, a region will be chosen automatically.
1480 #
1481 # Supported location-ids:
1482 # `us`: USA country only,
1483  # `asia`: East Asia areas, such as Japan and Taiwan,
1484 # `eu`: The European Union.
1485 #
1486 # Example: `projects/project-A/locations/eu`.
1487  }
1488
1489 x__xgafv: string, V1 error format.
1490 Allowed values
1491 1 - v1 error format
1492 2 - v2 error format
1493
1494Returns:
1495 An object of the form:
1496
1497 { # This resource represents a long-running operation that is the result of a
1498 # network API call.
1499 "response": { # The normal response of the operation in case of success. If the original
1500 # method returns no data on success, such as `Delete`, the response is
1501 # `google.protobuf.Empty`. If the original method is standard
1502 # `Get`/`Create`/`Update`, the response should be the resource. For other
1503 # methods, the response should have the type `XxxResponse`, where `Xxx`
1504 # is the original method name. For example, if the original method name
1505 # is `TakeSnapshot()`, the inferred response type is
1506 # `TakeSnapshotResponse`.
1507 "a_key": "", # Properties of the object. Contains field @type with type URL.
1508 },
1509 "metadata": { # Service-specific metadata associated with the operation. It typically
1510 # contains progress information and common metadata such as create time.
1511 # Some services might not provide such metadata. Any method that returns a
1512 # long-running operation should document the metadata type, if any.
1513 "a_key": "", # Properties of the object. Contains field @type with type URL.
1514 },
1515 "done": True or False, # If the value is `false`, it means the operation is still in progress.
1516 # If `true`, the operation is completed, and either `error` or `response` is
1517 # available.
1518 "name": "A String", # The server-assigned name, which is only unique within the same service that
1519 # originally returns it. If you use the default HTTP mapping, the
1520 # `name` should be a resource name ending with `operations/{unique_id}`.
1521 "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
1522 # different programming environments, including REST APIs and RPC APIs. It is
1523 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1524 # three pieces of data: error code, error message, and error details.
1525 #
1526 # You can find out more about this error model and how to work with it in the
1527 # [API Design Guide](https://cloud.google.com/apis/design/errors).
1528 "message": "A String", # A developer-facing error message, which should be in English. Any
1529 # user-facing error message should be localized and sent in the
1530 # google.rpc.Status.details field, or localized by the client.
1531 "code": 42, # The status code, which should be an enum value of google.rpc.Code.
1532 "details": [ # A list of messages that carry the error details. There is a common set of
1533 # message types for APIs to use.
1534 {
1535 "a_key": "", # Properties of the object. Contains field @type with type URL.
1536 },
1537 ],
1538 },
1539 }</pre>
1540</div>
1541
1542</body></html>