<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1p1beta1.html">Cloud Vision API</a> . <a href="vision_v1p1beta1.projects.html">projects</a> . <a href="vision_v1p1beta1.projects.images.html">images</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(parent, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run image detection and annotation for a batch of images.</p>
<p class="toc_element">
  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(parent, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run asynchronous image detection and annotation for a list of images.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="annotate">annotate(parent, body=None, x__xgafv=None)</code>
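  <p>A minimal usage sketch of calling this method through the google-api-python-client discovery interface; the project ID, file name, and feature choice below are illustrative assumptions, not part of the generated reference.</p>
  <pre>
# Minimal sketch, assuming google-api-python-client is installed and
# Application Default Credentials are configured; &#x27;my-project&#x27; and
# &#x27;image.jpg&#x27; are placeholders.
import base64

from googleapiclient.discovery import build

service = build(&#x27;vision&#x27;, &#x27;v1p1beta1&#x27;)

# JSON `bytes` fields are base64-encoded, per the `content` field note below.
with open(&#x27;image.jpg&#x27;, &#x27;rb&#x27;) as f:
    encoded_image = base64.b64encode(f.read()).decode(&#x27;utf-8&#x27;)

body = {
    &#x27;requests&#x27;: [{
        &#x27;image&#x27;: {&#x27;content&#x27;: encoded_image},
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;LABEL_DETECTION&#x27;, &#x27;maxResults&#x27;: 5}],
    }],
}

response = service.projects().images().annotate(
    parent=&#x27;projects/my-project/locations/eu&#x27;, body=body).execute()

for result in response.get(&#x27;responses&#x27;, []):
    for label in result.get(&#x27;labelAnnotations&#x27;, []):
        print(label[&#x27;description&#x27;], label[&#x27;score&#x27;])
</pre>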
  <pre>Run image detection and annotation for a batch of images.

Args:
  parent: string, Optional. Target project and location to make a call.

Format: `projects/{project-id}/locations/{location-id}`.

If no parent is specified, a region will be chosen automatically.

Supported location-ids:
    `us`: USA country only,
    `asia`: East Asia areas, like Japan, Taiwan,
    `eu`: The European Union.

Example: `projects/project-A/locations/eu`. (required)
  body: object, The request body.
    The object takes the form of:

{ # Multiple image annotation requests are batched into a single service call.
    &quot;requests&quot;: [ # Required. Individual image annotation requests for this batch.
      { # Request for performing Google Cloud Vision API tasks over a user-provided
          # image, with user-requested features, and with context information.
        &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
          &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
            &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
                # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
                # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
                # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
                # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
                # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
                # migrate existing products to these categories as well.
              &quot;A String&quot;,
            ],
            &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                # If it is not specified, system discretion will be applied.
              &quot;vertices&quot;: [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  &quot;y&quot;: 42, # Y coordinate.
                  &quot;x&quot;: 42, # X coordinate.
                },
              ],
              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the normalized vertex coordinates are relative to the original image
                    # and range from 0 to 1.
                  &quot;x&quot;: 3.14, # X coordinate.
                  &quot;y&quot;: 3.14, # Y coordinate.
                },
              ],
            },
            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
                # on Product labels. We currently support an AND of OR of key-value
                # expressions, where each expression within an OR must have the same key. An
                # &#x27;=&#x27; should be used to connect the key and value.
                #
                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
                #
                # Format is:
                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
          },
          &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
            &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
                # of the image. For example, if the desired aspect ratio is 4/3, the
                # corresponding float value should be 1.33333. If not specified, the
                # best possible crop is returned. The number of provided aspect ratios is
                # limited to a maximum of 16; any aspect ratios provided after the 16th are
                # ignored.
              3.14,
            ],
          },
          &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
            &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
          },
          &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
            &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            },
            &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            },
          },
          &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
              # yields the best results since it enables automatic language detection. For
              # languages based on the Latin alphabet, setting `language_hints` is not
              # needed. In rare cases, when the language of the text in the image is known,
              # setting a hint will help get better results (although it will be a
              # significant hindrance if the hint is wrong). Text detection returns an
              # error if one or more of the specified languages is not one of the
              # [supported languages](https://cloud.google.com/vision/docs/languages).
            &quot;A String&quot;,
          ],
        },
        &quot;features&quot;: [ # Requested features.
          { # The type of Google Cloud Vision API detection to perform, and the maximum
              # number of results to return for that type. Multiple `Feature` objects can
              # be specified in the `features` list.
            &quot;type&quot;: &quot;A String&quot;, # The feature type.
            &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
                # Supported values: &quot;builtin/stable&quot; (the default if unset) and
                # &quot;builtin/latest&quot;.
            &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
          },
        ],
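        # (Illustrative note, not part of the generated schema: `type` above
        # takes Vision API feature enum values such as LABEL_DETECTION,
        # TEXT_DETECTION, DOCUMENT_TEXT_DETECTION, FACE_DETECTION,
        # LANDMARK_DETECTION, LOGO_DETECTION, IMAGE_PROPERTIES, CROP_HINTS,
        # WEB_DETECTION, SAFE_SEARCH_DETECTION, and OBJECT_LOCALIZATION.)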
        &quot;image&quot;: { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
          &quot;content&quot;: &quot;A String&quot;, # Image content, represented as a stream of bytes.
              # Note: As with all `bytes` fields, protocol buffers use a pure binary
              # representation, whereas JSON representations use base64.
              #
              # Currently, this field only works for BatchAnnotateImages requests. It does
              # not work for AsyncBatchAnnotateImages requests.
          &quot;source&quot;: { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
              # URL. If both `content` and `source` are provided for an image, `content`
              # takes precedence and is used to perform the image annotation request.
            &quot;imageUri&quot;: &quot;A String&quot;, # The URI of the source image. Can be either:
                #
                # 1. A Google Cloud Storage URI of the form
                #    `gs://bucket_name/object_name`. Object versioning is not supported. See
                #    [Google Cloud Storage Request
                #    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
                #    info.
                #
                # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
                #    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
                #    completed. Your request may fail if the specified host denies the
                #    request (e.g. due to request throttling or DOS prevention), or if Google
                #    throttles requests to the site for abuse prevention. You should not
                #    depend on externally-hosted images for production applications.
                #
                # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
                # precedence.
            &quot;gcsImageUri&quot;: &quot;A String&quot;, # **Use `image_uri` instead.**
                #
                # The Google Cloud Storage URI of the form
                # `gs://bucket_name/object_name`. Object versioning is not supported. See
                # [Google Cloud Storage Request
                # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
          },
        },
      },
    ],
    &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
        #
        # Format: `projects/{project-id}/locations/{location-id}`.
        #
        # If no parent is specified, a region will be chosen automatically.
        #
        # Supported location-ids:
        #     `us`: USA country only,
        #     `asia`: East Asia areas, like Japan, Taiwan,
        #     `eu`: The European Union.
        #
        # Example: `projects/project-A/locations/eu`.
  }

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Response to a batch image annotation request.
      &quot;responses&quot;: [ # Individual responses to image annotation requests within the batch.
        { # Response to an image annotation request.
          &quot;fullTextAnnotation&quot;: { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
              # completed successfully.
              # This annotation provides the structural hierarchy for the OCR detected
              # text.
              # The hierarchy of an OCR extracted text structure is like this:
              #     TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
              # Each structural component, starting from Page, may further have its own
              # properties. Properties describe detected languages, breaks, etc. Please
              # refer to the TextAnnotation.TextProperty message definition below for more
              # detail.
            &quot;pages&quot;: [ # List of pages detected by OCR.
              { # Detected page from OCR.
                &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the page. Range [0, 1].
                &quot;blocks&quot;: [ # List of blocks of text, images, etc. on this page.
                  { # Logical element on the page.
                    &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the block.
                        # The vertices are in the order of top-left, top-right, bottom-right,
                        # bottom-left. When a rotation of the bounding box is detected the rotation
                        # is represented as around the top-left corner as defined when the text is
                        # read in the &#x27;natural&#x27; orientation.
                        # For example:
                        #
                        # * when the text is horizontal it might look like:
                        #
                        #     0----1
                        #     |    |
                        #     3----2
                        #
                        # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                        #
                        #     2----3
                        #     |    |
                        #     1----0
                        #
                        # and the vertex order will still be (0, 1, 2, 3).
                      &quot;vertices&quot;: [ # The bounding polygon vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the vertex coordinates are in the same scale as the original image.
                          &quot;y&quot;: 42, # Y coordinate.
                          &quot;x&quot;: 42, # X coordinate.
                        },
                      ],
                      &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the normalized vertex coordinates are relative to the original image
                            # and range from 0 to 1.
                          &quot;x&quot;: 3.14, # X coordinate.
                          &quot;y&quot;: 3.14, # Y coordinate.
                        },
                      ],
                    },
                    &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the block. Range [0, 1].
                    &quot;paragraphs&quot;: [ # List of paragraphs in this block (if this block is of type text).
                      { # Structural unit of text representing a number of words in certain order.
                        &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
                            # The vertices are in the order of top-left, top-right, bottom-right,
                            # bottom-left. When a rotation of the bounding box is detected the rotation
                            # is represented as around the top-left corner as defined when the text is
                            # read in the &#x27;natural&#x27; orientation.
                            # For example:
                            # * when the text is horizontal it might look like:
                            #     0----1
                            #     |    |
                            #     3----2
                            # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                            #     2----3
                            #     |    |
                            #     1----0
                            # and the vertex order will still be (0, 1, 2, 3).
                          &quot;vertices&quot;: [ # The bounding polygon vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the vertex coordinates are in the same scale as the original image.
                              &quot;y&quot;: 42, # Y coordinate.
                              &quot;x&quot;: 42, # X coordinate.
                            },
                          ],
                          &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the normalized vertex coordinates are relative to the original image
                                # and range from 0 to 1.
                              &quot;x&quot;: 3.14, # X coordinate.
                              &quot;y&quot;: 3.14, # Y coordinate.
                            },
                          ],
                        },
                        &quot;words&quot;: [ # List of all words in this paragraph.
                          { # A word representation.
                            &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the word. Range [0, 1].
                            &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the word.
                              &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                              },
                              &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                { # Detected language for a structural component.
                                  &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                      # information, see
                                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                  &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                },
                              ],
                            },
                            &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the word.
                                # The vertices are in the order of top-left, top-right, bottom-right,
                                # bottom-left. When a rotation of the bounding box is detected the rotation
                                # is represented as around the top-left corner as defined when the text is
                                # read in the &#x27;natural&#x27; orientation.
                                # For example:
                                # * when the text is horizontal it might look like:
                                #     0----1
                                #     |    |
                                #     3----2
                                # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                #     2----3
                                #     |    |
                                #     1----0
                                # and the vertex order will still be (0, 1, 2, 3).
                              &quot;vertices&quot;: [ # The bounding polygon vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the vertex coordinates are in the same scale as the original image.
                                  &quot;y&quot;: 42, # Y coordinate.
                                  &quot;x&quot;: 42, # X coordinate.
                                },
                              ],
                              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the normalized vertex coordinates are relative to the original image
                                    # and range from 0 to 1.
                                  &quot;x&quot;: 3.14, # X coordinate.
                                  &quot;y&quot;: 3.14, # Y coordinate.
                                },
                              ],
                            },
                            &quot;symbols&quot;: [ # List of symbols in the word.
                                # The order of the symbols follows the natural reading order.
                              { # A single symbol representation.
                                &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
                                &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the symbol.
                                  &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                    &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                    &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                  },
                                  &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                    { # Detected language for a structural component.
                                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                          # information, see
                                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                      &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                    },
                                  ],
                                },
                                &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
                                    # The vertices are in the order of top-left, top-right, bottom-right,
                                    # bottom-left. When a rotation of the bounding box is detected the rotation
                                    # is represented as around the top-left corner as defined when the text is
                                    # read in the &#x27;natural&#x27; orientation.
                                    # For example:
                                    # * when the text is horizontal it might look like:
                                    #     0----1
                                    #     |    |
                                    #     3----2
                                    # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                    #     2----3
                                    #     |    |
                                    #     1----0
                                    # and the vertex order will still be (0, 1, 2, 3).
                                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the vertex coordinates are in the same scale as the original image.
                                      &quot;y&quot;: 42, # Y coordinate.
                                      &quot;x&quot;: 42, # X coordinate.
                                    },
                                  ],
                                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the normalized vertex coordinates are relative to the original image
                                        # and range from 0 to 1.
                                      &quot;x&quot;: 3.14, # X coordinate.
                                      &quot;y&quot;: 3.14, # Y coordinate.
                                    },
                                  ],
                                },
                                &quot;text&quot;: &quot;A String&quot;, # The actual UTF-8 representation of the symbol.
                              },
                            ],
                          },
                        ],
                        &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the paragraph.
                          &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                            &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                            &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                          },
                          &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                            { # Detected language for a structural component.
                              &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                  # information, see
                                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                              &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                            },
                          ],
                        },
                        &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
                      },
                    ],
                    &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the block.
                      &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                        &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                        &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                      },
                      &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                        { # Detected language for a structural component.
                          &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                              # information, see
                              # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                          &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                        },
                      ],
                    },
                    &quot;blockType&quot;: &quot;A String&quot;, # Detected block type (text, image, etc.) for this block.
                  },
                ],
                &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected on the page.
                  &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                    &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                    &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                  },
                  &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                    { # Detected language for a structural component.
                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                          # information, see
                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                      &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                    },
                  ],
                },
                &quot;height&quot;: 42, # Page height. For PDFs the unit is points. For images (including
                    # TIFFs) the unit is pixels.
                &quot;width&quot;: 42, # Page width. For PDFs the unit is points. For images (including
                    # TIFFs) the unit is pixels.
              },
            ],
            &quot;text&quot;: &quot;A String&quot;, # UTF-8 text detected on the pages.
          },
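          # (Illustrative, not part of the generated schema: in Python the OCR
          # hierarchy above is typically reassembled by walking
          #   pages -&gt; blocks -&gt; paragraphs -&gt; words -&gt; symbols,
          # e.g. joining the symbol[&#x27;text&#x27;] values word by word, under the
          # assumption that the response is the plain dict shown here.)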
          &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
              # Note that filled-in image annotations are guaranteed to be
              # correct, even when `error` is set.
              # different programming environments, including REST APIs and RPC APIs. It is
              # used by [gRPC](https://github.com/grpc). Each `Status` message contains
              # three pieces of data: error code, error message, and error details.
              #
              # You can find out more about this error model and how to work with it in the
              # [API Design Guide](https://cloud.google.com/apis/design/errors).
            &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
                # message types for APIs to use.
              {
                &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
              },
            ],
            &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
                # user-facing error message should be localized and sent in the
                # google.rpc.Status.details field, or localized by the client.
            &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
          },
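          # (Illustrative, not part of the generated schema: callers usually
          # check each entry individually, e.g.
          #   if &#x27;error&#x27; in response[&#x27;responses&#x27;][0]: inspect its code/message
          # before reading the annotation fields of that entry.)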
          &quot;textAnnotations&quot;: [ # If present, text (OCR) detection has completed successfully.
            { # Set of detected entity features.
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
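          # (Illustrative note, not from the schema above: for TEXT_DETECTION
          # the first `textAnnotations` element conventionally carries the full
          # detected text in `description`, with subsequent elements covering
          # individual words/phrases.)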
          &quot;faceAnnotations&quot;: [ # If present, face detection has completed successfully.
            { # A face annotation object contains the results of face detection.
              &quot;landmarkingConfidence&quot;: 3.14, # Face landmarking confidence. Range [0, 1].
              &quot;panAngle&quot;: 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
                  # pointing relative to the vertical plane perpendicular to the image. Range
                  # [-180,180].
              &quot;surpriseLikelihood&quot;: &quot;A String&quot;, # Surprise likelihood.
              &quot;tiltAngle&quot;: 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
                  # pointing relative to the image&#x27;s horizontal plane. Range [-180,180].
              &quot;blurredLikelihood&quot;: &quot;A String&quot;, # Blurred likelihood.
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                  # are in the original image&#x27;s scale.
                  # The bounding box is computed to &quot;frame&quot; the face in accordance with human
                  # expectations. It is based on the landmarker results.
                  # Note that one or more x and/or y coordinates may not be generated in the
                  # `BoundingPoly` (the polygon will be unbounded) if only a partial face
                  # appears in the image to be annotated.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;headwearLikelihood&quot;: &quot;A String&quot;, # Headwear likelihood.
              &quot;fdBoundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
                  # `boundingPoly`, and encloses only the skin part of the face. Typically, it
                  # is used to eliminate the face from any image analysis that detects the
                  # &quot;amount of skin&quot; visible in an image. It is not based on the
                  # landmarker results, only on the initial face detection, hence
                  # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;detectionConfidence&quot;: 3.14, # Detection confidence. Range [0, 1].
              &quot;underExposedLikelihood&quot;: &quot;A String&quot;, # Under-exposed likelihood.
              &quot;joyLikelihood&quot;: &quot;A String&quot;, # Joy likelihood.
              &quot;sorrowLikelihood&quot;: &quot;A String&quot;, # Sorrow likelihood.
              &quot;landmarks&quot;: [ # Detected face landmarks.
                { # A face-specific landmark (for example, a face feature).
                  &quot;position&quot;: { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
                      # A valid Position must have both x and y coordinates.
                      # The position coordinates are in the same scale as the original image.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;z&quot;: 3.14, # Z coordinate (or depth).
                  },
                  &quot;type&quot;: &quot;A String&quot;, # Face landmark type.
                },
              ],
              &quot;rollAngle&quot;: 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
                  # of the face relative to the image vertical about the axis perpendicular to
                  # the face. Range [-180,180].
              &quot;angerLikelihood&quot;: &quot;A String&quot;, # Anger likelihood.
            },
          ],
          &quot;context&quot;: { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
              # comes from.
              # information about the source of that image.
            &quot;uri&quot;: &quot;A String&quot;, # The URI of the file used to produce the image.
            &quot;pageNumber&quot;: 42, # If the file was a PDF or TIFF, this field gives the page number within
                # the file used to produce the image.
          },
          &quot;imagePropertiesAnnotation&quot;: { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
            &quot;dominantColors&quot;: { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
              &quot;colors&quot;: [ # RGB color values with their score and pixel fraction.
                { # Color information consists of RGB channels, score, and the fraction of
                    # the image that the color occupies in the image.
                  &quot;pixelFraction&quot;: 3.14, # The fraction of pixels the color occupies in the image.
                      # Value in range [0, 1].
                  &quot;score&quot;: 3.14, # Image-specific score for this color. Value in range [0, 1].
                  &quot;color&quot;: { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                      # for simplicity of conversion to/from color representations in various
                      # languages over compactness; for example, the fields of this representation
                      # can be trivially provided to the constructor of &quot;java.awt.Color&quot; in Java; it
                      # can also be trivially provided to UIColor&#x27;s &quot;+colorWithRed:green:blue:alpha&quot;
                      # method in iOS; and, with just a little work, it can be easily formatted into
                      # a CSS &quot;rgba()&quot; string in JavaScript, as well.
                      #
                      # Note: this proto does not carry information about the absolute color space
                      # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
                      # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
                      # space.
                      #
                      # Note: when color equality needs to be decided, implementations, unless
                      # documented otherwise, will treat two colors as equal if all their red,
                      # green, blue and alpha values each differ by at most 1e-5.
                      #
                      # Example (Java):
                      #
                      #     import com.google.type.Color;
                      #
                      #     // ...
                      #     public static java.awt.Color fromProto(Color protocolor) {
                      #       float alpha = protocolor.hasAlpha()
                      #           ? protocolor.getAlpha().getValue()
                      #           : 1.0;
                      #
                      #       return new java.awt.Color(
                      #           protocolor.getRed(),
                      #           protocolor.getGreen(),
                      #           protocolor.getBlue(),
                      #           alpha);
                      #     }
                      #
                      #     public static Color toProto(java.awt.Color color) {
                      #       float red = (float) color.getRed();
                      #       float green = (float) color.getGreen();
                      #       float blue = (float) color.getBlue();
                      #       float denominator = 255.0;
                      #       Color.Builder resultBuilder =
                      #           Color
                      #             .newBuilder()
                      #             .setRed(red / denominator)
                      #             .setGreen(green / denominator)
                      #             .setBlue(blue / denominator);
                      #       int alpha = color.getAlpha();
                      #       if (alpha != 255) {
                      #         result.setAlpha(
                      #             FloatValue
                      #               .newBuilder()
                      #               .setValue(((float) alpha) / denominator)
                      #               .build());
                      #       }
                      #       return resultBuilder.build();
                      #     }
                      #     // ...
                      #
                      # Example (iOS / Obj-C):
                      #
                      #     // ...
                      #     static UIColor* fromProto(Color* protocolor) {
                      #        float red = [protocolor red];
                      #        float green = [protocolor green];
                      #        float blue = [protocolor blue];
                      #        FloatValue* alpha_wrapper = [protocolor alpha];
                      #        float alpha = 1.0;
                      #        if (alpha_wrapper != nil) {
                      #          alpha = [alpha_wrapper value];
                      #        }
                      #        return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                      #     }
                      #
                      #     static Color* toProto(UIColor* color) {
                      #         CGFloat red, green, blue, alpha;
                      #         if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
                      #           return nil;
                      #         }
                      #         Color* result = [[Color alloc] init];
                      #         [result setRed:red];
                      #         [result setGreen:green];
                      #         [result setBlue:blue];
                      #         if (alpha &lt;= 0.9999) {
                      #           [result setAlpha:floatWrapperWithValue(alpha)];
                      #         }
                      #         [result autorelease];
                      #         return result;
                      #     }
                      #     // ...
                      #
                      # Example (JavaScript):
                      #
                      #     // ...
                      #
                      #     var protoToCssColor = function(rgb_color) {
                      #        var redFrac = rgb_color.red || 0.0;
                      #        var greenFrac = rgb_color.green || 0.0;
                      #        var blueFrac = rgb_color.blue || 0.0;
                      #        var red = Math.floor(redFrac * 255);
                      #        var green = Math.floor(greenFrac * 255);
                      #        var blue = Math.floor(blueFrac * 255);
                      #
                      #        if (!(&#x27;alpha&#x27; in rgb_color)) {
                      #           return rgbToCssColor_(red, green, blue);
                      #        }
                      #
                      #        var alphaFrac = rgb_color.alpha.value || 0.0;
                      #        var rgbParams = [red, green, blue].join(&#x27;,&#x27;);
                      #        return [&#x27;rgba(&#x27;, rgbParams, &#x27;,&#x27;, alphaFrac, &#x27;)&#x27;].join(&#x27;&#x27;);
                      #     };
                      #
                      #     var rgbToCssColor_ = function(red, green, blue) {
                      #       var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
                      #       var hexString = rgbNumber.toString(16);
                      #       var missingZeros = 6 - hexString.length;
                      #       var resultBuilder = [&#x27;#&#x27;];
                      #       for (var i = 0; i &lt; missingZeros; i++) {
                      #          resultBuilder.push(&#x27;0&#x27;);
                      #       }
                      #       resultBuilder.push(hexString);
                      #       return resultBuilder.join(&#x27;&#x27;);
                      #     };
                      #
                      #     // ...
                    &quot;blue&quot;: 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                    &quot;green&quot;: 3.14, # The amount of green in the color as a value in the interval [0, 1].
                    &quot;red&quot;: 3.14, # The amount of red in the color as a value in the interval [0, 1].
                    &quot;alpha&quot;: 3.14, # The fraction of this color that should be applied to the pixel. That is,
                        # the final pixel color is defined by the equation:
                        #
                        #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                        #
                        # This means that a value of 1.0 corresponds to a solid color, whereas
                        # a value of 0.0 corresponds to a completely transparent color. This
                        # uses a wrapper message rather than a simple float scalar so that it is
                        # possible to distinguish between a default value and the value being unset.
                        # If omitted, this color object is to be rendered as a solid color
                        # (as if the alpha value had been explicitly given with a value of 1.0).
                  },
                },
              ],
            },
          },
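          # (Illustrative Python analogue of the Java/JS conversion examples
          # above, assuming the plain dict form returned by this client:
          #   def proto_to_css_color(rgb_color):
          #       red = int((rgb_color.get(&#x27;red&#x27;) or 0.0) * 255)
          #       green = int((rgb_color.get(&#x27;green&#x27;) or 0.0) * 255)
          #       blue = int((rgb_color.get(&#x27;blue&#x27;) or 0.0) * 255)
          #       if &#x27;alpha&#x27; not in rgb_color:
          #           return &#x27;rgb(%d,%d,%d)&#x27; % (red, green, blue)
          #       return &#x27;rgba(%d,%d,%d,%s)&#x27; % (red, green, blue, rgb_color[&#x27;alpha&#x27;])
          # )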
          &quot;webDetection&quot;: { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
            &quot;visuallySimilarImages&quot;: [ # The visually similar image results.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;bestGuessLabels&quot;: [ # The service&#x27;s best guess as to the topic of the request image.
                # Inferred from similar images on the open web.
              { # Label to provide extra metadata for the web detection.
                &quot;label&quot;: &quot;A String&quot;, # Label for extra metadata.
                &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code for `label`, such as &quot;en-US&quot; or &quot;sr-Latn&quot;.
                    # For more information, see
                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
              },
            ],
            &quot;fullMatchingImages&quot;: [ # Fully matching images from the Internet.
                # Can include resized copies of the query image.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;webEntities&quot;: [ # Deduced entities from similar images on the Internet.
              { # Entity deduced from similar images on the Internet.
                &quot;description&quot;: &quot;A String&quot;, # Canonical description of the entity, in English.
                &quot;entityId&quot;: &quot;A String&quot;, # Opaque entity ID.
                &quot;score&quot;: 3.14, # Overall relevancy score for the entity.
                    # Not normalized and not comparable across different image queries.
              },
            ],
            &quot;partialMatchingImages&quot;: [ # Partial matching images from the Internet.
                # Those images are similar enough to share some key-point features. For
                # example an original image will likely have partial matching for its crops.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;pagesWithMatchingImages&quot;: [ # Web pages containing the matching images from the Internet.
              { # Metadata for web pages.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the web page.
                &quot;url&quot;: &quot;A String&quot;, # The result web page URL.
                &quot;partialMatchingImages&quot;: [ # Partial matching images on the page.
                    # Those images are similar enough to share some key-point features. For
                    # example an original image will likely have partial matching for its
                    # crops.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
                &quot;pageTitle&quot;: &quot;A String&quot;, # Title for the web page, may contain HTML markups.
                &quot;fullMatchingImages&quot;: [ # Fully matching images on the page.
                    # Can include resized copies of the query image.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
              },
            ],
          },
          &quot;landmarkAnnotations&quot;: [ # If present, landmark detection has completed successfully.
            { # Set of detected entity features.
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;cropHintsAnnotation&quot;: { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
            &quot;cropHints&quot;: [ # Crop hint results.
              { # Single crop hint that is used to generate a new crop when serving an image.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
                    # box are in the original image&#x27;s scale.
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                },
                &quot;importanceFraction&quot;: 3.14, # Fraction of importance of this salient region with respect to the original
                    # image.
                &quot;confidence&quot;: 3.14, # Confidence of this being a salient region. Range [0, 1].
              },
            ],
          },
          &quot;logoAnnotations&quot;: [ # If present, logo detection has completed successfully.
            { # Set of detected entity features.
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;localizedObjectAnnotations&quot;: [ # If present, localized object detection has completed successfully.
              # This will be sorted descending by confidence score.
            { # Set of detected objects with bounding boxes.
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
              &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
              &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                  # information, see
                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
              &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
            },
          ],
1071 &quot;labelAnnotations&quot;: [ # If present, label detection has completed successfully.
1072 { # Set of detected entity features.
1073 &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
1074 # `description` is expressed.
1075 &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1076 # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
1077 # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
1078 # detected distant towering building, even though the confidence that
1079 # there is a tower in each image may be the same. Range [0, 1].
1080 &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
1081           # fields, such as a score or string that qualifies the entity.
1082 { # A `Property` consists of a user-supplied name/value pair.
1083 &quot;value&quot;: &quot;A String&quot;, # Value of the property.
1084 &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
1085 &quot;name&quot;: &quot;A String&quot;, # Name of the property.
1086 },
1087 ],
1088 &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
1089 # [Google Knowledge Graph Search
1090 # API](https://developers.google.com/knowledge-graph/).
1091 &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
1092 # The accuracy of the entity detection in an image.
1093 # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
1094 # this field represents the confidence that there is a tower in the query
1095 # image. Range [0, 1].
1096 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1097 # for `LABEL_DETECTION` features.
1098 &quot;vertices&quot;: [ # The bounding polygon vertices.
1099 { # A vertex represents a 2D point in the image.
1100 # NOTE: the vertex coordinates are in the same scale as the original image.
1101 &quot;y&quot;: 42, # Y coordinate.
1102 &quot;x&quot;: 42, # X coordinate.
1103 },
1104 ],
1105 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1106 { # A vertex represents a 2D point in the image.
1107 # NOTE: the normalized vertex coordinates are relative to the original image
1108 # and range from 0 to 1.
1109 &quot;x&quot;: 3.14, # X coordinate.
1110 &quot;y&quot;: 3.14, # Y coordinate.
1111 },
1112 ],
1113 },
1114 &quot;locations&quot;: [ # The location information for the detected entity. Multiple
1115 # `LocationInfo` elements can be present because one location may
1116 # indicate the location of the scene in the image, and another location
1117 # may indicate the location of the place where the image was taken.
1118 # Location information is usually present for landmarks.
1119 { # Detected entity location information.
1120 &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1121 # of doubles representing degrees latitude and degrees longitude. Unless
1122 # specified otherwise, this must conform to the
1123 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1124 # standard&lt;/a&gt;. Values must be within normalized ranges.
1125 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1126 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1127 },
1128 },
1129 ],
1130 &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
1131 &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
1132 },
1133 ],
1134 &quot;productSearchResults&quot;: { # Results for a product search request. # If present, product search has completed successfully.
1135 &quot;productGroupedResults&quot;: [ # List of results grouped by products detected in the query image. Each entry
1136 # corresponds to one bounding polygon in the query image, and contains the
1137 # matching products specific to that region. There may be duplicate product
1138 # matches in the union of all the per-product results.
1139 { # Information about the products similar to a single product in a query
1140 # image.
1141 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
1142 &quot;vertices&quot;: [ # The bounding polygon vertices.
1143 { # A vertex represents a 2D point in the image.
1144 # NOTE: the vertex coordinates are in the same scale as the original image.
1145 &quot;y&quot;: 42, # Y coordinate.
1146 &quot;x&quot;: 42, # X coordinate.
1147 },
1148 ],
1149 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1150 { # A vertex represents a 2D point in the image.
1151 # NOTE: the normalized vertex coordinates are relative to the original image
1152 # and range from 0 to 1.
1153 &quot;x&quot;: 3.14, # X coordinate.
1154 &quot;y&quot;: 3.14, # Y coordinate.
1155 },
1156 ],
1157 },
1158 &quot;results&quot;: [ # List of results, one for each product match.
1159 { # Information about a product.
1160 &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
1161 # to the query.
1162 &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
1163 # 1 (full confidence).
1164 &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
1165 &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
1166 # 4096 characters long.
1167 &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
1168 # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
1169 # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
1170 # not be used for new products.
1171 &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
1172 # constraints can be specified based on the product_labels.
1173 #
1174 # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
1175               # strings with integer values can match a range-based restriction, which
1176               # will be supported soon.
1177 #
1178 # Multiple values can be assigned to the same key. One product may have up to
1179 # 500 product_labels.
1180 #
1181 # Notice that the total number of distinct product_labels over all products
1182 # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
1183 # will refuse to work for that ProductSet.
1184 { # A product label represented as a key-value pair.
1185 &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
1186 # exceed 128 bytes.
1187 &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
1188 # cannot exceed 128 bytes.
1189 },
1190 ],
1191 &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
1192 #
1193 # Format is:
1194 # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
1195 #
1196 # This field is ignored when creating a product.
1197 &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
1198 # characters long.
1199 },
1200 },
1201 ],
1202 &quot;objectAnnotations&quot;: [ # List of generic predictions for the object in the bounding box.
1203 { # Prediction for what the object in the bounding box is.
1204 &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
1205 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
1206 # information, see
1207 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
1208 &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
1209 &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
1210 },
1211 ],
1212 },
1213 ],
1214 &quot;results&quot;: [ # List of results, one for each product match.
1215 { # Information about a product.
1216 &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
1217 # to the query.
1218 &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
1219 # 1 (full confidence).
1220 &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
1221 &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
1222 # 4096 characters long.
1223 &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
1224 # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
1225 # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
1226 # not be used for new products.
1227 &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
1228 # constraints can be specified based on the product_labels.
1229 #
1230 # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
1231             # strings with integer values can match a range-based restriction, which
1232             # will be supported soon.
1233 #
1234 # Multiple values can be assigned to the same key. One product may have up to
1235 # 500 product_labels.
1236 #
1237 # Notice that the total number of distinct product_labels over all products
1238 # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
1239 # will refuse to work for that ProductSet.
1240 { # A product label represented as a key-value pair.
1241 &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
1242 # exceed 128 bytes.
1243 &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
1244 # cannot exceed 128 bytes.
1245 },
1246 ],
1247 &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
1248 #
1249 # Format is:
1250 # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
1251 #
1252 # This field is ignored when creating a product.
1253 &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
1254 # characters long.
1255 },
1256 },
1257 ],
1258 &quot;indexTime&quot;: &quot;A String&quot;, # Timestamp of the index which provided these results. Products added to the
1259 # product set and products removed from the product set after this time are
1260 # not reflected in the current results.
1261 },
1262 &quot;safeSearchAnnotation&quot;: { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
1263 # methods over safe-search verticals (for example, adult, spoof, medical,
1264 # violence).
1265 &quot;medical&quot;: &quot;A String&quot;, # Likelihood that this is a medical image.
1266 &quot;racy&quot;: &quot;A String&quot;, # Likelihood that the request image contains racy content. Racy content may
1267 # include (but is not limited to) skimpy or sheer clothing, strategically
1268 # covered nudity, lewd or provocative poses, or close-ups of sensitive
1269 # body areas.
1270     &quot;spoof&quot;: &quot;A String&quot;, # Spoof likelihood. The likelihood that a modification
1271 # was made to the image&#x27;s canonical version to make it appear
1272 # funny or offensive.
1273 &quot;violence&quot;: &quot;A String&quot;, # Likelihood that this image contains violent content.
1274 &quot;adult&quot;: &quot;A String&quot;, # Represents the adult content likelihood for the image. Adult content may
1275 # contain elements such as nudity, pornographic images or cartoons, or
1276 # sexual activities.
1277 },
1278   },
1279 ],
1280 }</pre>
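<p>The following is a minimal usage sketch, not part of the generated reference: one plausible way to call this method with the Python client library. The project ID, location, image URI, and feature selection are illustrative assumptions.</p>
<pre>
# Hypothetical example: run label and landmark detection on one image.
# Assumes application default credentials are configured.
from googleapiclient.discovery import build

service = build(&#x27;vision&#x27;, &#x27;v1p1beta1&#x27;)

body = {
    &#x27;requests&#x27;: [{
        # `content` (base64 bytes) could be used instead of `source`; if both
        # are given, `content` takes precedence (see the schema above).
        &#x27;image&#x27;: {&#x27;source&#x27;: {&#x27;imageUri&#x27;: &#x27;gs://my-bucket/eiffel.jpg&#x27;}},
        &#x27;features&#x27;: [
            {&#x27;type&#x27;: &#x27;LABEL_DETECTION&#x27;, &#x27;maxResults&#x27;: 10},
            {&#x27;type&#x27;: &#x27;LANDMARK_DETECTION&#x27;},
        ],
    }],
}

response = service.projects().images().annotate(
    parent=&#x27;projects/my-project/locations/eu&#x27;, body=body).execute()

# Each element of `responses` matches the response schema documented above.
for result in response.get(&#x27;responses&#x27;, []):
    for label in result.get(&#x27;labelAnnotations&#x27;, []):
        print(label[&#x27;description&#x27;], label[&#x27;score&#x27;])
</pre>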
1281</div>
1282
1283<div class="method">
1284 <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(parent, body=None, x__xgafv=None)</code>
1285 <pre>Run asynchronous image detection and annotation for a list of images.
1286
1287Progress and results can be retrieved through the
1288`google.longrunning.Operations` interface.
1289`Operation.metadata` contains `OperationMetadata` (metadata).
1290`Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).
1291
1292This service will write image annotation outputs to JSON files in the customer&#x27;s
1293GCS bucket, each JSON file containing a BatchAnnotateImagesResponse proto.
1294
1295Args:
1296 parent: string, Optional. Target project and location to make a call.
1297
1298Format: `projects/{project-id}/locations/{location-id}`.
1299
1300If no parent is specified, a region will be chosen automatically.
1301
1302Supported location-ids:
1303 `us`: USA country only,
1304  `asia`: East Asia areas, like Japan and Taiwan,
1305 `eu`: The European Union.
1306
1307Example: `projects/project-A/locations/eu`. (required)
1308 body: object, The request body.
1309 The object takes the form of:
1310
1311{ # Request for async image annotation for a list of images.
1312    &quot;requests&quot;: [ # Required. Individual image annotation requests for this batch.
1313 { # Request for performing Google Cloud Vision API tasks over a user-provided
1314 # image, with user-requested features, and with context information.
1315 &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
1316 &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
1317 &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
1318 # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
1319 # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
1320 # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
1321 # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
1322 # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
1323 # migrate existing products to these categories as well.
1324 &quot;A String&quot;,
1325 ],
1326 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
1327 # If it is not specified, system discretion will be applied.
1328 &quot;vertices&quot;: [ # The bounding polygon vertices.
1329 { # A vertex represents a 2D point in the image.
1330 # NOTE: the vertex coordinates are in the same scale as the original image.
1331 &quot;y&quot;: 42, # Y coordinate.
1332 &quot;x&quot;: 42, # X coordinate.
1333 },
1334 ],
1335 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1336 { # A vertex represents a 2D point in the image.
1337 # NOTE: the normalized vertex coordinates are relative to the original image
1338 # and range from 0 to 1.
1339 &quot;x&quot;: 3.14, # X coordinate.
1340 &quot;y&quot;: 3.14, # Y coordinate.
1341 },
1342 ],
1343 },
1344 &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
1345           # on Product labels. We currently support an AND of ORs of key-value
1346 # expressions, where each expression within an OR must have the same key. An
1347 # &#x27;=&#x27; should be used to connect the key and value.
1348 #
1349 # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
1350 # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
1351 # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
1352 &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
1353 #
1354 # Format is:
1355 # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
1356 },
1357 &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
1358 &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
1359 # of the image. For example, if the desired aspect ratio is 4/3, the
1360 # corresponding float value should be 1.33333. If not specified, the
1361 # best possible crop is returned. The number of provided aspect ratios is
1362 # limited to a maximum of 16; any aspect ratios provided after the 16th are
1363 # ignored.
1364 3.14,
1365 ],
1366 },
1367 &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
1368 &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
1369 },
1370 &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
1371 &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
1372 # of doubles representing degrees latitude and degrees longitude. Unless
1373 # specified otherwise, this must conform to the
1374 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1375 # standard&lt;/a&gt;. Values must be within normalized ranges.
1376 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1377 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1378 },
1379 &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
1380 # of doubles representing degrees latitude and degrees longitude. Unless
1381 # specified otherwise, this must conform to the
1382 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1383 # standard&lt;/a&gt;. Values must be within normalized ranges.
1384 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1385 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1386 },
1387 },
1388 &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
1389 # yields the best results since it enables automatic language detection. For
1390 # languages based on the Latin alphabet, setting `language_hints` is not
1391 # needed. In rare cases, when the language of the text in the image is known,
1392 # setting a hint will help get better results (although it will be a
1393 # significant hindrance if the hint is wrong). Text detection returns an
1394 # error if one or more of the specified languages is not one of the
1395 # [supported languages](https://cloud.google.com/vision/docs/languages).
1396 &quot;A String&quot;,
1397 ],
1398 },
1399 &quot;features&quot;: [ # Requested features.
1400 { # The type of Google Cloud Vision API detection to perform, and the maximum
1401 # number of results to return for that type. Multiple `Feature` objects can
1402 # be specified in the `features` list.
1403 &quot;type&quot;: &quot;A String&quot;, # The feature type.
1404 &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
1405 # Supported values: &quot;builtin/stable&quot; (the default if unset) and
1406 # &quot;builtin/latest&quot;.
1407 &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
1408 # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
1409 },
1410 ],
1411 &quot;image&quot;: { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
1412 &quot;content&quot;: &quot;A String&quot;, # Image content, represented as a stream of bytes.
1413         # Note: As with all `bytes` fields, protocol buffers use a pure binary
1414 # representation, whereas JSON representations use base64.
1415 #
1416 # Currently, this field only works for BatchAnnotateImages requests. It does
1417 # not work for AsyncBatchAnnotateImages requests.
1418 &quot;source&quot;: { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
1419 # URL. If both `content` and `source` are provided for an image, `content`
1420 # takes precedence and is used to perform the image annotation request.
1421 &quot;imageUri&quot;: &quot;A String&quot;, # The URI of the source image. Can be either:
1422 #
1423 # 1. A Google Cloud Storage URI of the form
1424 # `gs://bucket_name/object_name`. Object versioning is not supported. See
1425 # [Google Cloud Storage Request
1426 # URIs](https://cloud.google.com/storage/docs/reference-uris) for more
1427 # info.
1428 #
1429 # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
1430 # HTTP/HTTPS URLs, Google cannot guarantee that the request will be
1431 # completed. Your request may fail if the specified host denies the
1432           # request (e.g. due to request throttling or DoS prevention), or if Google
1433 # throttles requests to the site for abuse prevention. You should not
1434 # depend on externally-hosted images for production applications.
1435 #
1436 # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
1437 # precedence.
1438 &quot;gcsImageUri&quot;: &quot;A String&quot;, # **Use `image_uri` instead.**
1439 #
1440 # The Google Cloud Storage URI of the form
1441 # `gs://bucket_name/object_name`. Object versioning is not supported. See
1442 # [Google Cloud Storage Request
1443 # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
1444 },
1445 },
1446 },
1447 ],
1448    &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
1449 &quot;gcsDestination&quot;: { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
1450        &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI prefix where the results will be stored. Results
1451            # will be in JSON format, each preceded by its corresponding input URI prefix.
1452            # This field can represent either a GCS file prefix or a GCS directory. In
1453            # either case, the uri should be unique, because to get all of the
1454            # output files you will need to do a wildcard GCS search on the uri prefix
1455            # you provide.
1456 #
1457 # Examples:
1458 #
1459 # * File Prefix: gs://bucket-name/here/filenameprefix The output files
1460 # will be created in gs://bucket-name/here/ and the names of the
1461 # output files will begin with &quot;filenameprefix&quot;.
1462 #
1463 # * Directory Prefix: gs://bucket-name/some/location/ The output files
1464 # will be created in gs://bucket-name/some/location/ and the names of the
1465 # output files could be anything because there was no filename prefix
1466 # specified.
1467 #
1468            # If there are multiple outputs, each response is still an AnnotateFileResponse,
1469            # each of which contains some subset of the full list of AnnotateImageResponse.
1470            # Multiple outputs can happen if, for example, the output JSON is too large
1471            # and overflows into multiple sharded files.
1472 },
1473 &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
1474 # Google Cloud Storage.
1475 # The valid range is [1, 100]. If not specified, the default value is 20.
1476 #
1477          # For example, for one PDF file with 100 pages, 100 response protos will
1478          # be generated. If `batch_size` = 20, then 5 JSON files, each
1479          # containing 20 response protos, will be written under the prefix
1480 # `gcs_destination`.`uri`.
1481 #
1482 # Currently, batch_size only applies to GcsDestination, with potential future
1483 # support for other output configurations.
1484 },
1485    &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
1486 #
1487 # Format: `projects/{project-id}/locations/{location-id}`.
1488 #
1489 # If no parent is specified, a region will be chosen automatically.
1490 #
1491 # Supported location-ids:
1492 # `us`: USA country only,
1493        # `asia`: East Asia areas, like Japan and Taiwan,
1494 # `eu`: The European Union.
1495 #
1496 # Example: `projects/project-A/locations/eu`.
1497  }
1498
1499 x__xgafv: string, V1 error format.
1500 Allowed values
1501 1 - v1 error format
1502 2 - v2 error format
1503
1504Returns:
1505 An object of the form:
1506
1507 { # This resource represents a long-running operation that is the result of a
1508 # network API call.
1509    &quot;response&quot;: { # The normal response of the operation in case of success. If the original
1510 # method returns no data on success, such as `Delete`, the response is
1511 # `google.protobuf.Empty`. If the original method is standard
1512 # `Get`/`Create`/`Update`, the response should be the resource. For other
1513 # methods, the response should have the type `XxxResponse`, where `Xxx`
1514 # is the original method name. For example, if the original method name
1515 # is `TakeSnapshot()`, the inferred response type is
1516 # `TakeSnapshotResponse`.
1517 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
1518 },
1519 &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically
1520 # contains progress information and common metadata such as create time.
1521 # Some services might not provide such metadata. Any method that returns a
1522 # long-running operation should document the metadata type, if any.
1523 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
1524 },
1525 &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
1526 # originally returns it. If you use the default HTTP mapping, the
1527 # `name` should be a resource name ending with `operations/{unique_id}`.
1528 &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
1529 # If `true`, the operation is completed, and either `error` or `response` is
1530 # available.
1531    &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
1532 # different programming environments, including REST APIs and RPC APIs. It is
1533 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1534 # three pieces of data: error code, error message, and error details.
1535 #
1536 # You can find out more about this error model and how to work with it in the
1537 # [API Design Guide](https://cloud.google.com/apis/design/errors).
1538      &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
1539 # message types for APIs to use.
1540 {
1541 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
1542 },
1543 ],
1544      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
1545 # user-facing error message should be localized and sent in the
1546 # google.rpc.Status.details field, or localized by the client.
1547      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
1548    },
1549  }</pre>
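<p>Below is a minimal usage sketch, not part of the generated reference: one plausible way to start an asynchronous batch and poll the returned long-running operation. The bucket, project, product set, and filter values are illustrative assumptions, and polling through <code>operations().get</code> mirrors the v1 surface rather than anything stated above.</p>
<pre>
# Hypothetical example: asynchronous product search over one image, with
# results written as JSON files under a GCS prefix.
import time

from googleapiclient.discovery import build

service = build(&#x27;vision&#x27;, &#x27;v1p1beta1&#x27;)

body = {
    &#x27;requests&#x27;: [{
        &#x27;image&#x27;: {&#x27;source&#x27;: {&#x27;imageUri&#x27;: &#x27;gs://my-bucket/shoe.jpg&#x27;}},
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;PRODUCT_SEARCH&#x27;}],
        &#x27;imageContext&#x27;: {
            &#x27;productSearchParams&#x27;: {
                &#x27;productSet&#x27;: &#x27;projects/my-project/locations/us/productSets/my-product-set&#x27;,
                &#x27;productCategories&#x27;: [&#x27;apparel-v2&#x27;],
                # An AND of ORs over product_labels, joined with &#x27;=&#x27;,
                # as described in the filter field above.
                &#x27;filter&#x27;: &#x27;(color = red OR color = blue) AND brand = Google&#x27;,
            },
        },
    }],
    &#x27;outputConfig&#x27;: {
        &#x27;gcsDestination&#x27;: {&#x27;uri&#x27;: &#x27;gs://my-bucket/vision-output/&#x27;},
        &#x27;batchSize&#x27;: 20,  # at most 20 response protos per output JSON file
    },
}

operation = service.projects().images().asyncBatchAnnotate(
    parent=&#x27;projects/my-project/locations/us&#x27;, body=body).execute()

# Poll until `done` is true; the JSON results are then available under the
# gcsDestination uri prefix (use a wildcard GCS search to list them).
while not operation.get(&#x27;done&#x27;):
    time.sleep(5)
    operation = service.operations().get(name=operation[&#x27;name&#x27;]).execute()

if &#x27;error&#x27; in operation:
    raise RuntimeError(operation[&#x27;error&#x27;].get(&#x27;message&#x27;, &#x27;annotation failed&#x27;))
</pre>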
1550</div>
1551
1552</body></html>