<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1.html">Cloud Vision API</a> . <a href="vision_v1.projects.html">projects</a> . <a href="vision_v1.projects.files.html">files</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(parent, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Service that performs image detection and annotation for a batch of files.</p>
<p class="toc_element">
  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(parent, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run asynchronous image detection and annotation for a list of generic files.</p>
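<p>For orientation, here is a minimal sketch of calling <code>annotate</code> through the
google-api-python-client library. It assumes Application Default Credentials are
configured; the project ID (<code>my-project</code>) and the file name
(<code>sample.pdf</code>) are hypothetical placeholders, not values from this reference.</p>
<pre>
# Minimal sketch (assumes `pip install google-api-python-client` and
# Application Default Credentials; names below are placeholders).
import base64

from googleapiclient import discovery

service = discovery.build(&quot;vision&quot;, &quot;v1&quot;)

# Inline file content must be base64-encoded in the JSON representation.
with open(&quot;sample.pdf&quot;, &quot;rb&quot;) as f:
    content = base64.b64encode(f.read()).decode(&quot;utf-8&quot;)

request_body = {
    &quot;requests&quot;: [
        {
            &quot;inputConfig&quot;: {&quot;mimeType&quot;: &quot;application/pdf&quot;, &quot;content&quot;: content},
            &quot;features&quot;: [{&quot;type&quot;: &quot;DOCUMENT_TEXT_DETECTION&quot;}],
            &quot;pages&quot;: [1, 2, -1],  # first two pages plus the last page
        }
    ]
}

response = (
    service.projects()
    .files()
    .annotate(parent=&quot;projects/my-project/locations/eu&quot;, body=request_body)
    .execute()
)

# One AnnotateFileResponse per request, each holding per-page image responses.
for file_response in response.get(&quot;responses&quot;, []):
    for image_response in file_response.get(&quot;responses&quot;, []):
        print(image_response.get(&quot;fullTextAnnotation&quot;, {}).get(&quot;text&quot;, &quot;&quot;))
</pre>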
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="annotate">annotate(parent, body=None, x__xgafv=None)</code>
  <pre>Service that performs image detection and annotation for a batch of files.
Now only &quot;application/pdf&quot;, &quot;image/tiff&quot; and &quot;image/gif&quot; are supported.

This service will extract at most 5 (customers can specify which 5 in
AnnotateFileRequest.pages) frames (gif) or pages (pdf or tiff) from each
file provided and perform detection and annotation for each image
extracted.

Args:
  parent: string, Optional. Target project and location to make a call.

Format: `projects/{project-id}/locations/{location-id}`.

If no parent is specified, a region will be chosen automatically.

Supported location-ids:
    `us`: USA country only,
    `asia`: East Asia areas, like Japan, Taiwan,
    `eu`: The European Union.

Example: `projects/project-A/locations/eu`. (required)
  body: object, The request body.
    The object takes the form of:

{ # A list of requests to annotate files using the BatchAnnotateFiles API.
    &quot;requests&quot;: [ # Required. The list of file annotation requests. Right now we support only one
        # AnnotateFileRequest in BatchAnnotateFilesRequest.
      { # A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
        &quot;features&quot;: [ # Required. Requested features.
          { # The type of Google Cloud Vision API detection to perform, and the maximum
              # number of results to return for that type. Multiple `Feature` objects can
              # be specified in the `features` list.
            &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
            &quot;type&quot;: &quot;A String&quot;, # The feature type.
            &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
                # Supported values: &quot;builtin/stable&quot; (the default if unset) and
                # &quot;builtin/latest&quot;.
          },
        ],
        &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
          &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
            &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            },
            &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            },
          },
          &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
            &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
          },
          &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
              # yields the best results since it enables automatic language detection. For
              # languages based on the Latin alphabet, setting `language_hints` is not
              # needed. In rare cases, when the language of the text in the image is known,
              # setting a hint will help get better results (although it will be a
              # significant hindrance if the hint is wrong). Text detection returns an
              # error if one or more of the specified languages is not one of the
              # [supported languages](https://cloud.google.com/vision/docs/languages).
            &quot;A String&quot;,
          ],
          &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
            &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
                # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
                # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
                # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
                # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
                # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
                # migrate existing products to these categories as well.
              &quot;A String&quot;,
            ],
            &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                # If it is not specified, system discretion will be applied.
              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the normalized vertex coordinates are relative to the original image
                    # and range from 0 to 1.
                  &quot;y&quot;: 3.14, # Y coordinate.
                  &quot;x&quot;: 3.14, # X coordinate.
                },
              ],
              &quot;vertices&quot;: [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  &quot;x&quot;: 42, # X coordinate.
                  &quot;y&quot;: 42, # Y coordinate.
                },
              ],
            },
            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
                # on Product labels. We currently support an AND of OR of key-value
                # expressions, where each expression within an OR must have the same key. An
                # &#x27;=&#x27; should be used to connect the key and value.
                #
                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
                #
                # Format is:
                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
          },
          &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
            &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
                # of the image. For example, if the desired aspect ratio is 4/3, the
                # corresponding float value should be 1.33333. If not specified, the
                # best possible crop is returned. The number of provided aspect ratios is
                # limited to a maximum of 16; any aspect ratios provided after the 16th are
                # ignored.
              3.14,
            ],
          },
        },
        &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
          &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
              # &quot;image/gif&quot; are supported. Wildcards are not supported.
          &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
              # Note: As with all `bytes` fields, protocol buffers use a pure binary
              # representation, whereas JSON representations use base64.
              #
              # Currently, this field only works for BatchAnnotateFiles requests. It does
              # not work for AsyncBatchAnnotateFiles requests.
          &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
            &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
                # Google Cloud Storage object. Wildcards are not currently supported.
          },
        },
        &quot;pages&quot;: [ # Pages of the file to perform image annotation.
            #
            # Pages start from 1; we assume the first page of the file is page 1.
            # At most 5 pages are supported per request. Pages can be negative.
            #
            # Page 1 means the first page.
            # Page 2 means the second page.
            # Page -1 means the last page.
            # Page -2 means the second to the last page.
            #
            # If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
            #
            # If this field is empty, by default the service performs image annotation
            # for the first 5 pages of the file.
          42,
        ],
      },
    ],
    &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
        #
        # Format: `projects/{project-id}/locations/{location-id}`.
        #
        # If no parent is specified, a region will be chosen automatically.
        #
        # Supported location-ids:
        #     `us`: USA country only,
        #     `asia`: East Asia areas, like Japan, Taiwan,
        #     `eu`: The European Union.
        #
        # Example: `projects/project-A/locations/eu`.
  }

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # A list of file annotation responses.
      &quot;responses&quot;: [ # The list of file annotation responses, each response corresponding to each
          # AnnotateFileRequest in BatchAnnotateFilesRequest.
        { # Response to a single file annotation request. A file may contain one or more
            # images, which individually have their own responses.
          &quot;totalPages&quot;: 42, # This field gives the total number of pages in the file.
          &quot;inputConfig&quot;: { # The desired input location and metadata. # Information about the file for which this response is generated.
            &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
                # &quot;image/gif&quot; are supported. Wildcards are not supported.
            &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
                # Note: As with all `bytes` fields, protocol buffers use a pure binary
                # representation, whereas JSON representations use base64.
                #
                # Currently, this field only works for BatchAnnotateFiles requests. It does
                # not work for AsyncBatchAnnotateFiles requests.
            &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
              &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
                  # Google Cloud Storage object. Wildcards are not currently supported.
            },
          },
          &quot;responses&quot;: [ # Individual responses to images found within the file. This field will be
              # empty if the `error` field is set.
            { # Response to an image annotation request.
              &quot;labelAnnotations&quot;: [ # If present, label detection has completed successfully.
                { # Set of detected entity features.
                  &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                  &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                      # The accuracy of the entity detection in an image.
                      # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                      # this field represents the confidence that there is a tower in the query
                      # image. Range [0, 1].
                  &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                      # for `LABEL_DETECTION` features.
                    &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the normalized vertex coordinates are relative to the original image
                          # and range from 0 to 1.
                        &quot;y&quot;: 3.14, # Y coordinate.
                        &quot;x&quot;: 3.14, # X coordinate.
                      },
                    ],
                    &quot;vertices&quot;: [ # The bounding polygon vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the vertex coordinates are in the same scale as the original image.
                        &quot;x&quot;: 42, # X coordinate.
                        &quot;y&quot;: 42, # Y coordinate.
                      },
                    ],
                  },
                  &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                      # [Google Knowledge Graph Search
                      # API](https://developers.google.com/knowledge-graph/).
                  &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                      # `description` is expressed.
                  &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                      # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                      # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                      # detected distant towering building, even though the confidence that
                      # there is a tower in each image may be the same. Range [0, 1].
                  &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                      # `LocationInfo` elements can be present because one location may
                      # indicate the location of the scene in the image, and another location
                      # may indicate the location of the place where the image was taken.
                      # Location information is usually present for landmarks.
                    { # Detected entity location information.
                      &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                          # of doubles representing degrees latitude and degrees longitude. Unless
                          # specified otherwise, this must conform to the
                          # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                          # standard&lt;/a&gt;. Values must be within normalized ranges.
                        &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                        &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                      },
                    },
                  ],
                  &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                      # fields, such as a score or string that qualifies the entity.
                    { # A `Property` consists of a user-supplied name/value pair.
                      &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                      &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                      &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                    },
                  ],
                  &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
                },
              ],
              &quot;safeSearchAnnotation&quot;: { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
                  # methods over safe-search verticals (for example, adult, spoof, medical,
                  # violence).
                &quot;spoof&quot;: &quot;A String&quot;, # Spoof likelihood. The likelihood that a modification
                    # was made to the image&#x27;s canonical version to make it appear
                    # funny or offensive.
                &quot;racy&quot;: &quot;A String&quot;, # Likelihood that the request image contains racy content. Racy content may
                    # include (but is not limited to) skimpy or sheer clothing, strategically
                    # covered nudity, lewd or provocative poses, or close-ups of sensitive
                    # body areas.
                &quot;adult&quot;: &quot;A String&quot;, # Represents the adult content likelihood for the image. Adult content may
                    # contain elements such as nudity, pornographic images or cartoons, or
                    # sexual activities.
                &quot;violence&quot;: &quot;A String&quot;, # Likelihood that this image contains violent content.
                &quot;medical&quot;: &quot;A String&quot;, # Likelihood that this is a medical image.
              },
              &quot;webDetection&quot;: { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
                &quot;partialMatchingImages&quot;: [ # Partial matching images from the Internet.
                    # Those images are similar enough to share some key-point features. For
                    # example an original image will likely have partial matching for its crops.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
                &quot;bestGuessLabels&quot;: [ # The service&#x27;s best guess as to the topic of the request image.
                    # Inferred from similar images on the open web.
                  { # Label to provide extra metadata for the web detection.
                    &quot;label&quot;: &quot;A String&quot;, # Label for extra metadata.
                    &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code for `label`, such as &quot;en-US&quot; or &quot;sr-Latn&quot;.
                        # For more information, see
                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                  },
                ],
                &quot;visuallySimilarImages&quot;: [ # The visually similar image results.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
                &quot;webEntities&quot;: [ # Deduced entities from similar images on the Internet.
                  { # Entity deduced from similar images on the Internet.
                    &quot;entityId&quot;: &quot;A String&quot;, # Opaque entity ID.
                    &quot;score&quot;: 3.14, # Overall relevancy score for the entity.
                        # Not normalized and not comparable across different image queries.
                    &quot;description&quot;: &quot;A String&quot;, # Canonical description of the entity, in English.
                  },
                ],
                &quot;fullMatchingImages&quot;: [ # Fully matching images from the Internet.
                    # Can include resized copies of the query image.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
                &quot;pagesWithMatchingImages&quot;: [ # Web pages containing the matching images from the Internet.
                  { # Metadata for web pages.
                    &quot;fullMatchingImages&quot;: [ # Fully matching images on the page.
                        # Can include resized copies of the query image.
                      { # Metadata for online images.
                        &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                        &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                      },
                    ],
                    &quot;pageTitle&quot;: &quot;A String&quot;, # Title for the web page, may contain HTML markups.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the web page.
                    &quot;url&quot;: &quot;A String&quot;, # The result web page URL.
                    &quot;partialMatchingImages&quot;: [ # Partial matching images on the page.
                        # Those images are similar enough to share some key-point features. For
                        # example an original image will likely have partial matching for its
                        # crops.
                      { # Metadata for online images.
                        &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                        &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                      },
                    ],
                  },
                ],
              },
              &quot;productSearchResults&quot;: { # Results for a product search request. # If present, product search has completed successfully.
                &quot;productGroupedResults&quot;: [ # List of results grouped by products detected in the query image. Each entry
                    # corresponds to one bounding polygon in the query image, and contains the
                    # matching products specific to that region. There may be duplicate product
                    # matches in the union of all the per-product results.
                  { # Information about the products similar to a single product in a query
                      # image.
                    &quot;objectAnnotations&quot;: [ # List of generic predictions for the object in the bounding box.
                      { # Prediction for what the object in the bounding box is.
                        &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
                        &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
                        &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                            # information, see
                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                        &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
                      },
                    ],
                    &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
                      &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the normalized vertex coordinates are relative to the original image
                            # and range from 0 to 1.
                          &quot;y&quot;: 3.14, # Y coordinate.
                          &quot;x&quot;: 3.14, # X coordinate.
                        },
                      ],
                      &quot;vertices&quot;: [ # The bounding polygon vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the vertex coordinates are in the same scale as the original image.
                          &quot;x&quot;: 42, # X coordinate.
                          &quot;y&quot;: 42, # Y coordinate.
                        },
                      ],
                    },
                    &quot;results&quot;: [ # List of results, one for each product match.
                      { # Information about a product.
                        &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                            # to the query.
                        &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                          &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                              # 4096 characters long.
                          &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                              #
                              # Format is:
                              # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                              #
                              # This field is ignored when creating a product.
                          &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                              # constraints can be specified based on the product_labels.
                              #
                              # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                              # strings with integer values can match a range-based restriction which is
                              # to be supported soon.
                              #
                              # Multiple values can be assigned to the same key. One product may have up to
                              # 500 product_labels.
                              #
                              # Notice that the total number of distinct product_labels over all products
                              # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                              # will refuse to work for that ProductSet.
                            { # A product label represented as a key-value pair.
                              &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                                  # exceed 128 bytes.
                              &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                                  # cannot exceed 128 bytes.
                            },
                          ],
                          &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                              # characters long.
                          &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                              # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                              # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                              # not be used for new products.
                        },
                        &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                            # 1 (full confidence).
                      },
                    ],
                  },
                ],
                &quot;indexTime&quot;: &quot;A String&quot;, # Timestamp of the index which provided these results. Products added to the
                    # product set and products removed from the product set after this time are
                    # not reflected in the current results.
                &quot;results&quot;: [ # List of results, one for each product match.
                  { # Information about a product.
                    &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                        # to the query.
                    &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                      &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                          # 4096 characters long.
                      &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                          #
                          # Format is:
                          # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                          #
                          # This field is ignored when creating a product.
                      &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                          # constraints can be specified based on the product_labels.
                          #
                          # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                          # strings with integer values can match a range-based restriction which is
                          # to be supported soon.
                          #
                          # Multiple values can be assigned to the same key. One product may have up to
                          # 500 product_labels.
                          #
                          # Notice that the total number of distinct product_labels over all products
                          # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                          # will refuse to work for that ProductSet.
                        { # A product label represented as a key-value pair.
                          &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                              # exceed 128 bytes.
                          &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                              # cannot exceed 128 bytes.
                        },
                      ],
                      &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                          # characters long.
                      &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                          # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                          # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                          # not be used for new products.
                    },
                    &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                        # 1 (full confidence).
                  },
                ],
              },
              &quot;fullTextAnnotation&quot;: { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
                  # completed successfully.
                  # This annotation provides the structural hierarchy for the OCR detected
                  # text.
                  # The hierarchy of an OCR extracted text structure is like this:
                  #     TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
                  # Each structural component, starting from Page, may further have their own
                  # properties. Properties describe detected languages, breaks, etc. Please refer
                  # to the TextAnnotation.TextProperty message definition below for more
                  # detail.
                &quot;text&quot;: &quot;A String&quot;, # UTF-8 text detected on the pages.
                &quot;pages&quot;: [ # List of pages detected by OCR.
                  { # Detected page from OCR.
                    &quot;width&quot;: 42, # Page width. For PDFs the unit is points. For images (including
                        # TIFFs) the unit is pixels.
                    &quot;blocks&quot;: [ # List of blocks of text, images, etc. on this page.
                      { # Logical element on the page.
                        &quot;paragraphs&quot;: [ # List of paragraphs in this block (if this block is of type text).
                          { # Structural unit of text representing a number of words in certain order.
                            &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
                                # The vertices are in the order of top-left, top-right, bottom-right,
                                # bottom-left. When a rotation of the bounding box is detected the rotation
                                # is represented as around the top-left corner as defined when the text is
                                # read in the &#x27;natural&#x27; orientation.
                                # For example:
                                #   * when the text is horizontal it might look like:
                                #      0----1
                                #      |    |
                                #      3----2
                                #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                #      2----3
                                #      |    |
                                #      1----0
                                #   and the vertex order will still be (0, 1, 2, 3).
                              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the normalized vertex coordinates are relative to the original image
                                    # and range from 0 to 1.
                                  &quot;y&quot;: 3.14, # Y coordinate.
                                  &quot;x&quot;: 3.14, # X coordinate.
                                },
                              ],
                              &quot;vertices&quot;: [ # The bounding polygon vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the vertex coordinates are in the same scale as the original image.
                                  &quot;x&quot;: 42, # X coordinate.
                                  &quot;y&quot;: 42, # Y coordinate.
                                },
                              ],
                            },
                            &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the paragraph.
                              &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                              },
                              &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                { # Detected language for a structural component.
                                  &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                      # information, see
                                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                  &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                },
                              ],
                            },
                            &quot;words&quot;: [ # List of all words in this paragraph.
                              { # A word representation.
                                &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the word.
                                  &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                    &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                    &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                  },
                                  &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                    { # Detected language for a structural component.
                                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                          # information, see
                                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                      &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                    },
                                  ],
                                },
                                &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the word. Range [0, 1].
                                &quot;symbols&quot;: [ # List of symbols in the word.
                                    # The order of the symbols follows the natural reading order.
                                  { # A single symbol representation.
                                    &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
                                    &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
                                        # The vertices are in the order of top-left, top-right, bottom-right,
                                        # bottom-left. When a rotation of the bounding box is detected the rotation
                                        # is represented as around the top-left corner as defined when the text is
                                        # read in the &#x27;natural&#x27; orientation.
                                        # For example:
                                        #   * when the text is horizontal it might look like:
                                        #      0----1
                                        #      |    |
                                        #      3----2
                                        #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                        #      2----3
                                        #      |    |
                                        #      1----0
                                        #   and the vertex order will still be (0, 1, 2, 3).
                                      &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                        { # A vertex represents a 2D point in the image.
                                            # NOTE: the normalized vertex coordinates are relative to the original image
                                            # and range from 0 to 1.
                                          &quot;y&quot;: 3.14, # Y coordinate.
                                          &quot;x&quot;: 3.14, # X coordinate.
                                        },
                                      ],
                                      &quot;vertices&quot;: [ # The bounding polygon vertices.
                                        { # A vertex represents a 2D point in the image.
                                            # NOTE: the vertex coordinates are in the same scale as the original image.
                                          &quot;x&quot;: 42, # X coordinate.
                                          &quot;y&quot;: 42, # Y coordinate.
                                        },
                                      ],
                                    },
                                    &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the symbol.
                                      &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                        &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                        &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                      },
                                      &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                        { # Detected language for a structural component.
                                          &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                              # information, see
                                              # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                          &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                        },
                                      ],
                                    },
                                    &quot;text&quot;: &quot;A String&quot;, # The actual UTF-8 representation of the symbol.
                                  },
                                ],
                                &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the word.
                                    # The vertices are in the order of top-left, top-right, bottom-right,
                                    # bottom-left. When a rotation of the bounding box is detected the rotation
                                    # is represented as around the top-left corner as defined when the text is
                                    # read in the &#x27;natural&#x27; orientation.
                                    # For example:
                                    #   * when the text is horizontal it might look like:
                                    #      0----1
                                    #      |    |
                                    #      3----2
                                    #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                    #      2----3
                                    #      |    |
                                    #      1----0
                                    #   and the vertex order will still be (0, 1, 2, 3).
                                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the normalized vertex coordinates are relative to the original image
                                        # and range from 0 to 1.
                                      &quot;y&quot;: 3.14, # Y coordinate.
                                      &quot;x&quot;: 3.14, # X coordinate.
                                    },
                                  ],
                                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the vertex coordinates are in the same scale as the original image.
                                      &quot;x&quot;: 42, # X coordinate.
                                      &quot;y&quot;: 42, # Y coordinate.
                                    },
                                  ],
                                },
                              },
                            ],
                            &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
                          },
                        ],
                        &quot;blockType&quot;: &quot;A String&quot;, # Detected block type (text, image, etc.) for this block.
                        &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the block.
                          &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                            &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                            &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                          },
                          &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                            { # Detected language for a structural component.
                              &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                  # information, see
                                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                              &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                            },
                          ],
                        },
                        &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the block. Range [0, 1].
                        &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the block.
                            # The vertices are in the order of top-left, top-right, bottom-right,
                            # bottom-left. When a rotation of the bounding box is detected the rotation
                            # is represented as around the top-left corner as defined when the text is
                            # read in the &#x27;natural&#x27; orientation.
                            # For example:
                            #
                            # * when the text is horizontal it might look like:
                            #
                            #         0----1
                            #         |    |
                            #         3----2
                            #
                            # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                            #
                            #         2----3
                            #         |    |
                            #         1----0
                            #
                            # and the vertex order will still be (0, 1, 2, 3).
                          &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the normalized vertex coordinates are relative to the original image
                                # and range from 0 to 1.
                              &quot;y&quot;: 3.14, # Y coordinate.
                              &quot;x&quot;: 3.14, # X coordinate.
                            },
                          ],
                          &quot;vertices&quot;: [ # The bounding polygon vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the vertex coordinates are in the same scale as the original image.
                              &quot;x&quot;: 42, # X coordinate.
                              &quot;y&quot;: 42, # Y coordinate.
                            },
                          ],
                        },
                      },
                    ],
                    &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the page. Range [0, 1].
                    &quot;height&quot;: 42, # Page height. For PDFs the unit is points. For images (including
                        # TIFFs) the unit is pixels.
                    &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected on the page.
                      &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                        &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                        &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                      },
                      &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                        { # Detected language for a structural component.
                          &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                              # information, see
                              # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                          &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                        },
                      ],
                    },
                  },
                ],
              },
              &quot;localizedObjectAnnotations&quot;: [ # If present, localized object detection has completed successfully.
                  # This will be sorted descending by confidence score.
                { # Set of detected objects with bounding boxes.
                  &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
                  &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
                  &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                      # information, see
                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                  &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
                    &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the normalized vertex coordinates are relative to the original image
                          # and range from 0 to 1.
                        &quot;y&quot;: 3.14, # Y coordinate.
                        &quot;x&quot;: 3.14, # X coordinate.
                      },
                    ],
                    &quot;vertices&quot;: [ # The bounding polygon vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the vertex coordinates are in the same scale as the original image.
                        &quot;x&quot;: 42, # X coordinate.
                        &quot;y&quot;: 42, # Y coordinate.
                      },
                    ],
                  },
                  &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
                },
              ],
              &quot;imagePropertiesAnnotation&quot;: { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
                &quot;dominantColors&quot;: { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
                  &quot;colors&quot;: [ # RGB color values with their score and pixel fraction.
                    { # Color information consists of RGB channels, score, and the fraction of
                        # the image that the color occupies in the image.
                      &quot;score&quot;: 3.14, # Image-specific score for this color. Value in range [0, 1].
                      &quot;color&quot;: { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                          # for simplicity of conversion to/from color representations in various
                          # languages over compactness; for example, the fields of this representation
                          # can be trivially provided to the constructor of &quot;java.awt.Color&quot; in Java; it
                          # can also be trivially provided to UIColor&#x27;s &quot;+colorWithRed:green:blue:alpha&quot;
                          # method in iOS; and, with just a little work, it can be easily formatted into
                          # a CSS &quot;rgba()&quot; string in JavaScript, as well.
                          #
                          # Note: this proto does not carry information about the absolute color space
                          # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
                          # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
                          # space.
                          #
                          # Note: when color equality needs to be decided, implementations, unless
                          # documented otherwise, will treat two colors as equal if all their red,
                          # green, blue and alpha values each differ by at most 1e-5.
                          #
                          # Example (Java):
                          #
                          #      import com.google.type.Color;
                          #
                          #      // ...
                          #      public static java.awt.Color fromProto(Color protocolor) {
                          #        float alpha = protocolor.hasAlpha()
                          #            ? protocolor.getAlpha().getValue()
                          #            : 1.0;
                          #
                          #        return new java.awt.Color(
                          #            protocolor.getRed(),
                          #            protocolor.getGreen(),
                          #            protocolor.getBlue(),
                          #            alpha);
                          #      }
                          #
                          #      public static Color toProto(java.awt.Color color) {
                          #        float red = (float) color.getRed();
                          #        float green = (float) color.getGreen();
                          #        float blue = (float) color.getBlue();
                          #        float denominator = 255.0;
                          #        Color.Builder resultBuilder =
                          #            Color
                          #                .newBuilder()
                          #                .setRed(red / denominator)
                          #                .setGreen(green / denominator)
                          #                .setBlue(blue / denominator);
                          #        int alpha = color.getAlpha();
                          #        if (alpha != 255) {
                          #          resultBuilder.setAlpha(
                          #              FloatValue
                          #                  .newBuilder()
                          #                  .setValue(((float) alpha) / denominator)
                          #                  .build());
                          #        }
                          #        return resultBuilder.build();
                          #      }
                          #      // ...
                          #
                          # Example (iOS / Obj-C):
                          #
                          #      // ...
                          #      static UIColor* fromProto(Color* protocolor) {
                          #        float red = [protocolor red];
                          #        float green = [protocolor green];
                          #        float blue = [protocolor blue];
                          #        FloatValue* alpha_wrapper = [protocolor alpha];
                          #        float alpha = 1.0;
                          #        if (alpha_wrapper != nil) {
                          #          alpha = [alpha_wrapper value];
                          #        }
                          #        return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                          #      }
                          #
                          #      static Color* toProto(UIColor* color) {
                          #        CGFloat red, green, blue, alpha;
                          #        if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
                          #          return nil;
                          #        }
                          #        Color* result = [[Color alloc] init];
                          #        [result setRed:red];
                          #        [result setGreen:green];
                          #        [result setBlue:blue];
                          #        if (alpha &lt;= 0.9999) {
                          #          [result setAlpha:floatWrapperWithValue(alpha)];
                          #        }
                          #        [result autorelease];
                          #        return result;
                          #      }
                          #      // ...
                          #
                          # Example (JavaScript):
                          #
                          #      // ...
                          #
                          #      var protoToCssColor = function(rgb_color) {
                          #        var redFrac = rgb_color.red || 0.0;
                          #        var greenFrac = rgb_color.green || 0.0;
                          #        var blueFrac = rgb_color.blue || 0.0;
                          #        var red = Math.floor(redFrac * 255);
                          #        var green = Math.floor(greenFrac * 255);
                          #        var blue = Math.floor(blueFrac * 255);
                          #
                          #        if (!(&#x27;alpha&#x27; in rgb_color)) {
                          #          return rgbToCssColor_(red, green, blue);
                          #        }
                          #
                          #        var alphaFrac = rgb_color.alpha.value || 0.0;
                          #        var rgbParams = [red, green, blue].join(&#x27;,&#x27;);
                          #        return [&#x27;rgba(&#x27;, rgbParams, &#x27;,&#x27;, alphaFrac, &#x27;)&#x27;].join(&#x27;&#x27;);
                          #      };
                          #
                          #      var rgbToCssColor_ = function(red, green, blue) {
                          #        var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
                          #        var hexString = rgbNumber.toString(16);
                          #        var missingZeros = 6 - hexString.length;
                          #        var resultBuilder = [&#x27;#&#x27;];
                          #        for (var i = 0; i &lt; missingZeros; i++) {
                          #          resultBuilder.push(&#x27;0&#x27;);
                          #        }
                          #        resultBuilder.push(hexString);
                          #        return resultBuilder.join(&#x27;&#x27;);
                          #      };
                          #
                          #      // ...
                        &quot;blue&quot;: 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                        &quot;red&quot;: 3.14, # The amount of red in the color as a value in the interval [0, 1].
                        &quot;green&quot;: 3.14, # The amount of green in the color as a value in the interval [0, 1].
                        &quot;alpha&quot;: 3.14, # The fraction of this color that should be applied to the pixel. That is,
                            # the final pixel color is defined by the equation:
                            #
                            #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                            #
                            # This means that a value of 1.0 corresponds to a solid color, whereas
                            # a value of 0.0 corresponds to a completely transparent color. This
                            # uses a wrapper message rather than a simple float scalar so that it is
                            # possible to distinguish between a default value and the value being unset.
                            # If omitted, this color object is to be rendered as a solid color
                            # (as if the alpha value had been explicitly given with a value of 1.0).
                      },
                      &quot;pixelFraction&quot;: 3.14, # The fraction of pixels the color occupies in the image.
                          # Value in range [0, 1].
                    },
                  ],
                },
              },
            &quot;faceAnnotations&quot;: [ # If present, face detection has completed successfully.
              { # A face annotation object contains the results of face detection.
                &quot;fdBoundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
                    # `boundingPoly`, and encloses only the skin part of the face. Typically, it
                    # is used to eliminate the face from any image analysis that detects the
                    # &quot;amount of skin&quot; visible in an image. It is not based on the
                    # landmarker results, only on the initial face detection, hence
                    # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;x&quot;: 42, # X coordinate.
                      &quot;y&quot;: 42, # Y coordinate.
                    },
                  ],
                },
                &quot;sorrowLikelihood&quot;: &quot;A String&quot;, # Sorrow likelihood.
                &quot;rollAngle&quot;: 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
                    # of the face relative to the image vertical about the axis perpendicular to
                    # the face. Range [-180,180].
                &quot;angerLikelihood&quot;: &quot;A String&quot;, # Anger likelihood.
                &quot;surpriseLikelihood&quot;: &quot;A String&quot;, # Surprise likelihood.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                    # are in the original image&#x27;s scale.
                    # The bounding box is computed to &quot;frame&quot; the face in accordance with human
                    # expectations. It is based on the landmarker results.
                    # Note that one or more x and/or y coordinates may not be generated in the
                    # `BoundingPoly` (the polygon will be unbounded) if only a partial face
                    # appears in the image to be annotated.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;x&quot;: 42, # X coordinate.
                      &quot;y&quot;: 42, # Y coordinate.
                    },
                  ],
                },
                &quot;detectionConfidence&quot;: 3.14, # Detection confidence. Range [0, 1].
                &quot;headwearLikelihood&quot;: &quot;A String&quot;, # Headwear likelihood.
                &quot;panAngle&quot;: 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
                    # pointing relative to the vertical plane perpendicular to the image. Range
                    # [-180,180].
                &quot;landmarks&quot;: [ # Detected face landmarks.
                  { # A face-specific landmark (for example, a face feature).
                    &quot;position&quot;: { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
                        # A valid Position must have both x and y coordinates.
                        # The position coordinates are in the same scale as the original image.
                      &quot;z&quot;: 3.14, # Z coordinate (or depth).
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                    &quot;type&quot;: &quot;A String&quot;, # Face landmark type.
                  },
                ],
                &quot;landmarkingConfidence&quot;: 3.14, # Face landmarking confidence. Range [0, 1].
                &quot;joyLikelihood&quot;: &quot;A String&quot;, # Joy likelihood.
                &quot;tiltAngle&quot;: 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
                    # pointing relative to the image&#x27;s horizontal plane. Range [-180,180].
                &quot;underExposedLikelihood&quot;: &quot;A String&quot;, # Under-exposed likelihood.
                &quot;blurredLikelihood&quot;: &quot;A String&quot;, # Blurred likelihood.
              },
            ],
            &quot;logoAnnotations&quot;: [ # If present, logo detection has completed successfully.
              { # Set of detected entity features.
                &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                    # The accuracy of the entity detection in an image.
                    # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                    # this field represents the confidence that there is a tower in the query
                    # image. Range [0, 1].
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                    # for `LABEL_DETECTION` features.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;x&quot;: 42, # X coordinate.
                      &quot;y&quot;: 42, # Y coordinate.
                    },
                  ],
                },
                &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                    # [Google Knowledge Graph Search
                    # API](https://developers.google.com/knowledge-graph/).
                &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                    # `description` is expressed.
                &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                    # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                    # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                    # detected distant towering building, even though the confidence that
                    # there is a tower in each image may be the same. Range [0, 1].
                &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                    # `LocationInfo` elements can be present because one location may
                    # indicate the location of the scene in the image, and another location
                    # may indicate the location of the place where the image was taken.
                    # Location information is usually present for landmarks.
                  { # Detected entity location information.
                    &quot;latLng&quot;: { # lat/long location coordinates. An object representing a
                        # latitude/longitude pair. This is expressed as a pair of doubles
                        # representing degrees latitude and degrees longitude. Unless
                        # specified otherwise, this must conform to the
                        # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                        # standard&lt;/a&gt;. Values must be within normalized ranges.
                      &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                      &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                    },
                  },
                ],
                &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                    # fields, such as a score or string that qualifies the entity.
                  { # A `Property` consists of a user-supplied name/value pair.
                    &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                    &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                    &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                  },
                ],
                &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
              },
            ],
            &quot;context&quot;: { # If present, contextual information is needed to understand where this
                # image comes from. If an image was produced from a file (e.g. a PDF),
                # this message gives information about the source of that image.
              &quot;pageNumber&quot;: 42, # If the file was a PDF or TIFF, this field gives the page number within
                  # the file used to produce the image.
              &quot;uri&quot;: &quot;A String&quot;, # The URI of the file used to produce the image.
            },
            &quot;cropHintsAnnotation&quot;: { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
              &quot;cropHints&quot;: [ # Crop hint results.
                { # Single crop hint that is used to generate a new crop when serving an image.
                  &quot;importanceFraction&quot;: 3.14, # Fraction of importance of this salient region with respect to the original
                      # image.
                  &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
                      # box are in the original image&#x27;s scale.
                    &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the normalized vertex coordinates are relative to the original image
                          # and range from 0 to 1.
                        &quot;y&quot;: 3.14, # Y coordinate.
                        &quot;x&quot;: 3.14, # X coordinate.
                      },
                    ],
                    &quot;vertices&quot;: [ # The bounding polygon vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the vertex coordinates are in the same scale as the original image.
                        &quot;x&quot;: 42, # X coordinate.
                        &quot;y&quot;: 42, # Y coordinate.
                      },
                    ],
                  },
                  &quot;confidence&quot;: 3.14, # Confidence of this being a salient region. Range [0, 1].
                },
              ],
            },
            &quot;error&quot;: { # If set, represents the error message for the operation. Note that
                # filled-in image annotations are guaranteed to be correct, even when
                # `error` is set. The `Status` type defines a logical error model that is
                # suitable for different programming environments, including REST APIs and
                # RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status`
                # message contains three pieces of data: error code, error message, and
                # error details.
                #
                # You can find out more about this error model and how to work with it in the
                # [API Design Guide](https://cloud.google.com/apis/design/errors).
              &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
              &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
                  # message types for APIs to use.
                {
                  &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
                },
              ],
              &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
                  # user-facing error message should be localized and sent in the
                  # google.rpc.Status.details field, or localized by the client.
            },
            &quot;landmarkAnnotations&quot;: [ # If present, landmark detection has completed successfully.
              { # Set of detected entity features.
                &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                    # The accuracy of the entity detection in an image.
                    # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                    # this field represents the confidence that there is a tower in the query
                    # image. Range [0, 1].
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                    # for `LABEL_DETECTION` features.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;x&quot;: 42, # X coordinate.
                      &quot;y&quot;: 42, # Y coordinate.
                    },
                  ],
                },
                &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                    # [Google Knowledge Graph Search
                    # API](https://developers.google.com/knowledge-graph/).
                &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                    # `description` is expressed.
                &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                    # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                    # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                    # detected distant towering building, even though the confidence that
                    # there is a tower in each image may be the same. Range [0, 1].
                &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                    # `LocationInfo` elements can be present because one location may
                    # indicate the location of the scene in the image, and another location
                    # may indicate the location of the place where the image was taken.
                    # Location information is usually present for landmarks.
                  { # Detected entity location information.
                    &quot;latLng&quot;: { # lat/long location coordinates. An object representing a
                        # latitude/longitude pair. This is expressed as a pair of doubles
                        # representing degrees latitude and degrees longitude. Unless
                        # specified otherwise, this must conform to the
                        # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                        # standard&lt;/a&gt;. Values must be within normalized ranges.
                      &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                      &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                    },
                  },
                ],
                &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                    # fields, such as a score or string that qualifies the entity.
                  { # A `Property` consists of a user-supplied name/value pair.
                    &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                    &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                    &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                  },
                ],
                &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
              },
            ],
            &quot;textAnnotations&quot;: [ # If present, text (OCR) detection has completed successfully.
              { # Set of detected entity features.
                &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                    # The accuracy of the entity detection in an image.
                    # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                    # this field represents the confidence that there is a tower in the query
                    # image. Range [0, 1].
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                    # for `LABEL_DETECTION` features.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;x&quot;: 42, # X coordinate.
                      &quot;y&quot;: 42, # Y coordinate.
                    },
                  ],
                },
                &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                    # [Google Knowledge Graph Search
                    # API](https://developers.google.com/knowledge-graph/).
                &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                    # `description` is expressed.
                &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                    # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                    # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                    # detected distant towering building, even though the confidence that
                    # there is a tower in each image may be the same. Range [0, 1].
                &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                    # `LocationInfo` elements can be present because one location may
                    # indicate the location of the scene in the image, and another location
                    # may indicate the location of the place where the image was taken.
                    # Location information is usually present for landmarks.
                  { # Detected entity location information.
                    &quot;latLng&quot;: { # lat/long location coordinates. An object representing a
                        # latitude/longitude pair. This is expressed as a pair of doubles
                        # representing degrees latitude and degrees longitude. Unless
                        # specified otherwise, this must conform to the
                        # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                        # standard&lt;/a&gt;. Values must be within normalized ranges.
                      &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                      &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                    },
                  },
                ],
                &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                    # fields, such as a score or string that qualifies the entity.
                  { # A `Property` consists of a user-supplied name/value pair.
                    &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                    &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                    &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                  },
                ],
                &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
              },
            ],
          },
        ],
        &quot;error&quot;: { # If set, represents the error message for the failed request. The
            # `responses` field will not be set in this case. The `Status` type
            # defines a logical error model that is suitable for different programming
            # environments, including REST APIs and RPC APIs. It is used by
            # [gRPC](https://github.com/grpc). Each `Status` message contains
            # three pieces of data: error code, error message, and error details.
            #
            # You can find out more about this error model and how to work with it in the
            # [API Design Guide](https://cloud.google.com/apis/design/errors).
          &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
          &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
              # message types for APIs to use.
            {
              &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
            },
          ],
          &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
              # user-facing error message should be localized and sent in the
              # google.rpc.Status.details field, or localized by the client.
        },
      },
    ],
  }</pre>
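<p>Example (a minimal sketch, not part of the generated reference): calling this
method with the google-api-python-client. It assumes application default
credentials are configured; the file name, page selection, and the
<code>fullTextAnnotation</code> field read at the end are illustrative
assumptions.</p>
<pre>import base64

from googleapiclient.discovery import build

# Build the Vision v1 client; credentials are resolved from the environment.
service = build(&quot;vision&quot;, &quot;v1&quot;)

# Read and base64-encode the file, as required for JSON `bytes` fields.
with open(&quot;statement.pdf&quot;, &quot;rb&quot;) as f:  # hypothetical local file
    content = base64.b64encode(f.read()).decode(&quot;utf-8&quot;)

body = {
    &quot;requests&quot;: [  # currently limited to a single AnnotateFileRequest
        {
            &quot;inputConfig&quot;: {&quot;mimeType&quot;: &quot;application/pdf&quot;, &quot;content&quot;: content},
            &quot;features&quot;: [{&quot;type&quot;: &quot;DOCUMENT_TEXT_DETECTION&quot;}],
            &quot;pages&quot;: [1, 2],  # optional: at most 5 pages/frames per file
        }
    ]
}

response = service.projects().files().annotate(
    parent=&quot;projects/project-A/locations/eu&quot;, body=body
).execute()

# Each file response carries one AnnotateImageResponse per extracted page.
for file_resp in response.get(&quot;responses&quot;, []):
    for image_resp in file_resp.get(&quot;responses&quot;, []):
        # Assumes OCR output lands in `fullTextAnnotation` for this feature.
        print(image_resp.get(&quot;fullTextAnnotation&quot;, {}).get(&quot;text&quot;, &quot;&quot;))
</pre>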
</div>

<div class="method">
  <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(parent, body=None, x__xgafv=None)</code>
  <pre>Run asynchronous image detection and annotation for a list of generic
files, such as PDF files, which may contain multiple pages and multiple
images per page. Progress and results can be retrieved through the
`google.longrunning.Operations` interface.
`Operation.metadata` contains `OperationMetadata` (metadata).
`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).

Args:
  parent: string, Optional. Target project and location to make a call.

Format: `projects/{project-id}/locations/{location-id}`.

If no parent is specified, a region will be chosen automatically.

Supported location-ids:
  `us`: USA only,
  `asia`: East Asia areas, such as Japan and Taiwan,
  `eu`: The European Union.

Example: `projects/project-A/locations/eu`. (required)
  body: object, The request body.
    The object takes the form of:

{ # Multiple async file annotation requests are batched into a single service
    # call.
  &quot;requests&quot;: [ # Required. Individual async file annotation requests for this batch.
    { # An offline file annotation request.
      &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
        &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
          &quot;maxLatLng&quot;: { # Max lat/long pair. An object representing a latitude/longitude
              # pair. This is expressed as a pair of doubles representing degrees
              # latitude and degrees longitude. Unless specified otherwise, this
              # must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
          &quot;minLatLng&quot;: { # Min lat/long pair. An object representing a latitude/longitude
              # pair. This is expressed as a pair of doubles representing degrees
              # latitude and degrees longitude. Unless specified otherwise, this
              # must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
        },
        &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
          &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
        },
        &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
            # yields the best results since it enables automatic language detection. For
            # languages based on the Latin alphabet, setting `language_hints` is not
            # needed. In rare cases, when the language of the text in the image is known,
            # setting a hint will help get better results (although it will be a
            # significant hindrance if the hint is wrong). Text detection returns an
            # error if one or more of the specified languages is not one of the
            # [supported languages](https://cloud.google.com/vision/docs/languages).
          &quot;A String&quot;,
        ],
        &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
          &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
              # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
              # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
              # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
              # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
              # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
              # migrate existing products to these categories as well.
            &quot;A String&quot;,
          ],
          &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
              # If it is not specified, system discretion will be applied.
            &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the normalized vertex coordinates are relative to the original image
                  # and range from 0 to 1.
                &quot;y&quot;: 3.14, # Y coordinate.
                &quot;x&quot;: 3.14, # X coordinate.
              },
            ],
            &quot;vertices&quot;: [ # The bounding polygon vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the vertex coordinates are in the same scale as the original image.
                &quot;x&quot;: 42, # X coordinate.
                &quot;y&quot;: 42, # Y coordinate.
              },
            ],
          },
          &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
              # on Product labels. We currently support an AND of ORs of key-value
              # expressions, where each expression within an OR must have the same key. An
              # &#x27;=&#x27; should be used to connect the key and value.
              #
              # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
              # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
              # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
          &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
              #
              # Format is:
              # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
        },
        &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request.
          &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
              # of the image. For example, if the desired aspect ratio is 4/3, the
              # corresponding float value should be 1.33333. If not specified, the
              # best possible crop is returned. The number of provided aspect ratios is
              # limited to a maximum of 16; any aspect ratios provided after the 16th are
              # ignored.
            3.14,
          ],
        },
      },
      &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
        &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
            # Google Cloud Storage.
            # The valid range is [1, 100]. If not specified, the default value is 20.
            #
            # For example, for one PDF file with 100 pages, 100 response protos will
            # be generated. If `batch_size` = 20, then 5 JSON files each
            # containing 20 response protos will be written under the prefix
            # `gcs_destination`.`uri`.
            #
            # Currently, batch_size only applies to GcsDestination, with potential future
            # support for other output configurations.
        &quot;gcsDestination&quot;: { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
          &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI prefix where the results will be stored. Results
              # will be in JSON format, preceded by their corresponding input URI prefix.
              # This field can represent either a GCS file prefix or a GCS directory. In
              # either case, the uri should be unique, because to get all of the output
              # files you will need to do a wildcard GCS search on the uri prefix you
              # provide.
              #
              # Examples:
              #
              # * File Prefix: gs://bucket-name/here/filenameprefix The output files
              #   will be created in gs://bucket-name/here/ and the names of the
              #   output files will begin with &quot;filenameprefix&quot;.
              #
              # * Directory Prefix: gs://bucket-name/some/location/ The output files
              #   will be created in gs://bucket-name/some/location/ and the names of the
              #   output files could be anything because there was no filename prefix
              #   specified.
              #
              # If there are multiple outputs, each response is still an
              # AnnotateFileResponse, each of which contains some subset of the full list
              # of AnnotateImageResponse messages. Multiple outputs can happen if, for
              # example, the output JSON is too large and overflows into multiple sharded
              # files.
        },
      },
      &quot;features&quot;: [ # Required. Requested features.
        { # The type of Google Cloud Vision API detection to perform, and the maximum
            # number of results to return for that type. Multiple `Feature` objects can
            # be specified in the `features` list.
          &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
              # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
          &quot;type&quot;: &quot;A String&quot;, # The feature type.
          &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
              # Supported values: &quot;builtin/stable&quot; (the default if unset) and
              # &quot;builtin/latest&quot;.
        },
      ],
      &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
        &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
            # &quot;image/gif&quot; are supported. Wildcards are not supported.
        &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
            # Note: As with all `bytes` fields, protobuffers use a pure binary
            # representation, whereas JSON representations use base64.
            #
            # Currently, this field only works for BatchAnnotateFiles requests. It does
            # not work for AsyncBatchAnnotateFiles requests.
        &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
          &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
              # Google Cloud Storage object. Wildcards are not currently supported.
        },
      },
    },
  ],
  &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
      #
      # Format: `projects/{project-id}/locations/{location-id}`.
      #
      # If no parent is specified, a region will be chosen automatically.
      #
      # Supported location-ids:
      #     `us`: USA only,
      #     `asia`: East Asia areas, such as Japan and Taiwan,
      #     `eu`: The European Union.
      #
      # Example: `projects/project-A/locations/eu`.
  }

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # This resource represents a long-running operation that is the result of a
        # network API call.
      &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
          # If `true`, the operation is completed, and either `error` or `response` is
          # available.
      &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
          # originally returns it. If you use the default HTTP mapping, the
          # `name` should be a resource name ending with `operations/{unique_id}`.
      &quot;error&quot;: { # The error result of the operation in case of failure or cancellation. The
          # `Status` type defines a logical error model that is suitable for
          # different programming environments, including REST APIs and RPC APIs. It is
          # used by [gRPC](https://github.com/grpc). Each `Status` message contains
          # three pieces of data: error code, error message, and error details.
          #
          # You can find out more about this error model and how to work with it in the
          # [API Design Guide](https://cloud.google.com/apis/design/errors).
        &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
        &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
            # message types for APIs to use.
          {
            &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
          },
        ],
        &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
            # user-facing error message should be localized and sent in the
            # google.rpc.Status.details field, or localized by the client.
      },
      &quot;response&quot;: { # The normal response of the operation in case of success. If the original
          # method returns no data on success, such as `Delete`, the response is
          # `google.protobuf.Empty`. If the original method is standard
          # `Get`/`Create`/`Update`, the response should be the resource. For other
          # methods, the response should have the type `XxxResponse`, where `Xxx`
          # is the original method name. For example, if the original method name
          # is `TakeSnapshot()`, the inferred response type is
          # `TakeSnapshotResponse`.
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
      &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically
          # contains progress information and common metadata such as create time.
          # Some services might not provide such metadata. Any method that returns a
          # long-running operation should document the metadata type, if any.
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
    }</pre>
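<p>Example (a minimal sketch, not part of the generated reference): an async
request plus polling of the returned long-running operation. The bucket and
object names are hypothetical, and it assumes the v1 surface exposes
<code>operations().get(name=...)</code> for polling; actual results are read
separately from the Cloud Storage output prefix.</p>
<pre>import time

from googleapiclient.discovery import build

service = build(&quot;vision&quot;, &quot;v1&quot;)

body = {
    &quot;requests&quot;: [
        {
            &quot;inputConfig&quot;: {
                &quot;mimeType&quot;: &quot;application/pdf&quot;,
                # Hypothetical bucket/object names.
                &quot;gcsSource&quot;: {&quot;uri&quot;: &quot;gs://my-bucket/input/report.pdf&quot;},
            },
            &quot;features&quot;: [{&quot;type&quot;: &quot;DOCUMENT_TEXT_DETECTION&quot;}],
            &quot;outputConfig&quot;: {
                # Up to 20 AnnotateImageResponse protos per sharded output file.
                &quot;batchSize&quot;: 20,
                &quot;gcsDestination&quot;: {&quot;uri&quot;: &quot;gs://my-bucket/output/&quot;},
            },
        }
    ]
}

operation = service.projects().files().asyncBatchAnnotate(
    parent=&quot;projects/project-A/locations/eu&quot;, body=body
).execute()

# Poll the long-running operation until `done` is set.
while True:
    op = service.operations().get(name=operation[&quot;name&quot;]).execute()
    if op.get(&quot;done&quot;):
        break
    time.sleep(5)

if &quot;error&quot; in op:
    raise RuntimeError(op[&quot;error&quot;].get(&quot;message&quot;, &quot;asyncBatchAnnotate failed&quot;))

# On success, sharded JSON results live under the gcsDestination uri prefix.
print(op.get(&quot;response&quot;, {}))
</pre>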
</div>

</body></html>