<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>
74
75<h1><a href="vision_v1.html">Cloud Vision API</a> . <a href="vision_v1.projects.html">projects</a> . <a href="vision_v1.projects.images.html">images</a></h1>
76<h2>Instance Methods</h2>
77<p class="toc_element">
78 <code><a href="#annotate">annotate(parent, body=None, x__xgafv=None)</a></code></p>
79<p class="firstline">Run image detection and annotation for a batch of images.</p>
80<p class="toc_element">
81 <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(parent, body=None, x__xgafv=None)</a></code></p>
82<p class="firstline">Run asynchronous image detection and annotation for a list of images.</p>
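<p>For orientation, the following is a minimal, illustrative sketch of calling the <code>annotate</code> method with the Google API Python client. It assumes google-api-python-client is installed and application-default credentials are available; the project path and Cloud Storage URI are hypothetical placeholders, not part of the generated reference.</p>
<pre>
# A minimal sketch, assuming google-api-python-client is installed and
# application-default credentials are configured.
from googleapiclient.discovery import build

service = build(&#x27;vision&#x27;, &#x27;v1&#x27;)

# &#x27;projects/my-project/locations/eu&#x27; and &#x27;gs://my-bucket/my-image.jpg&#x27;
# are hypothetical placeholder values.
request = service.projects().images().annotate(
    parent=&#x27;projects/my-project/locations/eu&#x27;,
    body={
        &#x27;requests&#x27;: [{
            &#x27;image&#x27;: {&#x27;source&#x27;: {&#x27;imageUri&#x27;: &#x27;gs://my-bucket/my-image.jpg&#x27;}},
            &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;LABEL_DETECTION&#x27;, &#x27;maxResults&#x27;: 5}],
        }],
    },
)
response = request.execute()
for label in response.get(&#x27;responses&#x27;, [{}])[0].get(&#x27;labelAnnotations&#x27;, []):
    print(label[&#x27;description&#x27;], label[&#x27;score&#x27;])
</pre>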
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="annotate">annotate(parent, body=None, x__xgafv=None)</code>
  <pre>Run image detection and annotation for a batch of images.

Args:
  parent: string, Optional. Target project and location to make a call.

Format: `projects/{project-id}/locations/{location-id}`.

If no parent is specified, a region will be chosen automatically.

Supported location-ids:
    `us`: USA country only,
    `asia`: East Asia areas, like Japan, Taiwan,
    `eu`: The European Union.

Example: `projects/project-A/locations/eu`. (required)
  body: object, The request body.
    The object takes the form of:

{ # Multiple image annotation requests are batched into a single service call.
  &quot;requests&quot;: [ # Required. Individual image annotation requests for this batch.
    { # Request for performing Google Cloud Vision API tasks over a user-provided
        # image, with user-requested features, and with context information.
      &quot;image&quot;: { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
        &quot;source&quot;: { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
            # URL. If both `content` and `source` are provided for an image, `content`
            # takes precedence and is used to perform the image annotation request.
          &quot;gcsImageUri&quot;: &quot;A String&quot;, # **Use `image_uri` instead.**
              #
              # The Google Cloud Storage URI of the form
              # `gs://bucket_name/object_name`. Object versioning is not supported. See
              # [Google Cloud Storage Request
              # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
          &quot;imageUri&quot;: &quot;A String&quot;, # The URI of the source image. Can be either:
              #
              # 1. A Google Cloud Storage URI of the form
              # `gs://bucket_name/object_name`. Object versioning is not supported. See
              # [Google Cloud Storage Request
              # URIs](https://cloud.google.com/storage/docs/reference-uris) for more
              # info.
              #
              # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
              # HTTP/HTTPS URLs, Google cannot guarantee that the request will be
              # completed. Your request may fail if the specified host denies the
              # request (e.g. due to request throttling or DOS prevention), or if Google
              # throttles requests to the site for abuse prevention. You should not
              # depend on externally-hosted images for production applications.
              #
              # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
              # precedence.
        },
        &quot;content&quot;: &quot;A String&quot;, # Image content, represented as a stream of bytes.
            # Note: As with all `bytes` fields, protobuffers use a pure binary
            # representation, whereas JSON representations use base64.
            #
            # Currently, this field only works for BatchAnnotateImages requests. It does
            # not work for AsyncBatchAnnotateImages requests.
      },
      &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
        &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
          &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
          &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
        },
        &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
          &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
        },
        &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
            # yields the best results since it enables automatic language detection. For
            # languages based on the Latin alphabet, setting `language_hints` is not
            # needed. In rare cases, when the language of the text in the image is known,
            # setting a hint will help get better results (although it will be a
            # significant hindrance if the hint is wrong). Text detection returns an
            # error if one or more of the specified languages is not one of the
            # [supported languages](https://cloud.google.com/vision/docs/languages).
          &quot;A String&quot;,
        ],
        &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
          &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
              # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
              # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
              # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
              # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
              # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
              # migrate existing products to these categories as well.
            &quot;A String&quot;,
          ],
          &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
              # If it is not specified, system discretion will be applied.
            &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the normalized vertex coordinates are relative to the original image
                  # and range from 0 to 1.
                &quot;y&quot;: 3.14, # Y coordinate.
                &quot;x&quot;: 3.14, # X coordinate.
              },
            ],
            &quot;vertices&quot;: [ # The bounding polygon vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the vertex coordinates are in the same scale as the original image.
                &quot;x&quot;: 42, # X coordinate.
                &quot;y&quot;: 42, # Y coordinate.
              },
            ],
          },
          &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
              # on Product labels. We currently support an AND of OR of key-value
              # expressions, where each expression within an OR must have the same key. An
              # &#x27;=&#x27; should be used to connect the key and value.
              #
              # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
              # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
              # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
          &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
              #
              # Format is:
              # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
        },
        &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
          &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
              # of the image. For example, if the desired aspect ratio is 4/3, the
              # corresponding float value should be 1.33333. If not specified, the
              # best possible crop is returned. The number of provided aspect ratios is
              # limited to a maximum of 16; any aspect ratios provided after the 16th are
              # ignored.
            3.14,
          ],
        },
      },
      &quot;features&quot;: [ # Requested features.
        { # The type of Google Cloud Vision API detection to perform, and the maximum
            # number of results to return for that type. Multiple `Feature` objects can
            # be specified in the `features` list.
          &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
              # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
          &quot;type&quot;: &quot;A String&quot;, # The feature type.
          &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
              # Supported values: &quot;builtin/stable&quot; (the default if unset) and
              # &quot;builtin/latest&quot;.
        },
      ],
    },
  ],
  &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
      #
      # Format: `projects/{project-id}/locations/{location-id}`.
      #
      # If no parent is specified, a region will be chosen automatically.
      #
      # Supported location-ids:
      #     `us`: USA country only,
      #     `asia`: East Asia areas, like Japan, Taiwan,
      #     `eu`: The European Union.
      #
      # Example: `projects/project-A/locations/eu`.
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

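For illustration, a minimal request body asking for label detection on a
Cloud Storage image might look like the following (the bucket, object name,
and feature choice are hypothetical; `LABEL_DETECTION` is one of the
supported feature types):

  {
    &quot;requests&quot;: [
      {
        &quot;image&quot;: {&quot;source&quot;: {&quot;imageUri&quot;: &quot;gs://my-bucket/my-image.jpg&quot;}},
        &quot;features&quot;: [{&quot;type&quot;: &quot;LABEL_DETECTION&quot;, &quot;maxResults&quot;: 5}],
      },
    ],
  }
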
Returns:
  An object of the form:

    { # Response to a batch image annotation request.
      &quot;responses&quot;: [ # Individual responses to image annotation requests within the batch.
        { # Response to an image annotation request.
          &quot;labelAnnotations&quot;: [ # If present, label detection has completed successfully.
            { # Set of detected entity features.
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                ],
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;x&quot;: 42, # X coordinate.
                    &quot;y&quot;: 42, # Y coordinate.
                  },
                ],
              },
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;safeSearchAnnotation&quot;: { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
              # methods over safe-search verticals (for example, adult, spoof, medical,
              # violence).
            &quot;spoof&quot;: &quot;A String&quot;, # Spoof likelihood. The likelihood that a modification
                # was made to the image&#x27;s canonical version to make it appear
                # funny or offensive.
            &quot;racy&quot;: &quot;A String&quot;, # Likelihood that the request image contains racy content. Racy content may
                # include (but is not limited to) skimpy or sheer clothing, strategically
                # covered nudity, lewd or provocative poses, or close-ups of sensitive
                # body areas.
            &quot;adult&quot;: &quot;A String&quot;, # Represents the adult content likelihood for the image. Adult content may
                # contain elements such as nudity, pornographic images or cartoons, or
                # sexual activities.
            &quot;violence&quot;: &quot;A String&quot;, # Likelihood that this image contains violent content.
            &quot;medical&quot;: &quot;A String&quot;, # Likelihood that this is a medical image.
          },
          &quot;webDetection&quot;: { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
            &quot;partialMatchingImages&quot;: [ # Partial matching images from the Internet.
                # Those images are similar enough to share some key-point features. For
                # example an original image will likely have partial matching for its crops.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;bestGuessLabels&quot;: [ # The service&#x27;s best guess as to the topic of the request image.
                # Inferred from similar images on the open web.
              { # Label to provide extra metadata for the web detection.
                &quot;label&quot;: &quot;A String&quot;, # Label for extra metadata.
                &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code for `label`, such as &quot;en-US&quot; or &quot;sr-Latn&quot;.
                    # For more information, see
                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
              },
            ],
            &quot;visuallySimilarImages&quot;: [ # The visually similar image results.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;webEntities&quot;: [ # Deduced entities from similar images on the Internet.
              { # Entity deduced from similar images on the Internet.
                &quot;entityId&quot;: &quot;A String&quot;, # Opaque entity ID.
                &quot;score&quot;: 3.14, # Overall relevancy score for the entity.
                    # Not normalized and not comparable across different image queries.
                &quot;description&quot;: &quot;A String&quot;, # Canonical description of the entity, in English.
              },
            ],
            &quot;fullMatchingImages&quot;: [ # Fully matching images from the Internet.
                # Can include resized copies of the query image.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;pagesWithMatchingImages&quot;: [ # Web pages containing the matching images from the Internet.
              { # Metadata for web pages.
                &quot;fullMatchingImages&quot;: [ # Fully matching images on the page.
                    # Can include resized copies of the query image.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
                &quot;pageTitle&quot;: &quot;A String&quot;, # Title for the web page; may contain HTML markup.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the web page.
                &quot;url&quot;: &quot;A String&quot;, # The result web page URL.
                &quot;partialMatchingImages&quot;: [ # Partial matching images on the page.
                    # Those images are similar enough to share some key-point features. For
                    # example an original image will likely have partial matching for its
                    # crops.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
              },
            ],
          },
          &quot;productSearchResults&quot;: { # Results for a product search request. # If present, product search has completed successfully.
            &quot;productGroupedResults&quot;: [ # List of results grouped by products detected in the query image. Each entry
                # corresponds to one bounding polygon in the query image, and contains the
                # matching products specific to that region. There may be duplicate product
                # matches in the union of all the per-product results.
              { # Information about the products similar to a single product in a query
                  # image.
                &quot;objectAnnotations&quot;: [ # List of generic predictions for the object in the bounding box.
                  { # Prediction for what the object in the bounding box is.
                    &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
                    &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
                    &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                        # information, see
                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                    &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
                  },
                ],
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;x&quot;: 42, # X coordinate.
                      &quot;y&quot;: 42, # Y coordinate.
                    },
                  ],
                },
                &quot;results&quot;: [ # List of results, one for each product match.
                  { # Information about a product.
                    &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                        # to the query.
                    &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                      &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                          # 4096 characters long.
                      &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                          #
                          # Format is:
                          # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                          #
                          # This field is ignored when creating a product.
                      &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                          # constraints can be specified based on the product_labels.
                          #
                          # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                          # strings with integer values can match a range-based restriction which is
                          # to be supported soon.
                          #
                          # Multiple values can be assigned to the same key. One product may have up to
                          # 500 product_labels.
                          #
                          # Notice that the total number of distinct product_labels over all products
                          # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                          # will refuse to work for that ProductSet.
                        { # A product label represented as a key-value pair.
                          &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                              # exceed 128 bytes.
                          &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                              # cannot exceed 128 bytes.
                        },
                      ],
                      &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                          # characters long.
                      &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                          # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                          # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                          # not be used for new products.
                    },
                    &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                        # 1 (full confidence).
                  },
                ],
              },
            ],
            &quot;indexTime&quot;: &quot;A String&quot;, # Timestamp of the index which provided these results. Products added to the
                # product set and products removed from the product set after this time are
                # not reflected in the current results.
            &quot;results&quot;: [ # List of results, one for each product match.
              { # Information about a product.
                &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                    # to the query.
                &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                  &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                      # 4096 characters long.
                  &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                      #
                      # Format is:
                      # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                      #
                      # This field is ignored when creating a product.
                  &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                      # constraints can be specified based on the product_labels.
                      #
                      # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                      # strings with integer values can match a range-based restriction which is
                      # to be supported soon.
                      #
                      # Multiple values can be assigned to the same key. One product may have up to
                      # 500 product_labels.
                      #
                      # Notice that the total number of distinct product_labels over all products
                      # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                      # will refuse to work for that ProductSet.
                    { # A product label represented as a key-value pair.
                      &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                          # exceed 128 bytes.
                      &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                          # cannot exceed 128 bytes.
                    },
                  ],
                  &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                      # characters long.
                  &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                      # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                      # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                      # not be used for new products.
                },
                &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                    # 1 (full confidence).
              },
            ],
          },
          &quot;fullTextAnnotation&quot;: { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
              # completed successfully.
              # This annotation provides the structural hierarchy for the OCR detected
              # text.
              # The hierarchy of an OCR extracted text structure is like this:
              # TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
              # Each structural component, starting from Page, may further have its own
              # properties. Properties describe detected languages, breaks, etc. Please refer
              # to the TextAnnotation.TextProperty message definition below for more
              # detail.
            &quot;text&quot;: &quot;A String&quot;, # UTF-8 text detected on the pages.
            &quot;pages&quot;: [ # List of pages detected by OCR.
              { # Detected page from OCR.
                &quot;width&quot;: 42, # Page width. For PDFs the unit is points. For images (including
                    # TIFFs) the unit is pixels.
                &quot;blocks&quot;: [ # List of blocks of text, images, etc. on this page.
                  { # Logical element on the page.
                    &quot;paragraphs&quot;: [ # List of paragraphs in this block (if this block is of type text).
                      { # Structural unit of text representing a number of words in certain order.
                        &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
                            # The vertices are in the order of top-left, top-right, bottom-right,
                            # bottom-left. When a rotation of the bounding box is detected the rotation
                            # is represented as around the top-left corner as defined when the text is
                            # read in the &#x27;natural&#x27; orientation.
                            # For example:
                            #   * when the text is horizontal it might look like:
                            #      0----1
                            #      |    |
                            #      3----2
                            #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                            #      2----3
                            #      |    |
                            #      1----0
                            #   and the vertex order will still be (0, 1, 2, 3).
                          &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the normalized vertex coordinates are relative to the original image
                                # and range from 0 to 1.
                              &quot;y&quot;: 3.14, # Y coordinate.
                              &quot;x&quot;: 3.14, # X coordinate.
                            },
                          ],
                          &quot;vertices&quot;: [ # The bounding polygon vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the vertex coordinates are in the same scale as the original image.
                              &quot;x&quot;: 42, # X coordinate.
                              &quot;y&quot;: 42, # Y coordinate.
                            },
                          ],
                        },
                        &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the paragraph.
                          &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                            &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                            &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                          },
                          &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                            { # Detected language for a structural component.
                              &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                  # information, see
                                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                              &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                            },
                          ],
                        },
                        &quot;words&quot;: [ # List of all words in this paragraph.
                          { # A word representation.
                            &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the word.
                              &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                              },
                              &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                { # Detected language for a structural component.
                                  &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                      # information, see
                                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                  &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                },
                              ],
                            },
                            &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the word. Range [0, 1].
                            &quot;symbols&quot;: [ # List of symbols in the word.
                                # The order of the symbols follows the natural reading order.
                              { # A single symbol representation.
                                &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
                                &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
                                    # The vertices are in the order of top-left, top-right, bottom-right,
                                    # bottom-left. When a rotation of the bounding box is detected the rotation
                                    # is represented as around the top-left corner as defined when the text is
                                    # read in the &#x27;natural&#x27; orientation.
                                    # For example:
                                    #   * when the text is horizontal it might look like:
                                    #      0----1
                                    #      |    |
                                    #      3----2
                                    #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                    #      2----3
                                    #      |    |
                                    #      1----0
                                    #   and the vertex order will still be (0, 1, 2, 3).
                                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the normalized vertex coordinates are relative to the original image
                                        # and range from 0 to 1.
                                      &quot;y&quot;: 3.14, # Y coordinate.
                                      &quot;x&quot;: 3.14, # X coordinate.
                                    },
                                  ],
                                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the vertex coordinates are in the same scale as the original image.
                                      &quot;x&quot;: 42, # X coordinate.
                                      &quot;y&quot;: 42, # Y coordinate.
                                    },
                                  ],
                                },
                                &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the symbol.
                                  &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                    &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                    &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                  },
                                  &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                    { # Detected language for a structural component.
                                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                          # information, see
                                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                      &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                    },
                                  ],
                                },
                                &quot;text&quot;: &quot;A String&quot;, # The actual UTF-8 representation of the symbol.
                              },
                            ],
                            &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the word.
                                # The vertices are in the order of top-left, top-right, bottom-right,
                                # bottom-left. When a rotation of the bounding box is detected the rotation
                                # is represented as around the top-left corner as defined when the text is
                                # read in the &#x27;natural&#x27; orientation.
                                # For example:
                                #   * when the text is horizontal it might look like:
                                #      0----1
                                #      |    |
                                #      3----2
                                #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                #      2----3
                                #      |    |
                                #      1----0
                                #   and the vertex order will still be (0, 1, 2, 3).
                              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the normalized vertex coordinates are relative to the original image
                                    # and range from 0 to 1.
                                  &quot;y&quot;: 3.14, # Y coordinate.
                                  &quot;x&quot;: 3.14, # X coordinate.
                                },
                              ],
                              &quot;vertices&quot;: [ # The bounding polygon vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the vertex coordinates are in the same scale as the original image.
                                  &quot;x&quot;: 42, # X coordinate.
                                  &quot;y&quot;: 42, # Y coordinate.
                                },
                              ],
                            },
                          },
                        ],
                        &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
                      },
                    ],
                    &quot;blockType&quot;: &quot;A String&quot;, # Detected block type (text, image, etc.) for this block.
                    &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the block.
                      &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                        &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                        &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                      },
                      &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                        { # Detected language for a structural component.
                          &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                              # information, see
                              # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                          &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                        },
                      ],
                    },
                    &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the block. Range [0, 1].
                    &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the block.
                        # The vertices are in the order of top-left, top-right, bottom-right,
                        # bottom-left. When a rotation of the bounding box is detected the rotation
                        # is represented as around the top-left corner as defined when the text is
                        # read in the &#x27;natural&#x27; orientation.
                        # For example:
                        #
                        # * when the text is horizontal it might look like:
                        #
                        #         0----1
                        #         |    |
                        #         3----2
                        #
                        # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                        #
                        #         2----3
                        #         |    |
                        #         1----0
                        #
                        # and the vertex order will still be (0, 1, 2, 3).
                      &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the normalized vertex coordinates are relative to the original image
                            # and range from 0 to 1.
                          &quot;y&quot;: 3.14, # Y coordinate.
                          &quot;x&quot;: 3.14, # X coordinate.
                        },
                      ],
                      &quot;vertices&quot;: [ # The bounding polygon vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the vertex coordinates are in the same scale as the original image.
                          &quot;x&quot;: 42, # X coordinate.
                          &quot;y&quot;: 42, # Y coordinate.
                        },
                      ],
                    },
                  },
                ],
                &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the page. Range [0, 1].
                &quot;height&quot;: 42, # Page height. For PDFs the unit is points. For images (including
                    # TIFFs) the unit is pixels.
                &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected on the page.
                  &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                    &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                    &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                  },
                  &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                    { # Detected language for a structural component.
                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                          # information, see
                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                      &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                    },
                  ],
                },
              },
            ],
          },
          &quot;localizedObjectAnnotations&quot;: [ # If present, localized object detection has completed successfully.
              # This will be sorted descending by confidence score.
            { # Set of detected objects with bounding boxes.
              &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
              &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
              &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                  # information, see
                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                ],
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;x&quot;: 42, # X coordinate.
                    &quot;y&quot;: 42, # Y coordinate.
                  },
                ],
              },
              &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
            },
          ],
          &quot;imagePropertiesAnnotation&quot;: { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
            &quot;dominantColors&quot;: { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
              &quot;colors&quot;: [ # RGB color values with their score and pixel fraction.
                { # Color information consists of RGB channels, score, and the fraction of
                    # the image that the color occupies in the image.
                  &quot;score&quot;: 3.14, # Image-specific score for this color. Value in range [0, 1].
                  &quot;color&quot;: { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                      # for simplicity of conversion to/from color representations in various
                      # languages over compactness; for example, the fields of this representation
                      # can be trivially provided to the constructor of &quot;java.awt.Color&quot; in Java; it
                      # can also be trivially provided to UIColor&#x27;s &quot;+colorWithRed:green:blue:alpha&quot;
                      # method in iOS; and, with just a little work, it can be easily formatted into
                      # a CSS &quot;rgba()&quot; string in JavaScript, as well.
                      #
                      # Note: this proto does not carry information about the absolute color space
                      # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
                      # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
                      # space.
                      #
                      # Note: when color equality needs to be decided, implementations, unless
                      # documented otherwise, will treat two colors to be equal if all their red,
                      # green, blue and alpha values each differ by at most 1e-5.
                      #
                      # Example (Java):
                      #
                      #      import com.google.type.Color;
                      #
                      #      // ...
                      #      public static java.awt.Color fromProto(Color protocolor) {
                      #        float alpha = protocolor.hasAlpha()
                      #            ? protocolor.getAlpha().getValue()
                      #            : 1.0;
                      #
                      #        return new java.awt.Color(
                      #            protocolor.getRed(),
                      #            protocolor.getGreen(),
                      #            protocolor.getBlue(),
                      #            alpha);
                      #      }
                      #
                      #      public static Color toProto(java.awt.Color color) {
                      #        float red = (float) color.getRed();
                      #        float green = (float) color.getGreen();
                      #        float blue = (float) color.getBlue();
                      #        float denominator = 255.0;
                      #        Color.Builder resultBuilder =
                      #            Color
                      #                .newBuilder()
                      #                .setRed(red / denominator)
                      #                .setGreen(green / denominator)
                      #                .setBlue(blue / denominator);
                      #        int alpha = color.getAlpha();
                      #        if (alpha != 255) {
                      #          resultBuilder.setAlpha(
                      #              FloatValue
                      #                  .newBuilder()
                      #                  .setValue(((float) alpha) / denominator)
                      #                  .build());
                      #        }
                      #        return resultBuilder.build();
                      #      }
                      #      // ...
                      #
                      # Example (iOS / Obj-C):
                      #
                      #      // ...
                      #      static UIColor* fromProto(Color* protocolor) {
                      #        float red = [protocolor red];
                      #        float green = [protocolor green];
                      #        float blue = [protocolor blue];
                      #        FloatValue* alpha_wrapper = [protocolor alpha];
                      #        float alpha = 1.0;
                      #        if (alpha_wrapper != nil) {
                      #          alpha = [alpha_wrapper value];
                      #        }
                      #        return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                      #      }
                      #
                      #      static Color* toProto(UIColor* color) {
                      #        CGFloat red, green, blue, alpha;
                      #        if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
                      #          return nil;
                      #        }
                      #        Color* result = [[Color alloc] init];
                      #        [result setRed:red];
                      #        [result setGreen:green];
                      #        [result setBlue:blue];
                      #        if (alpha &lt;= 0.9999) {
                      #          [result setAlpha:floatWrapperWithValue(alpha)];
                      #        }
                      #        [result autorelease];
                      #        return result;
                      #      }
                      #      // ...
                      #
                      # Example (JavaScript):
                      #
                      #      // ...
                      #
                      #      var protoToCssColor = function(rgb_color) {
                      #        var redFrac = rgb_color.red || 0.0;
                      #        var greenFrac = rgb_color.green || 0.0;
                      #        var blueFrac = rgb_color.blue || 0.0;
                      #        var red = Math.floor(redFrac * 255);
                      #        var green = Math.floor(greenFrac * 255);
                      #        var blue = Math.floor(blueFrac * 255);
                      #
                      #        if (!(&#x27;alpha&#x27; in rgb_color)) {
                      #          return rgbToCssColor_(red, green, blue);
                      #        }
                      #
                      #        var alphaFrac = rgb_color.alpha.value || 0.0;
                      #        var rgbParams = [red, green, blue].join(&#x27;,&#x27;);
                      #        return [&#x27;rgba(&#x27;, rgbParams, &#x27;,&#x27;, alphaFrac, &#x27;)&#x27;].join(&#x27;&#x27;);
                      #      };
                      #
                      #      var rgbToCssColor_ = function(red, green, blue) {
                      #        var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
                      #        var hexString = rgbNumber.toString(16);
                      #        var missingZeros = 6 - hexString.length;
                      #        var resultBuilder = [&#x27;#&#x27;];
                      #        for (var i = 0; i &lt; missingZeros; i++) {
                      #          resultBuilder.push(&#x27;0&#x27;);
                      #        }
                      #        resultBuilder.push(hexString);
                      #        return resultBuilder.join(&#x27;&#x27;);
                      #      };
                      #
                      #      // ...
                    &quot;blue&quot;: 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                    &quot;red&quot;: 3.14, # The amount of red in the color as a value in the interval [0, 1].
                    &quot;green&quot;: 3.14, # The amount of green in the color as a value in the interval [0, 1].
                    &quot;alpha&quot;: 3.14, # The fraction of this color that should be applied to the pixel. That is,
                        # the final pixel color is defined by the equation:
                        #
                        #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                        #
                        # This means that a value of 1.0 corresponds to a solid color, whereas
                        # a value of 0.0 corresponds to a completely transparent color. This
                        # uses a wrapper message rather than a simple float scalar so that it is
                        # possible to distinguish between a default value and the value being unset.
                        # If omitted, this color object is to be rendered as a solid color
                        # (as if the alpha value had been explicitly given with a value of 1.0).
                  },
                  &quot;pixelFraction&quot;: 3.14, # The fraction of pixels the color occupies in the image.
                      # Value in range [0, 1].
                },
              ],
            },
          },
          &quot;faceAnnotations&quot;: [ # If present, face detection has completed successfully.
            { # A face annotation object contains the results of face detection.
              &quot;fdBoundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
                  # `boundingPoly`, and encloses only the skin part of the face. Typically, it
                  # is used to eliminate the face from any image analysis that detects the
                  # &quot;amount of skin&quot; visible in an image. It is not based on the
                  # landmarker results, only on the initial face detection, hence
                  # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                ],
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;x&quot;: 42, # X coordinate.
                    &quot;y&quot;: 42, # Y coordinate.
                  },
                ],
              },
              &quot;sorrowLikelihood&quot;: &quot;A String&quot;, # Sorrow likelihood.
              &quot;rollAngle&quot;: 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
                  # of the face relative to the image vertical about the axis perpendicular to
                  # the face. Range [-180,180].
              &quot;angerLikelihood&quot;: &quot;A String&quot;, # Anger likelihood.
              &quot;surpriseLikelihood&quot;: &quot;A String&quot;, # Surprise likelihood.
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                  # are in the original image&#x27;s scale.
                  # The bounding box is computed to &quot;frame&quot; the face in accordance with human
                  # expectations. It is based on the landmarker results.
                  # Note that one or more x and/or y coordinates may not be generated in the
                  # `BoundingPoly` (the polygon will be unbounded) if only a partial face
                  # appears in the image to be annotated.
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                ],
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;x&quot;: 42, # X coordinate.
                    &quot;y&quot;: 42, # Y coordinate.
                  },
                ],
              },
              &quot;detectionConfidence&quot;: 3.14, # Detection confidence. Range [0, 1].
              &quot;headwearLikelihood&quot;: &quot;A String&quot;, # Headwear likelihood.
              &quot;panAngle&quot;: 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
                  # pointing relative to the vertical plane perpendicular to the image. Range
                  # [-180,180].
              &quot;landmarks&quot;: [ # Detected face landmarks.
                { # A face-specific landmark (for example, a face feature).
                  &quot;position&quot;: { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
                      # A valid Position must have both x and y coordinates.
                      # The position coordinates are in the same scale as the original image.
                    &quot;z&quot;: 3.14, # Z coordinate (or depth).
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                  &quot;type&quot;: &quot;A String&quot;, # Face landmark type.
                },
              ],
              &quot;landmarkingConfidence&quot;: 3.14, # Face landmarking confidence. Range [0, 1].
              &quot;joyLikelihood&quot;: &quot;A String&quot;, # Joy likelihood.
              &quot;tiltAngle&quot;: 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
                  # pointing relative to the image&#x27;s horizontal plane. Range [-180,180].
              &quot;underExposedLikelihood&quot;: &quot;A String&quot;, # Under-exposed likelihood.
              &quot;blurredLikelihood&quot;: &quot;A String&quot;, # Blurred likelihood.
            },
          ],
          &quot;logoAnnotations&quot;: [ # If present, logo detection has completed successfully.
            { # Set of detected entity features.
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                ],
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;x&quot;: 42, # X coordinate.
                    &quot;y&quot;: 42, # Y coordinate.
                  },
                ],
              },
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;context&quot;: { # If an image was produced from a file (e.g. a PDF), this message gives
              # information about the source of that image. If present, contextual
              # information is needed to understand where this image comes from.
            &quot;pageNumber&quot;: 42, # If the file was a PDF or TIFF, this field gives the page number within
                # the file used to produce the image.
            &quot;uri&quot;: &quot;A String&quot;, # The URI of the file used to produce the image.
          },
          &quot;cropHintsAnnotation&quot;: { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
            &quot;cropHints&quot;: [ # Crop hint results.
              { # Single crop hint that is used to generate a new crop when serving an image.
                &quot;importanceFraction&quot;: 3.14, # Fraction of importance of this salient region with respect to the original
                    # image.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
                    # box are in the original image&#x27;s scale.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;y&quot;: 3.14, # Y coordinate.
                      &quot;x&quot;: 3.14, # X coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;x&quot;: 42, # X coordinate.
                      &quot;y&quot;: 42, # Y coordinate.
                    },
                  ],
                },
                &quot;confidence&quot;: 3.14, # Confidence of this being a salient region. Range [0, 1].
              },
            ],
          },
          &quot;error&quot;: { # If set, represents the error message for the operation.
              # Note that filled-in image annotations are guaranteed to be
              # correct, even when `error` is set.
              # The `Status` type defines a logical error model that is suitable for
              # different programming environments, including REST APIs and RPC APIs. It is
              # used by [gRPC](https://github.com/grpc). Each `Status` message contains
              # three pieces of data: error code, error message, and error details.
              #
              # You can find out more about this error model and how to work with it in the
              # [API Design Guide](https://cloud.google.com/apis/design/errors).
            &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
            &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
                # message types for APIs to use.
              {
                &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
              },
            ],
            &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
                # user-facing error message should be localized and sent in the
                # google.rpc.Status.details field, or localized by the client.
          },
          &quot;landmarkAnnotations&quot;: [ # If present, landmark detection has completed successfully.
            { # Set of detected entity features.
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                ],
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;x&quot;: 42, # X coordinate.
                    &quot;y&quot;: 42, # Y coordinate.
                  },
                ],
              },
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;textAnnotations&quot;: [ # If present, text (OCR) detection has completed successfully.
            { # Set of detected entity features.
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;x&quot;: 3.14, # X coordinate.
                  },
                ],
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;x&quot;: 42, # X coordinate.
                    &quot;y&quot;: 42, # Y coordinate.
                  },
                ],
              },
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
        },
      ],
    }</pre>
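  <p>The sketch below is a minimal, hypothetical example of calling this method with the google-api-python-client library and reading a few of the response fields documented above. It assumes application default credentials are configured; the project, location, bucket, and image names are placeholders.</p>
  <pre>
from googleapiclient.discovery import build

# Build the Cloud Vision v1 client (assumes application default credentials).
service = build(&#x27;vision&#x27;, &#x27;v1&#x27;)

# One batched request: face detection on a placeholder image URI.
body = {
    &#x27;requests&#x27;: [{
        &#x27;image&#x27;: {&#x27;source&#x27;: {&#x27;imageUri&#x27;: &#x27;gs://my-bucket/face.jpg&#x27;}},
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;FACE_DETECTION&#x27;, &#x27;maxResults&#x27;: 5}],
    }],
}

response = service.projects().images().annotate(
    parent=&#x27;projects/my-project/locations/eu&#x27;, body=body).execute()

# Each element of `responses` follows the schema documented above.
for r in response.get(&#x27;responses&#x27;, []):
    for face in r.get(&#x27;faceAnnotations&#x27;, []):
        print(face[&#x27;joyLikelihood&#x27;], face[&#x27;detectionConfidence&#x27;])
</pre>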
</div>

<div class="method">
    <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(parent, body=None, x__xgafv=None)</code>
  <pre>Run asynchronous image detection and annotation for a list of images.

Progress and results can be retrieved through the
`google.longrunning.Operations` interface.
`Operation.metadata` contains `OperationMetadata` (metadata).
`Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).

This service writes image annotation outputs to JSON files in the customer&#x27;s
GCS bucket, with each JSON file containing a BatchAnnotateImagesResponse proto.

Args:
  parent: string, Optional. Target project and location to make a call.

Format: `projects/{project-id}/locations/{location-id}`.

If no parent is specified, a region will be chosen automatically.

Supported location-ids:
  `us`: USA country only,
  `asia`: East asia areas, like Japan, Taiwan,
  `eu`: The European Union.

Example: `projects/project-A/locations/eu`. (required)
  body: object, The request body.
    The object takes the form of:

{ # Request for async image annotation for a list of images.
  &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
      #
      # Format: `projects/{project-id}/locations/{location-id}`.
      #
      # If no parent is specified, a region will be chosen automatically.
      #
      # Supported location-ids:
      #   `us`: USA country only,
      #   `asia`: East asia areas, like Japan, Taiwan,
      #   `eu`: The European Union.
      #
      # Example: `projects/project-A/locations/eu`.
  &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
    &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
        # Google Cloud Storage.
        # The valid range is [1, 100]. If not specified, the default value is 20.
        #
        # For example, for one PDF file with 100 pages, 100 response protos will
        # be generated. If `batch_size` = 20, then 5 JSON files, each
        # containing 20 response protos, will be written under the prefix
        # `gcs_destination`.`uri`.
        #
        # Currently, batch_size only applies to GcsDestination, with potential future
        # support for other output configurations.
    &quot;gcsDestination&quot;: { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
      &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI prefix where the results will be stored. Results
          # will be in JSON format and preceded by their corresponding input URI prefixes.
          # This field can represent either a GCS file prefix or a GCS directory. In
          # either case, the uri should be unique, because in order to get all of the
          # output files you will need to do a wildcard GCS search on the uri prefix
          # you provide.
          #
          # Examples:
          #
          # * File Prefix: gs://bucket-name/here/filenameprefix   The output files
          #   will be created in gs://bucket-name/here/ and the names of the
          #   output files will begin with &quot;filenameprefix&quot;.
          #
          # * Directory Prefix: gs://bucket-name/some/location/   The output files
          #   will be created in gs://bucket-name/some/location/ and the names of the
          #   output files could be anything because there was no filename prefix
          #   specified.
          #
          # If there are multiple outputs, each response is still an AnnotateFileResponse,
          # each of which contains some subset of the full list of
          # AnnotateImageResponse messages.
          # Multiple outputs can happen if, for example, the output JSON is too large
          # and overflows into multiple sharded files.
    },
  },
  &quot;requests&quot;: [ # Required. Individual image annotation requests for this batch.
    { # Request for performing Google Cloud Vision API tasks over a user-provided
        # image, with user-requested features, and with context information.
      &quot;image&quot;: { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
        &quot;source&quot;: { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
            # URL. If both `content` and `source` are provided for an image, `content`
            # takes precedence and is used to perform the image annotation request.
          &quot;gcsImageUri&quot;: &quot;A String&quot;, # **Use `image_uri` instead.**
              #
              # The Google Cloud Storage URI of the form
              # `gs://bucket_name/object_name`. Object versioning is not supported. See
              # [Google Cloud Storage Request
              # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
          &quot;imageUri&quot;: &quot;A String&quot;, # The URI of the source image. Can be either:
              #
              # 1. A Google Cloud Storage URI of the form
              #    `gs://bucket_name/object_name`. Object versioning is not supported. See
              #    [Google Cloud Storage Request
              #    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
              #    info.
              #
              # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
              #    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
              #    completed. Your request may fail if the specified host denies the
              #    request (e.g. due to request throttling or DOS prevention), or if Google
              #    throttles requests to the site for abuse prevention. You should not
              #    depend on externally-hosted images for production applications.
              #
              # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
              # precedence.
        },
        &quot;content&quot;: &quot;A String&quot;, # Image content, represented as a stream of bytes.
            # Note: As with all `bytes` fields, protocol buffers use a pure binary
            # representation, whereas JSON representations use base64.
            #
            # Currently, this field only works for BatchAnnotateImages requests. It does
            # not work for AsyncBatchAnnotateImages requests.
      },
      &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
        &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
          &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
          &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
        },
        &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
          &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
        },
        &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
            # yields the best results since it enables automatic language detection. For
            # languages based on the Latin alphabet, setting `language_hints` is not
            # needed. In rare cases, when the language of the text in the image is known,
            # setting a hint will help get better results (although it will be a
            # significant hindrance if the hint is wrong). Text detection returns an
            # error if one or more of the specified languages is not one of the
            # [supported languages](https://cloud.google.com/vision/docs/languages).
          &quot;A String&quot;,
        ],
        &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
          &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
              # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
              # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
              # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
              # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
              # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
              # migrate existing products to these categories as well.
            &quot;A String&quot;,
          ],
          &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
              # If it is not specified, system discretion will be applied.
            &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the normalized vertex coordinates are relative to the original image
                  # and range from 0 to 1.
                &quot;y&quot;: 3.14, # Y coordinate.
                &quot;x&quot;: 3.14, # X coordinate.
              },
            ],
            &quot;vertices&quot;: [ # The bounding polygon vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the vertex coordinates are in the same scale as the original image.
                &quot;x&quot;: 42, # X coordinate.
                &quot;y&quot;: 42, # Y coordinate.
              },
            ],
          },
          &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
              # on Product labels. We currently support an AND of ORs of key-value
              # expressions, where each expression within an OR must have the same key. An
              # &#x27;=&#x27; should be used to connect the key and value.
              #
              # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
              # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
              # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
          &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
              #
              # Format is:
              # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
        },
        &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
          &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
              # of the image. For example, if the desired aspect ratio is 4/3, the
              # corresponding float value should be 1.33333. If not specified, the
              # best possible crop is returned. The number of provided aspect ratios is
              # limited to a maximum of 16; any aspect ratios provided after the 16th are
              # ignored.
            3.14,
          ],
        },
      },
      &quot;features&quot;: [ # Requested features.
        { # The type of Google Cloud Vision API detection to perform, and the maximum
            # number of results to return for that type. Multiple `Feature` objects can
            # be specified in the `features` list.
          &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
              # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
          &quot;type&quot;: &quot;A String&quot;, # The feature type.
          &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
              # Supported values: &quot;builtin/stable&quot; (the default if unset) and
              # &quot;builtin/latest&quot;.
        },
      ],
    },
  ],
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # This resource represents a long-running operation that is the result of a
        # network API call.
      &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
          # If `true`, the operation is completed, and either `error` or `response` is
          # available.
      &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
          # originally returns it. If you use the default HTTP mapping, the
          # `name` should be a resource name ending with `operations/{unique_id}`.
      &quot;error&quot;: { # The error result of the operation in case of failure or cancellation.
          # The `Status` type defines a logical error model that is suitable for
          # different programming environments, including REST APIs and RPC APIs. It is
          # used by [gRPC](https://github.com/grpc). Each `Status` message contains
          # three pieces of data: error code, error message, and error details.
          #
          # You can find out more about this error model and how to work with it in the
          # [API Design Guide](https://cloud.google.com/apis/design/errors).
        &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
        &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
            # message types for APIs to use.
          {
            &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
          },
        ],
        &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
            # user-facing error message should be localized and sent in the
            # google.rpc.Status.details field, or localized by the client.
      },
      &quot;response&quot;: { # The normal response of the operation in case of success. If the original
          # method returns no data on success, such as `Delete`, the response is
          # `google.protobuf.Empty`. If the original method is standard
          # `Get`/`Create`/`Update`, the response should be the resource. For other
          # methods, the response should have the type `XxxResponse`, where `Xxx`
          # is the original method name. For example, if the original method name
          # is `TakeSnapshot()`, the inferred response type is
          # `TakeSnapshotResponse`.
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
      &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically
          # contains progress information and common metadata such as create time.
          # Some services might not provide such metadata. Any method that returns a
          # long-running operation should document the metadata type, if any.
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
    }</pre>
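  <p>Below is a minimal, hypothetical sketch of calling this method and then polling the returned long-running operation with the google-api-python-client library. The project, bucket, and image names are placeholders, and polling through the service&#x27;s operations resource is one assumed way to track progress; the annotation results themselves land as JSON files under the gcsDestination prefix.</p>
  <pre>
import time

from googleapiclient.discovery import build

# Build the Cloud Vision v1 client (assumes application default credentials).
service = build(&#x27;vision&#x27;, &#x27;v1&#x27;)

body = {
    &#x27;requests&#x27;: [{
        &#x27;image&#x27;: {&#x27;source&#x27;: {&#x27;imageUri&#x27;: &#x27;gs://my-bucket/photo.jpg&#x27;}},
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;LABEL_DETECTION&#x27;}],
    }],
    &#x27;outputConfig&#x27;: {
        &#x27;gcsDestination&#x27;: {&#x27;uri&#x27;: &#x27;gs://my-bucket/vision-output/&#x27;},
        &#x27;batchSize&#x27;: 20,
    },
}

operation = service.projects().images().asyncBatchAnnotate(
    parent=&#x27;projects/my-project/locations/eu&#x27;, body=body).execute()

# Poll until the operation completes; the results are then available as
# sharded JSON files under the gcsDestination prefix given above.
name = operation[&#x27;name&#x27;]
while True:
    op = service.operations().get(name=name).execute()
    if op.get(&#x27;done&#x27;):
        break
    time.sleep(5)
</pre>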
</div>

</body></html>