<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1p1beta1.html">Cloud Vision API</a> . <a href="vision_v1p1beta1.images.html">images</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run image detection and annotation for a batch of images.</p>
<p class="toc_element">
  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run asynchronous image detection and annotation for a list of images.</p>
<h3>Method Details</h3>
<div class="method">
  <code class="details" id="annotate">annotate(body=None, x__xgafv=None)</code>
  <pre>Run image detection and annotation for a batch of images.

Args:
  body: object, The request body.
    The object takes the form of:

{ # Multiple image annotation requests are batched into a single service call.
  &quot;requests&quot;: [ # Required. Individual image annotation requests for this batch.
    { # Request for performing Google Cloud Vision API tasks over a user-provided
        # image, with user-requested features, and with context information.
      &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
        &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
          &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
              # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
              # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
              # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
              # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
              # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
              # migrate existing products to these categories as well.
            &quot;A String&quot;,
          ],
          &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
              # If it is not specified, system discretion will be applied.
            &quot;vertices&quot;: [ # The bounding polygon vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the vertex coordinates are in the same scale as the original image.
                &quot;y&quot;: 42, # Y coordinate.
                &quot;x&quot;: 42, # X coordinate.
              },
            ],
            &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
              { # A vertex represents a 2D point in the image.
                  # NOTE: the normalized vertex coordinates are relative to the original image
                  # and range from 0 to 1.
                &quot;x&quot;: 3.14, # X coordinate.
                &quot;y&quot;: 3.14, # Y coordinate.
              },
            ],
          },
          &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
              # on Product labels. We currently support an AND of ORs of key-value
              # expressions, where each expression within an OR must have the same key. An
              # &#x27;=&#x27; should be used to connect the key and value.
              #
              # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
              # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
              # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
          &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
              #
              # Format is:
              # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
        },
        &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
          &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
              # of the image. For example, if the desired aspect ratio is 4/3, the
              # corresponding float value should be 1.33333. If not specified, the
              # best possible crop is returned. The number of provided aspect ratios is
              # limited to a maximum of 16; any aspect ratios provided after the 16th are
              # ignored.
            3.14,
          ],
        },
        &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
          &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
        },
        &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
          &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
          &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
          },
        },
        &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
            # yields the best results since it enables automatic language detection. For
            # languages based on the Latin alphabet, setting `language_hints` is not
            # needed. In rare cases, when the language of the text in the image is known,
            # setting a hint will help get better results (although it will be a
            # significant hindrance if the hint is wrong). Text detection returns an
            # error if one or more of the specified languages is not one of the
            # [supported languages](https://cloud.google.com/vision/docs/languages).
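            # For example, a hypothetical pair of hints: [&quot;en&quot;, &quot;hi&quot;].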
          &quot;A String&quot;,
        ],
      },
      &quot;features&quot;: [ # Requested features.
        { # The type of Google Cloud Vision API detection to perform, and the maximum
            # number of results to return for that type. Multiple `Feature` objects can
            # be specified in the `features` list.
          &quot;type&quot;: &quot;A String&quot;, # The feature type.
          &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
              # Supported values: &quot;builtin/stable&quot; (the default if unset) and
              # &quot;builtin/latest&quot;.
          &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
              # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
        },
      ],
      &quot;image&quot;: { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
        &quot;content&quot;: &quot;A String&quot;, # Image content, represented as a stream of bytes.
            # Note: As with all `bytes` fields, protocol buffers use a pure binary
            # representation, whereas JSON representations use base64.
            #
            # Currently, this field only works for BatchAnnotateImages requests. It does
            # not work for AsyncBatchAnnotateImages requests.
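            #
            # Example (Python; the file name is hypothetical) of producing this
            # base64 value for a JSON request:
            #
            #     import base64
            #     content = base64.b64encode(open(&#x27;image.jpg&#x27;, &#x27;rb&#x27;).read()).decode(&#x27;utf-8&#x27;)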
        &quot;source&quot;: { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
            # URL. If both `content` and `source` are provided for an image, `content`
            # takes precedence and is used to perform the image annotation request.
          &quot;imageUri&quot;: &quot;A String&quot;, # The URI of the source image. Can be either:
              #
              # 1. A Google Cloud Storage URI of the form
              #    `gs://bucket_name/object_name`. Object versioning is not supported. See
              #    [Google Cloud Storage Request
              #    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
              #    info.
              #
              # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
              #    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
              #    completed. Your request may fail if the specified host denies the
              #    request (e.g. due to request throttling or DOS prevention), or if Google
              #    throttles requests to the site for abuse prevention. You should not
              #    depend on externally-hosted images for production applications.
              #
              # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
              # precedence.
          &quot;gcsImageUri&quot;: &quot;A String&quot;, # **Use `image_uri` instead.**
              #
              # The Google Cloud Storage URI of the form
              # `gs://bucket_name/object_name`. Object versioning is not supported. See
              # [Google Cloud Storage Request
              # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
        },
      },
    },
  ],
  &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
      #
      # Format: `projects/{project-id}/locations/{location-id}`.
      #
      # If no parent is specified, a region will be chosen automatically.
      #
      # Supported location-ids:
      #   `us`: USA country only,
      #   `asia`: East Asia areas, like Japan, Taiwan,
      #   `eu`: The European Union.
      #
      # Example: `projects/project-A/locations/eu`.
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

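  For example, a minimal call might look like the sketch below (the bucket and
  object names are hypothetical, and error handling is omitted):

    from googleapiclient.discovery import build

    # Build a client for the v1p1beta1 Vision API surface documented on this page.
    service = build(&#x27;vision&#x27;, &#x27;v1p1beta1&#x27;)

    # Request the top five labels for a single Cloud Storage image.
    response = service.images().annotate(body={
        &#x27;requests&#x27;: [{
            &#x27;image&#x27;: {&#x27;source&#x27;: {&#x27;imageUri&#x27;: &#x27;gs://my-bucket/my-image.jpg&#x27;}},
            &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;LABEL_DETECTION&#x27;, &#x27;maxResults&#x27;: 5}],
        }],
    }).execute()
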
Returns:
  An object of the form:

    { # Response to a batch image annotation request.
      &quot;responses&quot;: [ # Individual responses to image annotation requests within the batch.
        { # Response to an image annotation request.
          &quot;fullTextAnnotation&quot;: { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
              # completed successfully.
              # This annotation provides the structural hierarchy for the OCR detected
              # text.
              # The hierarchy of an OCR extracted text structure is like this:
              #     TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
              # Each structural component, starting from Page, may further have its own
              # properties. Properties describe detected languages, breaks, etc. Please refer
              # to the TextAnnotation.TextProperty message definition below for more
              # detail.
            &quot;pages&quot;: [ # List of pages detected by OCR.
              { # Detected page from OCR.
                &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the page. Range [0, 1].
                &quot;blocks&quot;: [ # List of blocks of text, images, etc. on this page.
                  { # Logical element on the page.
                    &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the block.
                        # The vertices are in the order of top-left, top-right, bottom-right,
                        # bottom-left. When a rotation of the bounding box is detected the rotation
                        # is represented as around the top-left corner as defined when the text is
                        # read in the &#x27;natural&#x27; orientation.
                        # For example:
                        #
                        # * when the text is horizontal it might look like:
                        #
                        #     0----1
                        #     |    |
                        #     3----2
                        #
                        # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                        #
                        #     2----3
                        #     |    |
                        #     1----0
                        #
                        # and the vertex order will still be (0, 1, 2, 3).
                      &quot;vertices&quot;: [ # The bounding polygon vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the vertex coordinates are in the same scale as the original image.
                          &quot;y&quot;: 42, # Y coordinate.
                          &quot;x&quot;: 42, # X coordinate.
                        },
                      ],
                      &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                        { # A vertex represents a 2D point in the image.
                            # NOTE: the normalized vertex coordinates are relative to the original image
                            # and range from 0 to 1.
                          &quot;x&quot;: 3.14, # X coordinate.
                          &quot;y&quot;: 3.14, # Y coordinate.
                        },
                      ],
                    },
                    &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the block. Range [0, 1].
                    &quot;paragraphs&quot;: [ # List of paragraphs in this block (if this block is of type text).
                      { # Structural unit of text representing a number of words in certain order.
                        &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
                            # The vertices are in the order of top-left, top-right, bottom-right,
                            # bottom-left. When a rotation of the bounding box is detected the rotation
                            # is represented as around the top-left corner as defined when the text is
                            # read in the &#x27;natural&#x27; orientation.
                            # For example:
                            # * when the text is horizontal it might look like:
                            #     0----1
                            #     |    |
                            #     3----2
                            # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                            #     2----3
                            #     |    |
                            #     1----0
                            # and the vertex order will still be (0, 1, 2, 3).
                          &quot;vertices&quot;: [ # The bounding polygon vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the vertex coordinates are in the same scale as the original image.
                              &quot;y&quot;: 42, # Y coordinate.
                              &quot;x&quot;: 42, # X coordinate.
                            },
                          ],
                          &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                            { # A vertex represents a 2D point in the image.
                                # NOTE: the normalized vertex coordinates are relative to the original image
                                # and range from 0 to 1.
                              &quot;x&quot;: 3.14, # X coordinate.
                              &quot;y&quot;: 3.14, # Y coordinate.
                            },
                          ],
                        },
                        &quot;words&quot;: [ # List of all words in this paragraph.
                          { # A word representation.
                            &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the word. Range [0, 1].
                            &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the word.
                              &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                              },
                              &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                { # Detected language for a structural component.
                                  &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                      # information, see
                                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                  &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                },
                              ],
                            },
                            &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the word.
                                # The vertices are in the order of top-left, top-right, bottom-right,
                                # bottom-left. When a rotation of the bounding box is detected the rotation
                                # is represented as around the top-left corner as defined when the text is
                                # read in the &#x27;natural&#x27; orientation.
                                # For example:
                                # * when the text is horizontal it might look like:
                                #     0----1
                                #     |    |
                                #     3----2
                                # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                #     2----3
                                #     |    |
                                #     1----0
                                # and the vertex order will still be (0, 1, 2, 3).
                              &quot;vertices&quot;: [ # The bounding polygon vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the vertex coordinates are in the same scale as the original image.
                                  &quot;y&quot;: 42, # Y coordinate.
                                  &quot;x&quot;: 42, # X coordinate.
                                },
                              ],
                              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                { # A vertex represents a 2D point in the image.
                                    # NOTE: the normalized vertex coordinates are relative to the original image
                                    # and range from 0 to 1.
                                  &quot;x&quot;: 3.14, # X coordinate.
                                  &quot;y&quot;: 3.14, # Y coordinate.
                                },
                              ],
                            },
                            &quot;symbols&quot;: [ # List of symbols in the word.
                                # The order of the symbols follows the natural reading order.
                              { # A single symbol representation.
                                &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
                                &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the symbol.
                                  &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                    &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                    &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                  },
                                  &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                    { # Detected language for a structural component.
                                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                          # information, see
                                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                      &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                    },
                                  ],
                                },
                                &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
                                    # The vertices are in the order of top-left, top-right, bottom-right,
                                    # bottom-left. When a rotation of the bounding box is detected the rotation
                                    # is represented as around the top-left corner as defined when the text is
                                    # read in the &#x27;natural&#x27; orientation.
                                    # For example:
                                    # * when the text is horizontal it might look like:
                                    #     0----1
                                    #     |    |
                                    #     3----2
                                    # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                    #     2----3
                                    #     |    |
                                    #     1----0
                                    # and the vertex order will still be (0, 1, 2, 3).
                                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the vertex coordinates are in the same scale as the original image.
                                      &quot;y&quot;: 42, # Y coordinate.
                                      &quot;x&quot;: 42, # X coordinate.
                                    },
                                  ],
                                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                    { # A vertex represents a 2D point in the image.
                                        # NOTE: the normalized vertex coordinates are relative to the original image
                                        # and range from 0 to 1.
                                      &quot;x&quot;: 3.14, # X coordinate.
                                      &quot;y&quot;: 3.14, # Y coordinate.
                                    },
                                  ],
                                },
                                &quot;text&quot;: &quot;A String&quot;, # The actual UTF-8 representation of the symbol.
                              },
                            ],
                          },
                        ],
                        &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the paragraph.
                          &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                            &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                            &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                          },
                          &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                            { # Detected language for a structural component.
                              &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                  # information, see
                                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                              &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                            },
                          ],
                        },
                        &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
                      },
                    ],
                    &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the block.
                      &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                        &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                        &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                      },
                      &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                        { # Detected language for a structural component.
                          &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                              # information, see
                              # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                          &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                        },
                      ],
                    },
                    &quot;blockType&quot;: &quot;A String&quot;, # Detected block type (text, image, etc.) for this block.
                  },
                ],
                &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected on the page.
                  &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                    &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                    &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                  },
                  &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                    { # Detected language for a structural component.
                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                          # information, see
                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                      &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                    },
                  ],
                },
                &quot;height&quot;: 42, # Page height. For PDFs the unit is points. For images (including
                    # TIFFs) the unit is pixels.
                &quot;width&quot;: 42, # Page width. For PDFs the unit is points. For images (including
                    # TIFFs) the unit is pixels.
              },
            ],
            &quot;text&quot;: &quot;A String&quot;, # UTF-8 text detected on the pages.
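                # For example (Python), given a successful annotate() response as
                # returned by the sketch above, the concatenated page text would be:
                #
                #     response[&#x27;responses&#x27;][0][&#x27;fullTextAnnotation&#x27;][&#x27;text&#x27;]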
          },
          &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
              # Note that filled-in image annotations are guaranteed to be
              # correct, even when `error` is set.
              # different programming environments, including REST APIs and RPC APIs. It is
              # used by [gRPC](https://github.com/grpc). Each `Status` message contains
              # three pieces of data: error code, error message, and error details.
              #
              # You can find out more about this error model and how to work with it in the
              # [API Design Guide](https://cloud.google.com/apis/design/errors).
            &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
                # message types for APIs to use.
              {
                &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
              },
            ],
            &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
                # user-facing error message should be localized and sent in the
                # google.rpc.Status.details field, or localized by the client.
            &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
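                # For example (Python), a caller might keep only the responses that
                # completed without error:
                #
                #     ok = [r for r in response[&#x27;responses&#x27;] if &#x27;error&#x27; not in r]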
          },
          &quot;textAnnotations&quot;: [ # If present, text (OCR) detection has completed successfully.
            { # Set of detected entity features.
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;faceAnnotations&quot;: [ # If present, face detection has completed successfully.
            { # A face annotation object contains the results of face detection.
              &quot;landmarkingConfidence&quot;: 3.14, # Face landmarking confidence. Range [0, 1].
              &quot;panAngle&quot;: 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
                  # pointing relative to the vertical plane perpendicular to the image. Range
                  # [-180,180].
              &quot;surpriseLikelihood&quot;: &quot;A String&quot;, # Surprise likelihood.
              &quot;tiltAngle&quot;: 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
                  # pointing relative to the image&#x27;s horizontal plane. Range [-180,180].
              &quot;blurredLikelihood&quot;: &quot;A String&quot;, # Blurred likelihood.
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                  # are in the original image&#x27;s scale.
                  # The bounding box is computed to &quot;frame&quot; the face in accordance with human
                  # expectations. It is based on the landmarker results.
                  # Note that one or more x and/or y coordinates may not be generated in the
                  # `BoundingPoly` (the polygon will be unbounded) if only a partial face
                  # appears in the image to be annotated.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;headwearLikelihood&quot;: &quot;A String&quot;, # Headwear likelihood.
              &quot;fdBoundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
                  # `boundingPoly`, and encloses only the skin part of the face. Typically, it
                  # is used to eliminate the face from any image analysis that detects the
                  # &quot;amount of skin&quot; visible in an image. It is not based on the
                  # landmarker results, only on the initial face detection, hence
                  # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;detectionConfidence&quot;: 3.14, # Detection confidence. Range [0, 1].
              &quot;underExposedLikelihood&quot;: &quot;A String&quot;, # Under-exposed likelihood.
              &quot;joyLikelihood&quot;: &quot;A String&quot;, # Joy likelihood.
              &quot;sorrowLikelihood&quot;: &quot;A String&quot;, # Sorrow likelihood.
              &quot;landmarks&quot;: [ # Detected face landmarks.
                { # A face-specific landmark (for example, a face feature).
                  &quot;position&quot;: { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
                      # A valid Position must have both x and y coordinates.
                      # The position coordinates are in the same scale as the original image.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                    &quot;z&quot;: 3.14, # Z coordinate (or depth).
                  },
                  &quot;type&quot;: &quot;A String&quot;, # Face landmark type.
                },
              ],
              &quot;rollAngle&quot;: 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
                  # of the face relative to the image vertical about the axis perpendicular to
                  # the face. Range [-180,180].
              &quot;angerLikelihood&quot;: &quot;A String&quot;, # Anger likelihood.
            },
          ],
          &quot;context&quot;: { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
              # comes from.
              # information about the source of that image.
            &quot;uri&quot;: &quot;A String&quot;, # The URI of the file used to produce the image.
            &quot;pageNumber&quot;: 42, # If the file was a PDF or TIFF, this field gives the page number within
                # the file used to produce the image.
          },
          &quot;imagePropertiesAnnotation&quot;: { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
            &quot;dominantColors&quot;: { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
              &quot;colors&quot;: [ # RGB color values with their score and pixel fraction.
                { # Color information consists of RGB channels, score, and the fraction of
                    # the image that the color occupies in the image.
                  &quot;pixelFraction&quot;: 3.14, # The fraction of pixels the color occupies in the image.
                      # Value in range [0, 1].
                  &quot;score&quot;: 3.14, # Image-specific score for this color. Value in range [0, 1].
                  &quot;color&quot;: { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                      # for simplicity of conversion to/from color representations in various
                      # languages over compactness; for example, the fields of this representation
                      # can be trivially provided to the constructor of &quot;java.awt.Color&quot; in Java; it
                      # can also be trivially provided to UIColor&#x27;s &quot;+colorWithRed:green:blue:alpha&quot;
                      # method in iOS; and, with just a little work, it can be easily formatted into
                      # a CSS &quot;rgba()&quot; string in JavaScript, as well.
                      #
                      # Note: this proto does not carry information about the absolute color space
                      # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
                      # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
                      # space.
                      #
                      # Note: when color equality needs to be decided, implementations, unless
                      # documented otherwise, will treat two colors as equal if all their red,
                      # green, blue and alpha values each differ by at most 1e-5.
                      #
                      # Example (Java):
                      #
                      #      import com.google.type.Color;
                      #
                      #      // ...
                      #      public static java.awt.Color fromProto(Color protocolor) {
                      #        float alpha = protocolor.hasAlpha()
                      #            ? protocolor.getAlpha().getValue()
                      #            : 1.0f;
                      #
                      #        return new java.awt.Color(
                      #            protocolor.getRed(),
                      #            protocolor.getGreen(),
                      #            protocolor.getBlue(),
                      #            alpha);
                      #      }
                      #
                      #      public static Color toProto(java.awt.Color color) {
                      #        float red = (float) color.getRed();
                      #        float green = (float) color.getGreen();
                      #        float blue = (float) color.getBlue();
                      #        float denominator = 255.0f;
                      #        Color.Builder resultBuilder =
                      #            Color
                      #                .newBuilder()
                      #                .setRed(red / denominator)
                      #                .setGreen(green / denominator)
                      #                .setBlue(blue / denominator);
                      #        int alpha = color.getAlpha();
                      #        if (alpha != 255) {
                      #          resultBuilder.setAlpha(
                      #              FloatValue
                      #                  .newBuilder()
                      #                  .setValue(((float) alpha) / denominator)
                      #                  .build());
                      #        }
                      #        return resultBuilder.build();
                      #      }
                      #      // ...
                      #
                      # Example (iOS / Obj-C):
                      #
                      #      // ...
                      #      static UIColor* fromProto(Color* protocolor) {
                      #        float red = [protocolor red];
                      #        float green = [protocolor green];
                      #        float blue = [protocolor blue];
                      #        FloatValue* alpha_wrapper = [protocolor alpha];
                      #        float alpha = 1.0;
                      #        if (alpha_wrapper != nil) {
                      #          alpha = [alpha_wrapper value];
                      #        }
                      #        return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                      #      }
                      #
                      #      static Color* toProto(UIColor* color) {
                      #        CGFloat red, green, blue, alpha;
                      #        if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
                      #          return nil;
                      #        }
                      #        Color* result = [[Color alloc] init];
                      #        [result setRed:red];
                      #        [result setGreen:green];
                      #        [result setBlue:blue];
                      #        if (alpha &lt;= 0.9999) {
                      #          [result setAlpha:floatWrapperWithValue(alpha)];
                      #        }
                      #        [result autorelease];
                      #        return result;
                      #      }
                      #      // ...
                      #
                      # Example (JavaScript):
                      #
                      #     // ...
                      #
                      #     var protoToCssColor = function(rgb_color) {
                      #       var redFrac = rgb_color.red || 0.0;
                      #       var greenFrac = rgb_color.green || 0.0;
                      #       var blueFrac = rgb_color.blue || 0.0;
                      #       var red = Math.floor(redFrac * 255);
                      #       var green = Math.floor(greenFrac * 255);
                      #       var blue = Math.floor(blueFrac * 255);
                      #
                      #       if (!(&#x27;alpha&#x27; in rgb_color)) {
                      #         return rgbToCssColor_(red, green, blue);
                      #       }
                      #
                      #       var alphaFrac = rgb_color.alpha.value || 0.0;
                      #       var rgbParams = [red, green, blue].join(&#x27;,&#x27;);
                      #       return [&#x27;rgba(&#x27;, rgbParams, &#x27;,&#x27;, alphaFrac, &#x27;)&#x27;].join(&#x27;&#x27;);
                      #     };
                      #
                      #     var rgbToCssColor_ = function(red, green, blue) {
                      #       var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
                      #       var hexString = rgbNumber.toString(16);
                      #       var missingZeros = 6 - hexString.length;
                      #       var resultBuilder = [&#x27;#&#x27;];
                      #       for (var i = 0; i &lt; missingZeros; i++) {
                      #         resultBuilder.push(&#x27;0&#x27;);
                      #       }
                      #       resultBuilder.push(hexString);
                      #       return resultBuilder.join(&#x27;&#x27;);
                      #     };
                      #
                      #     // ...
                    &quot;blue&quot;: 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                    &quot;green&quot;: 3.14, # The amount of green in the color as a value in the interval [0, 1].
                    &quot;red&quot;: 3.14, # The amount of red in the color as a value in the interval [0, 1].
                    &quot;alpha&quot;: 3.14, # The fraction of this color that should be applied to the pixel. That is,
                        # the final pixel color is defined by the equation:
                        #
                        #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                        #
                        # This means that a value of 1.0 corresponds to a solid color, whereas
                        # a value of 0.0 corresponds to a completely transparent color. This
                        # uses a wrapper message rather than a simple float scalar so that it is
                        # possible to distinguish between a default value and the value being unset.
                        # If omitted, this color object is to be rendered as a solid color
                        # (as if the alpha value had been explicitly given with a value of 1.0).
                  },
                },
              ],
            },
          },
          &quot;webDetection&quot;: { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
            &quot;visuallySimilarImages&quot;: [ # The visually similar image results.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;bestGuessLabels&quot;: [ # The service&#x27;s best guess as to the topic of the request image.
                # Inferred from similar images on the open web.
              { # Label to provide extra metadata for the web detection.
                &quot;label&quot;: &quot;A String&quot;, # Label for extra metadata.
                &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code for `label`, such as &quot;en-US&quot; or &quot;sr-Latn&quot;.
                    # For more information, see
                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
              },
            ],
            &quot;fullMatchingImages&quot;: [ # Fully matching images from the Internet.
                # Can include resized copies of the query image.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;webEntities&quot;: [ # Deduced entities from similar images on the Internet.
              { # Entity deduced from similar images on the Internet.
                &quot;description&quot;: &quot;A String&quot;, # Canonical description of the entity, in English.
                &quot;entityId&quot;: &quot;A String&quot;, # Opaque entity ID.
                &quot;score&quot;: 3.14, # Overall relevancy score for the entity.
                    # Not normalized and not comparable across different image queries.
              },
            ],
            &quot;partialMatchingImages&quot;: [ # Partial matching images from the Internet.
                # Those images are similar enough to share some key-point features. For
                # example an original image will likely have partial matching for its crops.
              { # Metadata for online images.
                &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
              },
            ],
            &quot;pagesWithMatchingImages&quot;: [ # Web pages containing the matching images from the Internet.
              { # Metadata for web pages.
                &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the web page.
                &quot;url&quot;: &quot;A String&quot;, # The result web page URL.
                &quot;partialMatchingImages&quot;: [ # Partial matching images on the page.
                    # Those images are similar enough to share some key-point features. For
                    # example an original image will likely have partial matching for its
                    # crops.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
                &quot;pageTitle&quot;: &quot;A String&quot;, # Title for the web page; may contain HTML markup.
                &quot;fullMatchingImages&quot;: [ # Fully matching images on the page.
                    # Can include resized copies of the query image.
                  { # Metadata for online images.
                    &quot;url&quot;: &quot;A String&quot;, # The result image URL.
                    &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
                  },
                ],
              },
            ],
          },
          &quot;landmarkAnnotations&quot;: [ # If present, landmark detection has completed successfully.
            { # Set of detected entity features.
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;cropHintsAnnotation&quot;: { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
            &quot;cropHints&quot;: [ # Crop hint results.
              { # Single crop hint that is used to generate a new crop when serving an image.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
                    # box are in the original image&#x27;s scale.
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                },
                &quot;importanceFraction&quot;: 3.14, # Fraction of importance of this salient region with respect to the original
                    # image.
                &quot;confidence&quot;: 3.14, # Confidence of this being a salient region. Range [0, 1].
              },
            ],
          },
          &quot;logoAnnotations&quot;: [ # If present, logo detection has completed successfully.
            { # Set of detected entity features.
              &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                  # `description` is expressed.
              &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                  # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                  # detected distant towering building, even though the confidence that
                  # there is a tower in each image may be the same. Range [0, 1].
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700985 &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
986 # [Google Knowledge Graph Search
987 # API](https://developers.google.com/knowledge-graph/).
988 &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
989 # The accuracy of the entity detection in an image.
990 # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
991 # this field represents the confidence that there is a tower in the query
992 # image. Range [0, 1].
993 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
994 # for `LABEL_DETECTION` features.
995 &quot;vertices&quot;: [ # The bounding polygon vertices.
996 { # A vertex represents a 2D point in the image.
997 # NOTE: the vertex coordinates are in the same scale as the original image.
998 &quot;y&quot;: 42, # Y coordinate.
999 &quot;x&quot;: 42, # X coordinate.
1000 },
1001 ],
1002 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1003 { # A vertex represents a 2D point in the image.
1004 # NOTE: the normalized vertex coordinates are relative to the original image
1005 # and range from 0 to 1.
1006 &quot;x&quot;: 3.14, # X coordinate.
1007 &quot;y&quot;: 3.14, # Y coordinate.
1008 },
1009 ],
1010 },
1011 &quot;locations&quot;: [ # The location information for the detected entity. Multiple
1012 # `LocationInfo` elements can be present because one location may
1013 # indicate the location of the scene in the image, and another location
1014 # may indicate the location of the place where the image was taken.
1015 # Location information is usually present for landmarks.
1016 { # Detected entity location information.
1017 &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1018 # of doubles representing degrees latitude and degrees longitude. Unless
1019 # specified otherwise, this must conform to the
1020 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1021 # standard&lt;/a&gt;. Values must be within normalized ranges.
1022 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1023 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1024 },
1025 },
1026 ],
1027 &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
1028 &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001029 },
1030 ],
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001031 &quot;localizedObjectAnnotations&quot;: [ # If present, localized object detection has completed successfully.
1032 # This will be sorted descending by confidence score.
1033 { # Set of detected objects with bounding boxes.
1034 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
1035 &quot;vertices&quot;: [ # The bounding polygon vertices.
1036 { # A vertex represents a 2D point in the image.
1037 # NOTE: the vertex coordinates are in the same scale as the original image.
1038 &quot;y&quot;: 42, # Y coordinate.
1039 &quot;x&quot;: 42, # X coordinate.
1040 },
1041 ],
1042 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1043 { # A vertex represents a 2D point in the image.
1044 # NOTE: the normalized vertex coordinates are relative to the original image
1045 # and range from 0 to 1.
1046 &quot;x&quot;: 3.14, # X coordinate.
1047 &quot;y&quot;: 3.14, # Y coordinate.
1048 },
1049 ],
1050 },
1051 &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
1052 &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
1053 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
1054 # information, see
1055 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
1056 &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
1057 },
1058 ],
1059 &quot;labelAnnotations&quot;: [ # If present, label detection has completed successfully.
1060 { # Set of detected entity features.
1061 &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
1062 # `description` is expressed.
1063 &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1064 # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
1065 # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
1066 # detected distant towering building, even though the confidence that
1067 # there is a tower in each image may be the same. Range [0, 1].
              &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                  # fields, such as a score or string that qualifies the entity.
                { # A `Property` consists of a user-supplied name/value pair.
                  &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                  &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                  &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                },
              ],
              &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                  # [Google Knowledge Graph Search
                  # API](https://developers.google.com/knowledge-graph/).
              &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                  # The accuracy of the entity detection in an image.
                  # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                  # this field represents the confidence that there is a tower in the query
                  # image. Range [0, 1].
              &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                  # for `LABEL_DETECTION` features.
                &quot;vertices&quot;: [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    &quot;y&quot;: 42, # Y coordinate.
                    &quot;x&quot;: 42, # X coordinate.
                  },
                ],
                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                    &quot;x&quot;: 3.14, # X coordinate.
                    &quot;y&quot;: 3.14, # Y coordinate.
                  },
                ],
              },
              &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                  # `LocationInfo` elements can be present because one location may
                  # indicate the location of the scene in the image, and another location
                  # may indicate the location of the place where the image was taken.
                  # Location information is usually present for landmarks.
                { # Detected entity location information.
                  &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                      # standard&lt;/a&gt;. Values must be within normalized ranges.
                    &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  },
                },
              ],
              &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
              &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
            },
          ],
          &quot;productSearchResults&quot;: { # Results for a product search request. # If present, product search has completed successfully.
            &quot;productGroupedResults&quot;: [ # List of results grouped by products detected in the query image. Each entry
                # corresponds to one bounding polygon in the query image, and contains the
                # matching products specific to that region. There may be duplicate product
                # matches in the union of all the per-product results.
              { # Information about the products similar to a single product in a query
                  # image.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                },
                &quot;results&quot;: [ # List of results, one for each product match.
                  { # Information about a product.
                    &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                        # to the query.
                    &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                        # 1 (full confidence).
                    &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                      &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                          # 4096 characters long.
                      &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                          # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                          # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                          # not be used for new products.
                      &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                          # constraints can be specified based on the product_labels.
                          #
                          # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                          # strings with integer values can match a range-based restriction which is
                          # to be supported soon.
                          #
                          # Multiple values can be assigned to the same key. One product may have up to
                          # 500 product_labels.
                          #
                          # Notice that the total number of distinct product_labels over all products
                          # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                          # will refuse to work for that ProductSet.
                        { # A product label represented as a key-value pair.
                          &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                              # exceed 128 bytes.
                          &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                              # cannot exceed 128 bytes.
                        },
                      ],
                      &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                          #
                          # Format is:
                          # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                          #
                          # This field is ignored when creating a product.
                      &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                          # characters long.
                    },
                  },
                ],
                &quot;objectAnnotations&quot;: [ # List of generic predictions for the object in the bounding box.
                  { # Prediction for what the object in the bounding box is.
                    &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
                    &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                        # information, see
                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                    &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
                    &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
                  },
                ],
              },
            ],
            &quot;results&quot;: [ # List of results, one for each product match.
              { # Information about a product.
                &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                    # to the query.
                &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                    # 1 (full confidence).
                &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                  &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                      # 4096 characters long.
                  &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                      # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                      # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                      # not be used for new products.
                  &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                      # constraints can be specified based on the product_labels.
                      #
                      # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                      # strings with integer values can match a range-based restriction which is
                      # to be supported soon.
                      #
                      # Multiple values can be assigned to the same key. One product may have up to
                      # 500 product_labels.
                      #
                      # Notice that the total number of distinct product_labels over all products
                      # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                      # will refuse to work for that ProductSet.
                    { # A product label represented as a key-value pair.
                      &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                          # exceed 128 bytes.
                      &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                          # cannot exceed 128 bytes.
                    },
                  ],
                  &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                      #
                      # Format is:
                      # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                      #
                      # This field is ignored when creating a product.
                  &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                      # characters long.
                },
              },
            ],
            &quot;indexTime&quot;: &quot;A String&quot;, # Timestamp of the index which provided these results. Products added to the
                # product set and products removed from the product set after this time are
                # not reflected in the current results.
          },
          &quot;safeSearchAnnotation&quot;: { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
              # methods over safe-search verticals (for example, adult, spoof, medical,
              # violence).
            &quot;medical&quot;: &quot;A String&quot;, # Likelihood that this is a medical image.
            &quot;racy&quot;: &quot;A String&quot;, # Likelihood that the request image contains racy content. Racy content may
                # include (but is not limited to) skimpy or sheer clothing, strategically
                # covered nudity, lewd or provocative poses, or close-ups of sensitive
                # body areas.
            &quot;spoof&quot;: &quot;A String&quot;, # Spoof likelihood. The likelihood that a modification
                # was made to the image&#x27;s canonical version to make it appear
                # funny or offensive.
            &quot;violence&quot;: &quot;A String&quot;, # Likelihood that this image contains violent content.
            &quot;adult&quot;: &quot;A String&quot;, # Represents the adult content likelihood for the image. Adult content may
                # contain elements such as nudity, pornographic images or cartoons, or
                # sexual activities.
          },
        },
      ],
    }</pre>
</div>
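<h3>Example</h3>
<p>A minimal, illustrative sketch of calling <code>annotate</code> through this
client library. It assumes the google-api-python-client package is installed and
that default credentials are configured; the file name and the chosen feature
are placeholders, not part of the generated reference above.</p>
<pre>
# Illustrative sketch: synchronous batch annotation with LABEL_DETECTION.
# Assumes default credentials and a local file named &#x27;local-image.jpg&#x27;.
import base64

from googleapiclient.discovery import build

service = build(&#x27;vision&#x27;, &#x27;v1p1beta1&#x27;)

# JSON requests carry image bytes as base64 (see the `content` field above).
with open(&#x27;local-image.jpg&#x27;, &#x27;rb&#x27;) as f:
    encoded = base64.b64encode(f.read()).decode(&#x27;utf-8&#x27;)

body = {
    &#x27;requests&#x27;: [{
        &#x27;image&#x27;: {&#x27;content&#x27;: encoded},
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;LABEL_DETECTION&#x27;, &#x27;maxResults&#x27;: 10}],
    }],
}

response = service.images().annotate(body=body).execute()
for result in response.get(&#x27;responses&#x27;, []):
    for label in result.get(&#x27;labelAnnotations&#x27;, []):
        print(label[&#x27;description&#x27;], label[&#x27;score&#x27;])
</pre>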

<div class="method">
    <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</code>
  <pre>Run asynchronous image detection and annotation for a list of images.

Progress and results can be retrieved through the
`google.longrunning.Operations` interface.
`Operation.metadata` contains `OperationMetadata` (metadata).
`Operation.response` contains `AsyncBatchAnnotateImagesResponse` (results).

This service writes image annotation outputs to JSON files in the customer&#x27;s
GCS bucket, with each JSON file containing a BatchAnnotateImagesResponse proto.

Args:
  body: object, The request body.
    The object takes the form of:

{ # Request for async image annotation for a list of images.
    &quot;requests&quot;: [ # Required. Individual image annotation requests for this batch.
      { # Request for performing Google Cloud Vision API tasks over a user-provided
          # image, with user-requested features, and with context information.
        &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
          &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
            &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
                # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
                # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
                # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
                # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
                # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
                # migrate existing products to these categories as well.
              &quot;A String&quot;,
            ],
            &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                # If it is not specified, system discretion will be applied.
              &quot;vertices&quot;: [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  &quot;y&quot;: 42, # Y coordinate.
                  &quot;x&quot;: 42, # X coordinate.
                },
              ],
              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the normalized vertex coordinates are relative to the original image
                    # and range from 0 to 1.
                  &quot;x&quot;: 3.14, # X coordinate.
                  &quot;y&quot;: 3.14, # Y coordinate.
                },
              ],
            },
            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
                # on Product labels. We currently support an AND of OR of key-value
                # expressions, where each expression within an OR must have the same key. An
                # &#x27;=&#x27; should be used to connect the key and value.
                #
                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
                #
                # Format is:
                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
          },
          &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
            &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
                # of the image. For example, if the desired aspect ratio is 4/3, the
                # corresponding float value should be 1.33333. If not specified, the
                # best possible crop is returned. The number of provided aspect ratios is
                # limited to a maximum of 16; any aspect ratios provided after the 16th are
                # ignored.
              3.14,
            ],
          },
          &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
            &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
          },
          &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
            &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            },
            &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            },
          },
          &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
              # yields the best results since it enables automatic language detection. For
              # languages based on the Latin alphabet, setting `language_hints` is not
              # needed. In rare cases, when the language of the text in the image is known,
              # setting a hint will help get better results (although it will be a
              # significant hindrance if the hint is wrong). Text detection returns an
              # error if one or more of the specified languages is not one of the
              # [supported languages](https://cloud.google.com/vision/docs/languages).
            &quot;A String&quot;,
          ],
        },
        &quot;features&quot;: [ # Requested features.
          { # The type of Google Cloud Vision API detection to perform, and the maximum
              # number of results to return for that type. Multiple `Feature` objects can
              # be specified in the `features` list.
            &quot;type&quot;: &quot;A String&quot;, # The feature type.
            &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
                # Supported values: &quot;builtin/stable&quot; (the default if unset) and
                # &quot;builtin/latest&quot;.
            &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
          },
        ],
        &quot;image&quot;: { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
          &quot;content&quot;: &quot;A String&quot;, # Image content, represented as a stream of bytes.
              # Note: As with all `bytes` fields, protobuffers use a pure binary
              # representation, whereas JSON representations use base64.
              #
              # Currently, this field only works for BatchAnnotateImages requests. It does
              # not work for AsyncBatchAnnotateImages requests.
          &quot;source&quot;: { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
              # URL. If both `content` and `source` are provided for an image, `content`
              # takes precedence and is used to perform the image annotation request.
            &quot;imageUri&quot;: &quot;A String&quot;, # The URI of the source image. Can be either:
                #
                # 1. A Google Cloud Storage URI of the form
                # `gs://bucket_name/object_name`. Object versioning is not supported. See
                # [Google Cloud Storage Request
                # URIs](https://cloud.google.com/storage/docs/reference-uris) for more
                # info.
                #
                # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
                # HTTP/HTTPS URLs, Google cannot guarantee that the request will be
                # completed. Your request may fail if the specified host denies the
                # request (e.g. due to request throttling or DOS prevention), or if Google
                # throttles requests to the site for abuse prevention. You should not
                # depend on externally-hosted images for production applications.
                #
                # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
                # precedence.
            &quot;gcsImageUri&quot;: &quot;A String&quot;, # **Use `image_uri` instead.**
                #
                # The Google Cloud Storage URI of the form
                # `gs://bucket_name/object_name`. Object versioning is not supported. See
                # [Google Cloud Storage Request
                # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
          },
        },
      },
    ],
    &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
      &quot;gcsDestination&quot;: { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
        &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI prefix where the results will be stored. Results
            # will be in JSON format and preceded by their corresponding input URI prefixes.
            # This field can represent either a GCS file prefix or a GCS directory. In
            # either case, the URI should be unique, because to retrieve all of the
            # output files you will need to do a wildcard GCS search on the URI prefix
            # you provide.
            #
            # Examples:
            #
            # * File Prefix: gs://bucket-name/here/filenameprefix The output files
            # will be created in gs://bucket-name/here/ and the names of the
            # output files will begin with &quot;filenameprefix&quot;.
            #
            # * Directory Prefix: gs://bucket-name/some/location/ The output files
            # will be created in gs://bucket-name/some/location/ and the names of the
            # output files could be anything because there was no filename prefix
            # specified.
            #
            # If multiple outputs, each response is still AnnotateFileResponse, each of
            # which contains some subset of the full list of AnnotateImageResponse.
            # Multiple outputs can happen if, for example, the output JSON is too large
            # and overflows into multiple sharded files.
      },
      &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
          # Google Cloud Storage.
          # The valid range is [1, 100]. If not specified, the default value is 20.
          #
          # For example, for one pdf file with 100 pages, 100 response protos will
          # be generated. If `batch_size` = 20, then 5 json files each
          # containing 20 response protos will be written under the prefix
          # `gcs_destination`.`uri`.
          #
          # Currently, batch_size only applies to GcsDestination, with potential future
          # support for other output configurations.
    },
    &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
        #
        # Format: `projects/{project-id}/locations/{location-id}`.
        #
        # If no parent is specified, a region will be chosen automatically.
        #
        # Supported location-ids:
        # `us`: USA country only,
        # `asia`: East Asia areas, like Japan and Taiwan,
        # `eu`: The European Union.
        #
        # Example: `projects/project-A/locations/eu`.
  }

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # This resource represents a long-running operation that is the result of a
        # network API call.
      &quot;response&quot;: { # The normal response of the operation in case of success. If the original
          # method returns no data on success, such as `Delete`, the response is
          # `google.protobuf.Empty`. If the original method is standard
          # `Get`/`Create`/`Update`, the response should be the resource. For other
          # methods, the response should have the type `XxxResponse`, where `Xxx`
          # is the original method name. For example, if the original method name
          # is `TakeSnapshot()`, the inferred response type is
          # `TakeSnapshotResponse`.
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
      &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically
          # contains progress information and common metadata such as create time.
          # Some services might not provide such metadata. Any method that returns a
          # long-running operation should document the metadata type, if any.
        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
      },
      &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
          # originally returns it. If you use the default HTTP mapping, the
          # `name` should be a resource name ending with `operations/{unique_id}`.
      &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
          # If `true`, the operation is completed, and either `error` or `response` is
          # available.
      &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
          # different programming environments, including REST APIs and RPC APIs. It is
          # used by [gRPC](https://github.com/grpc). Each `Status` message contains
          # three pieces of data: error code, error message, and error details.
          #
          # You can find out more about this error model and how to work with it in the
          # [API Design Guide](https://cloud.google.com/apis/design/errors).
        &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
            # message types for APIs to use.
          {
            &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
          },
        ],
        &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
            # user-facing error message should be localized and sent in the
            # google.rpc.Status.details field, or localized by the client.
        &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
      },
    }</pre>
</div>
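<h3>Example</h3>
<p>A minimal, illustrative sketch of starting an asynchronous batch and locating
its output. The bucket names are placeholders and the retrieval strategy is an
assumption; results are reported through the
<code>google.longrunning.Operations</code> interface and written under the GCS
prefix given in <code>outputConfig</code>, as described above.</p>
<pre>
# Illustrative sketch: asynchronous batch annotation writing sharded JSON
# output to GCS. Bucket and object names below are placeholders.
from googleapiclient.discovery import build

service = build(&#x27;vision&#x27;, &#x27;v1p1beta1&#x27;)

body = {
    &#x27;requests&#x27;: [{
        &#x27;image&#x27;: {&#x27;source&#x27;: {&#x27;imageUri&#x27;: &#x27;gs://my-input-bucket/photo.jpg&#x27;}},
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;LABEL_DETECTION&#x27;}],
    }],
    &#x27;outputConfig&#x27;: {
        &#x27;gcsDestination&#x27;: {&#x27;uri&#x27;: &#x27;gs://my-output-bucket/results/&#x27;},
        &#x27;batchSize&#x27;: 20,
    },
}

operation = service.images().asyncBatchAnnotate(body=body).execute()
print(&#x27;Operation name:&#x27;, operation[&#x27;name&#x27;])

# Poll the returned long-running operation until its `done` field is True,
# then list gs://my-output-bucket/results/* for the BatchAnnotateImagesResponse
# JSON files. How you poll the operation depends on your setup and is not
# shown here.
</pre>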

</body></html>