<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1p2beta1.html">Cloud Vision API</a> . <a href="vision_v1p2beta1.files.html">files</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Service that performs image detection and annotation for a batch of files.</p>
<p class="toc_element">
  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run asynchronous image detection and annotation for a list of generic files.</p>
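<p>A minimal usage sketch (an illustration, not part of the generated reference): it assumes the google-api-python-client package is installed and that credentials can be resolved from the environment.</p>
<pre>
from googleapiclient.discovery import build

# Build a client for the v1p2beta1 Vision API; the `files` collection exposes
# the annotate() and asyncBatchAnnotate() methods documented below.
service = build(&#x27;vision&#x27;, &#x27;v1p2beta1&#x27;)
files = service.files()
</pre>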
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="annotate">annotate(body=None, x__xgafv=None)</code>
  <pre>Service that performs image detection and annotation for a batch of files.
Now only &quot;application/pdf&quot;, &quot;image/tiff&quot; and &quot;image/gif&quot; are supported.

This service will extract at most 5 (customers can specify which 5 in
AnnotateFileRequest.pages) frames (gif) or pages (pdf or tiff) from each
file provided and perform detection and annotation for each image
extracted.

Args:
  body: object, The request body.
    The object takes the form of:

{ # A list of requests to annotate files using the BatchAnnotateFiles API.
    &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
        #
        # Format: `projects/{project-id}/locations/{location-id}`.
        #
        # If no parent is specified, a region will be chosen automatically.
        #
        # Supported location-ids:
        #     `us`: USA country only,
        #     `asia`: East asia areas, like Japan, Taiwan,
        #     `eu`: The European Union.
        #
        # Example: `projects/project-A/locations/eu`.
    &quot;requests&quot;: [ # Required. The list of file annotation requests. Right now we support only one
        # AnnotateFileRequest in BatchAnnotateFilesRequest.
      { # A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
        &quot;pages&quot;: [ # Pages of the file to perform image annotation.
            #
            # Pages start from 1; we assume the first page of the file is page 1.
            # At most 5 pages are supported per request. Pages can be negative.
            #
            # Page 1 means the first page.
            # Page 2 means the second page.
            # Page -1 means the last page.
            # Page -2 means the second to the last page.
            #
            # If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
            #
            # If this field is empty, by default the service performs image annotation
            # for the first 5 pages of the file.
          42,
        ],
        &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
          &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
            &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
                # Google Cloud Storage object. Wildcards are not currently supported.
          },
          &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
              # &quot;image/gif&quot; are supported. Wildcards are not supported.
          &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
              # Note: As with all `bytes` fields, protobuffers use a pure binary
              # representation, whereas JSON representations use base64.
              #
              # Currently, this field only works for BatchAnnotateFiles requests. It does
              # not work for AsyncBatchAnnotateFiles requests.
        },
        &quot;features&quot;: [ # Required. Requested features.
          { # The type of Google Cloud Vision API detection to perform, and the maximum
              # number of results to return for that type. Multiple `Feature` objects can
              # be specified in the `features` list.
            &quot;type&quot;: &quot;A String&quot;, # The feature type.
            &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
            &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
                # Supported values: &quot;builtin/stable&quot; (the default if unset) and
                # &quot;builtin/latest&quot;.
          },
        ],
        &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
          &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
            &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
                # of the image. For example, if the desired aspect ratio is 4/3, the
                # corresponding float value should be 1.33333. If not specified, the
                # best possible crop is returned. The number of provided aspect ratios is
                # limited to a maximum of 16; any aspect ratios provided after the 16th are
                # ignored.
              3.14,
            ],
          },
          &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
            &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
                # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
                # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
                # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
                # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
                # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
                # migrate existing products to these categories as well.
              &quot;A String&quot;,
            ],
            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
                # on Product labels. We currently support an AND of OR of key-value
                # expressions, where each expression within an OR must have the same key. An
                # &#x27;=&#x27; should be used to connect the key and value.
                #
                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
                #
                # Format is:
                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
            &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                # If it is not specified, system discretion will be applied.
              &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the normalized vertex coordinates are relative to the original image
                    # and range from 0 to 1.
                  &quot;x&quot;: 3.14, # X coordinate.
                  &quot;y&quot;: 3.14, # Y coordinate.
                },
              ],
              &quot;vertices&quot;: [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  &quot;y&quot;: 42, # Y coordinate.
                  &quot;x&quot;: 42, # X coordinate.
                },
              ],
            },
          },
          &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
              # yields the best results since it enables automatic language detection. For
              # languages based on the Latin alphabet, setting `language_hints` is not
              # needed. In rare cases, when the language of the text in the image is known,
              # setting a hint will help get better results (although it will be a
              # significant hindrance if the hint is wrong). Text detection returns an
              # error if one or more of the specified languages is not one of the
              # [supported languages](https://cloud.google.com/vision/docs/languages).
            &quot;A String&quot;,
          ],
          &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
            &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
          },
          &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
            &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            },
            &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                # standard&lt;/a&gt;. Values must be within normalized ranges.
              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
              &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            },
          },
        },
      },
    ],
  }

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

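Example:
  A minimal sketch (not from the discovery document): it assumes a `service`
  client built as in the snippet above, and a hypothetical PDF at
  `gs://my-bucket/doc.pdf`. Field names follow the request and response
  schemas documented here.

    body = {
        &quot;requests&quot;: [
            {
                &quot;inputConfig&quot;: {
                    &quot;gcsSource&quot;: {&quot;uri&quot;: &quot;gs://my-bucket/doc.pdf&quot;},  # hypothetical object
                    &quot;mimeType&quot;: &quot;application/pdf&quot;,
                },
                &quot;features&quot;: [{&quot;type&quot;: &quot;DOCUMENT_TEXT_DETECTION&quot;}],
                &quot;pages&quot;: [1, 2, -1],  # first two pages plus the last page
            },
        ],
    }
    response = service.files().annotate(body=body).execute()
    # OCR text of the first image of the first file (see the response schema below):
    text = response[&quot;responses&quot;][0][&quot;responses&quot;][0][&quot;fullTextAnnotation&quot;][&quot;text&quot;]
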
Returns:
  An object of the form:

    { # A list of file annotation responses.
    &quot;responses&quot;: [ # The list of file annotation responses, each response corresponding to each
        # AnnotateFileRequest in BatchAnnotateFilesRequest.
      { # Response to a single file annotation request. A file may contain one or more
          # images, which individually have their own responses.
        &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the failed request. The
            # `responses` field will not be set in this case.
            # different programming environments, including REST APIs and RPC APIs. It is
            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
            # three pieces of data: error code, error message, and error details.
            #
            # You can find out more about this error model and how to work with it in the
            # [API Design Guide](https://cloud.google.com/apis/design/errors).
          &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
          &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
              # user-facing error message should be localized and sent in the
              # google.rpc.Status.details field, or localized by the client.
          &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
              # message types for APIs to use.
            {
              &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
            },
          ],
        },
        &quot;responses&quot;: [ # Individual responses to images found within the file. This field will be
            # empty if the `error` field is set.
          { # Response to an image annotation request.
            &quot;localizedObjectAnnotations&quot;: [ # If present, localized object detection has completed successfully.
                # This will be sorted descending by confidence score.
              { # Set of detected objects with bounding boxes.
                &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                },
                &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
                &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                    # information, see
                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
              },
            ],
            &quot;productSearchResults&quot;: { # Results for a product search request. # If present, product search has completed successfully.
              &quot;results&quot;: [ # List of results, one for each product match.
                { # Information about a product.
                  &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                      # to the query.
                  &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                    &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                        # constraints can be specified based on the product_labels.
                        #
                        # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                        # strings with integer values can match a range-based restriction which is
                        # to be supported soon.
                        #
                        # Multiple values can be assigned to the same key. One product may have up to
                        # 500 product_labels.
                        #
                        # Notice that the total number of distinct product_labels over all products
                        # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                        # will refuse to work for that ProductSet.
                      { # A product label represented as a key-value pair.
                        &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                            # cannot exceed 128 bytes.
                        &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                            # exceed 128 bytes.
                      },
                    ],
                    &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                        #
                        # Format is:
                        # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                        #
                        # This field is ignored when creating a product.
                    &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                        # 4096 characters long.
                    &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                        # characters long.
                    &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                        # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                        # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                        # not be used for new products.
                  },
                  &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                      # 1 (full confidence).
                },
              ],
              &quot;indexTime&quot;: &quot;A String&quot;, # Timestamp of the index which provided these results. Products added to the
                  # product set and products removed from the product set after this time are
                  # not reflected in the current results.
              &quot;productGroupedResults&quot;: [ # List of results grouped by products detected in the query image. Each entry
                  # corresponds to one bounding polygon in the query image, and contains the
                  # matching products specific to that region. There may be duplicate product
                  # matches in the union of all the per-product results.
                { # Information about the products similar to a single product in a query
                    # image.
                  &quot;objectAnnotations&quot;: [ # List of generic predictions for the object in the bounding box.
                    { # Prediction for what the object in the bounding box is.
                      &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                          # information, see
                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                      &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
                      &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
                      &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
                    },
                  ],
                  &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
                    &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the normalized vertex coordinates are relative to the original image
                          # and range from 0 to 1.
                        &quot;x&quot;: 3.14, # X coordinate.
                        &quot;y&quot;: 3.14, # Y coordinate.
                      },
                    ],
                    &quot;vertices&quot;: [ # The bounding polygon vertices.
                      { # A vertex represents a 2D point in the image.
                          # NOTE: the vertex coordinates are in the same scale as the original image.
                        &quot;y&quot;: 42, # Y coordinate.
                        &quot;x&quot;: 42, # X coordinate.
                      },
                    ],
                  },
                  &quot;results&quot;: [ # List of results, one for each product match.
                    { # Information about a product.
                      &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
                          # to the query.
                      &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
                        &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
                            # constraints can be specified based on the product_labels.
                            #
                            # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
                            # strings with integer values can match a range-based restriction which is
                            # to be supported soon.
                            #
                            # Multiple values can be assigned to the same key. One product may have up to
                            # 500 product_labels.
                            #
                            # Notice that the total number of distinct product_labels over all products
                            # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                            # will refuse to work for that ProductSet.
                          { # A product label represented as a key-value pair.
                            &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
                                # cannot exceed 128 bytes.
                            &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
                                # exceed 128 bytes.
                          },
                        ],
                        &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
                            #
                            # Format is:
                            # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                            #
                            # This field is ignored when creating a product.
                        &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
                            # 4096 characters long.
                        &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
                            # characters long.
                        &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
                            # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
                            # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
                            # not be used for new products.
                      },
                      &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                          # 1 (full confidence).
                    },
                  ],
                },
              ],
            },
            &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
                # Note that filled-in image annotations are guaranteed to be
                # correct, even when `error` is set.
                # different programming environments, including REST APIs and RPC APIs. It is
                # used by [gRPC](https://github.com/grpc). Each `Status` message contains
                # three pieces of data: error code, error message, and error details.
                #
                # You can find out more about this error model and how to work with it in the
                # [API Design Guide](https://cloud.google.com/apis/design/errors).
              &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
              &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
                  # user-facing error message should be localized and sent in the
                  # google.rpc.Status.details field, or localized by the client.
              &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
                  # message types for APIs to use.
                {
                  &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
                },
              ],
            },
            &quot;fullTextAnnotation&quot;: { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
                # completed successfully.
                # This annotation provides the structural hierarchy for the OCR detected
                # text.
                # The hierarchy of an OCR extracted text structure is like this:
                #     TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
                # Each structural component, starting from Page, may further have their own
                # properties. Properties describe detected languages, breaks etc. Please refer
                # to the TextAnnotation.TextProperty message definition below for more
                # detail.
              &quot;text&quot;: &quot;A String&quot;, # UTF-8 text detected on the pages.
              &quot;pages&quot;: [ # List of pages detected by OCR.
                { # Detected page from OCR.
                  &quot;width&quot;: 42, # Page width. For PDFs the unit is points. For images (including
                      # TIFFs) the unit is pixels.
                  &quot;blocks&quot;: [ # List of blocks of text, images etc on this page.
                    { # Logical element on the page.
                      &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the block.
                        &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                          { # Detected language for a structural component.
                            &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                # information, see
                                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                            &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                          },
                        ],
                        &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                          &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                          &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                        },
                      },
                      &quot;blockType&quot;: &quot;A String&quot;, # Detected block type (text, image etc) for this block.
                      &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the block.
                          # The vertices are in the order of top-left, top-right, bottom-right,
                          # bottom-left. When a rotation of the bounding box is detected the rotation
                          # is represented as around the top-left corner as defined when the text is
                          # read in the &#x27;natural&#x27; orientation.
                          # For example:
                          #
                          # * when the text is horizontal it might look like:
                          #
                          #         0----1
                          #         |    |
                          #         3----2
                          #
                          # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                          #
                          #         2----3
                          #         |    |
                          #         1----0
                          #
                          # and the vertex order will still be (0, 1, 2, 3).
                        &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                          { # A vertex represents a 2D point in the image.
                              # NOTE: the normalized vertex coordinates are relative to the original image
                              # and range from 0 to 1.
                            &quot;x&quot;: 3.14, # X coordinate.
                            &quot;y&quot;: 3.14, # Y coordinate.
                          },
                        ],
                        &quot;vertices&quot;: [ # The bounding polygon vertices.
                          { # A vertex represents a 2D point in the image.
                              # NOTE: the vertex coordinates are in the same scale as the original image.
                            &quot;y&quot;: 42, # Y coordinate.
                            &quot;x&quot;: 42, # X coordinate.
                          },
                        ],
                      },
                      &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the block. Range [0, 1].
                      &quot;paragraphs&quot;: [ # List of paragraphs in this block (if this block is of type text).
                        { # Structural unit of text representing a number of words in certain order.
                          &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
                              # The vertices are in the order of top-left, top-right, bottom-right,
                              # bottom-left. When a rotation of the bounding box is detected the rotation
                              # is represented as around the top-left corner as defined when the text is
                              # read in the &#x27;natural&#x27; orientation.
                              # For example:
                              #   * when the text is horizontal it might look like:
                              #      0----1
                              #      |    |
                              #      3----2
                              #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                              #      2----3
                              #      |    |
                              #      1----0
                              #   and the vertex order will still be (0, 1, 2, 3).
                            &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                              { # A vertex represents a 2D point in the image.
                                  # NOTE: the normalized vertex coordinates are relative to the original image
                                  # and range from 0 to 1.
                                &quot;x&quot;: 3.14, # X coordinate.
                                &quot;y&quot;: 3.14, # Y coordinate.
                              },
                            ],
                            &quot;vertices&quot;: [ # The bounding polygon vertices.
                              { # A vertex represents a 2D point in the image.
                                  # NOTE: the vertex coordinates are in the same scale as the original image.
                                &quot;y&quot;: 42, # Y coordinate.
                                &quot;x&quot;: 42, # X coordinate.
                              },
                            ],
                          },
                          &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
                          &quot;words&quot;: [ # List of all words in this paragraph.
                            { # A word representation.
                              &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the word.
                                  # The vertices are in the order of top-left, top-right, bottom-right,
                                  # bottom-left. When a rotation of the bounding box is detected the rotation
                                  # is represented as around the top-left corner as defined when the text is
                                  # read in the &#x27;natural&#x27; orientation.
                                  # For example:
                                  #   * when the text is horizontal it might look like:
                                  #      0----1
                                  #      |    |
                                  #      3----2
                                  #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                  #      2----3
                                  #      |    |
                                  #      1----0
                                  #   and the vertex order will still be (0, 1, 2, 3).
                                &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                  { # A vertex represents a 2D point in the image.
                                      # NOTE: the normalized vertex coordinates are relative to the original image
                                      # and range from 0 to 1.
                                    &quot;x&quot;: 3.14, # X coordinate.
                                    &quot;y&quot;: 3.14, # Y coordinate.
                                  },
                                ],
                                &quot;vertices&quot;: [ # The bounding polygon vertices.
                                  { # A vertex represents a 2D point in the image.
                                      # NOTE: the vertex coordinates are in the same scale as the original image.
                                    &quot;y&quot;: 42, # Y coordinate.
                                    &quot;x&quot;: 42, # X coordinate.
                                  },
                                ],
                              },
                              &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the word. Range [0, 1].
                              &quot;symbols&quot;: [ # List of symbols in the word.
                                  # The order of the symbols follows the natural reading order.
                                { # A single symbol representation.
                                  &quot;text&quot;: &quot;A String&quot;, # The actual UTF-8 representation of the symbol.
                                  &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the symbol.
                                    &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                      { # Detected language for a structural component.
                                        &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                            # information, see
                                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                        &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                      },
                                    ],
                                    &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                      &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                      &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                    },
                                  },
                                  &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
                                      # The vertices are in the order of top-left, top-right, bottom-right,
                                      # bottom-left. When a rotation of the bounding box is detected the rotation
                                      # is represented as around the top-left corner as defined when the text is
                                      # read in the &#x27;natural&#x27; orientation.
                                      # For example:
                                      #   * when the text is horizontal it might look like:
                                      #      0----1
                                      #      |    |
                                      #      3----2
                                      #   * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
                                      #      2----3
                                      #      |    |
                                      #      1----0
                                      #   and the vertex order will still be (0, 1, 2, 3).
                                    &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                                      { # A vertex represents a 2D point in the image.
                                          # NOTE: the normalized vertex coordinates are relative to the original image
                                          # and range from 0 to 1.
                                        &quot;x&quot;: 3.14, # X coordinate.
                                        &quot;y&quot;: 3.14, # Y coordinate.
                                      },
                                    ],
                                    &quot;vertices&quot;: [ # The bounding polygon vertices.
                                      { # A vertex represents a 2D point in the image.
                                          # NOTE: the vertex coordinates are in the same scale as the original image.
                                        &quot;y&quot;: 42, # Y coordinate.
                                        &quot;x&quot;: 42, # X coordinate.
                                      },
                                    ],
                                  },
                                  &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
                                },
                              ],
                              &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the word.
                                &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                                  { # Detected language for a structural component.
                                    &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                        # information, see
                                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                    &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                                  },
                                ],
                                &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                                  &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                                  &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                                },
                              },
                            },
                          ],
                          &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the paragraph.
                            &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                              { # Detected language for a structural component.
                                &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                                    # information, see
                                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                                &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                              },
                            ],
                            &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                              &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                              &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                            },
                          },
                        },
                      ],
                    },
                  ],
                  &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected on the page.
                    &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
                      { # Detected language for a structural component.
                        &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
                            # information, see
                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
                        &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
                      },
                    ],
                    &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
                      &quot;type&quot;: &quot;A String&quot;, # Detected break type.
                      &quot;isPrefix&quot;: True or False, # True if break prepends the element.
                    },
                  },
                  &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the page. Range [0, 1].
                  &quot;height&quot;: 42, # Page height. For PDFs the unit is points. For images (including
                      # TIFFs) the unit is pixels.
                },
              ],
            },
            &quot;textAnnotations&quot;: [ # If present, text (OCR) detection has completed successfully.
              { # Set of detected entity features.
                &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                    # `description` is expressed.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                    # for `LABEL_DETECTION` features.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                },
                &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                    # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                    # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                    # detected distant towering building, even though the confidence that
                    # there is a tower in each image may be the same. Range [0, 1].
                &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                    # fields, such as a score or string that qualifies the entity.
                  { # A `Property` consists of a user-supplied name/value pair.
                    &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                    &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                    &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                  },
                ],
                &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
                &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                    # `LocationInfo` elements can be present because one location may
                    # indicate the location of the scene in the image, and another location
                    # may indicate the location of the place where the image was taken.
                    # Location information is usually present for landmarks.
                  { # Detected entity location information.
                    &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                        # of doubles representing degrees latitude and degrees longitude. Unless
                        # specified otherwise, this must conform to the
                        # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                        # standard&lt;/a&gt;. Values must be within normalized ranges.
                      &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                      &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    },
                  },
                ],
                &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                    # [Google Knowledge Graph Search
                    # API](https://developers.google.com/knowledge-graph/).
                &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                    # The accuracy of the entity detection in an image.
                    # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                    # this field represents the confidence that there is a tower in the query
                    # image. Range [0, 1].
              },
            ],
            &quot;imagePropertiesAnnotation&quot;: { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
              &quot;dominantColors&quot;: { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
                &quot;colors&quot;: [ # RGB color values with their score and pixel fraction.
                  { # Color information consists of RGB channels, score, and the fraction of
                      # the image that the color occupies in the image.
                    &quot;pixelFraction&quot;: 3.14, # The fraction of pixels the color occupies in the image.
                        # Value in range [0, 1].
                    &quot;color&quot;: { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                        # for simplicity of conversion to/from color representations in various
                        # languages over compactness; for example, the fields of this representation
                        # can be trivially provided to the constructor of &quot;java.awt.Color&quot; in Java; it
                        # can also be trivially provided to UIColor&#x27;s &quot;+colorWithRed:green:blue:alpha&quot;
                        # method in iOS; and, with just a little work, it can be easily formatted into
                        # a CSS &quot;rgba()&quot; string in JavaScript, as well.
                        #
                        # Note: this proto does not carry information about the absolute color space
                        # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
                        # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
                        # space.
                        #
                        # Example (Java):
                        #
                        #      import com.google.type.Color;
                        #
                        #      // ...
                        #      public static java.awt.Color fromProto(Color protocolor) {
                        #        float alpha = protocolor.hasAlpha()
                        #            ? protocolor.getAlpha().getValue()
                        #            : 1.0;
                        #
                        #        return new java.awt.Color(
                        #            protocolor.getRed(),
                        #            protocolor.getGreen(),
                        #            protocolor.getBlue(),
                        #            alpha);
                        #      }
                        #
                        #      public static Color toProto(java.awt.Color color) {
                        #        float red = (float) color.getRed();
                        #        float green = (float) color.getGreen();
                        #        float blue = (float) color.getBlue();
                        #        float denominator = 255.0;
                        #        Color.Builder resultBuilder =
                        #            Color
                        #                .newBuilder()
                        #                .setRed(red / denominator)
                        #                .setGreen(green / denominator)
                        #                .setBlue(blue / denominator);
                        #        int alpha = color.getAlpha();
                        #        if (alpha != 255) {
                        #          resultBuilder.setAlpha(
                        #              FloatValue
                        #                  .newBuilder()
                        #                  .setValue(((float) alpha) / denominator)
                        #                  .build());
                        #        }
                        #        return resultBuilder.build();
                        #      }
                        #      // ...
                        #
                        # Example (iOS / Obj-C):
                        #
                        #      // ...
                        #      static UIColor* fromProto(Color* protocolor) {
                        #        float red = [protocolor red];
                        #        float green = [protocolor green];
                        #        float blue = [protocolor blue];
                        #        FloatValue* alpha_wrapper = [protocolor alpha];
                        #        float alpha = 1.0;
                        #        if (alpha_wrapper != nil) {
                        #          alpha = [alpha_wrapper value];
                        #        }
                        #        return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                        #      }
                        #
                        #      static Color* toProto(UIColor* color) {
                        #        CGFloat red, green, blue, alpha;
                        #        if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
                        #          return nil;
                        #        }
                        #        Color* result = [[Color alloc] init];
                        #        [result setRed:red];
                        #        [result setGreen:green];
                        #        [result setBlue:blue];
                        #        if (alpha &lt;= 0.9999) {
                        #          [result setAlpha:floatWrapperWithValue(alpha)];
                        #        }
                        #        [result autorelease];
                        #        return result;
                        #      }
                        #      // ...
                        #
                        # Example (JavaScript):
                        #
                        #      // ...
                        #
                        #      var protoToCssColor = function(rgb_color) {
                        #        var redFrac = rgb_color.red || 0.0;
                        #        var greenFrac = rgb_color.green || 0.0;
                        #        var blueFrac = rgb_color.blue || 0.0;
                        #        var red = Math.floor(redFrac * 255);
                        #        var green = Math.floor(greenFrac * 255);
                        #        var blue = Math.floor(blueFrac * 255);
                        #
                        #        if (!(&#x27;alpha&#x27; in rgb_color)) {
                        #          return rgbToCssColor_(red, green, blue);
                        #        }
                        #
                        #        var alphaFrac = rgb_color.alpha.value || 0.0;
                        #        var rgbParams = [red, green, blue].join(&#x27;,&#x27;);
                        #        return [&#x27;rgba(&#x27;, rgbParams, &#x27;,&#x27;, alphaFrac, &#x27;)&#x27;].join(&#x27;&#x27;);
                        #      };
                        #
                        #      var rgbToCssColor_ = function(red, green, blue) {
                        #        var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
                        #        var hexString = rgbNumber.toString(16);
                        #        var missingZeros = 6 - hexString.length;
                        #        var resultBuilder = [&#x27;#&#x27;];
                        #        for (var i = 0; i &lt; missingZeros; i++) {
                        #          resultBuilder.push(&#x27;0&#x27;);
                        #        }
                        #        resultBuilder.push(hexString);
                        #        return resultBuilder.join(&#x27;&#x27;);
                        #      };
                        #
                        #      // ...
                      &quot;green&quot;: 3.14, # The amount of green in the color as a value in the interval [0, 1].
                      &quot;blue&quot;: 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                      &quot;alpha&quot;: 3.14, # The fraction of this color that should be applied to the pixel. That is,
                          # the final pixel color is defined by the equation:
                          #
                          #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                          #
                          # This means that a value of 1.0 corresponds to a solid color, whereas
                          # a value of 0.0 corresponds to a completely transparent color. This
                          # uses a wrapper message rather than a simple float scalar so that it is
                          # possible to distinguish between a default value and the value being unset.
                          # If omitted, this color object is to be rendered as a solid color
                          # (as if the alpha value had been explicitly given with a value of 1.0).
                      &quot;red&quot;: 3.14, # The amount of red in the color as a value in the interval [0, 1].
                    },
                    &quot;score&quot;: 3.14, # Image-specific score for this color. Value in range [0, 1].
                  },
                ],
              },
            },
            &quot;logoAnnotations&quot;: [ # If present, logo detection has completed successfully.
              { # Set of detected entity features.
                &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                    # `description` is expressed.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                    # for `LABEL_DETECTION` features.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                },
                &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                    # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                    # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                    # detected distant towering building, even though the confidence that
                    # there is a tower in each image may be the same. Range [0, 1].
                &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                    # fields, such as a score or string that qualifies the entity.
                  { # A `Property` consists of a user-supplied name/value pair.
                    &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                    &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                    &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                  },
                ],
                &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
                &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                    # `LocationInfo` elements can be present because one location may
                    # indicate the location of the scene in the image, and another location
                    # may indicate the location of the place where the image was taken.
                    # Location information is usually present for landmarks.
                  { # Detected entity location information.
                    &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                        # of doubles representing degrees latitude and degrees longitude. Unless
                        # specified otherwise, this must conform to the
                        # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                        # standard&lt;/a&gt;. Values must be within normalized ranges.
                      &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                      &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    },
                  },
                ],
                &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                    # [Google Knowledge Graph Search
                    # API](https://developers.google.com/knowledge-graph/).
                &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                    # The accuracy of the entity detection in an image.
                    # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                    # this field represents the confidence that there is a tower in the query
                    # image. Range [0, 1].
              },
            ],
970 &quot;context&quot;: { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
971 # comes from.
972 # information about the source of that image.
973 &quot;uri&quot;: &quot;A String&quot;, # The URI of the file used to produce the image.
974 &quot;pageNumber&quot;: 42, # If the file was a PDF or TIFF, this field gives the page number within
975 # the file used to produce the image.
976 },
977 &quot;webDetection&quot;: { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
978 &quot;visuallySimilarImages&quot;: [ # The visually similar image results.
979 { # Metadata for online images.
980 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
981 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
982 },
983 ],
984 &quot;bestGuessLabels&quot;: [ # The service&#x27;s best guess as to the topic of the request image.
985 # Inferred from similar images on the open web.
986 { # Label to provide extra metadata for the web detection.
987 &quot;label&quot;: &quot;A String&quot;, # Label for extra metadata.
988 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code for `label`, such as &quot;en-US&quot; or &quot;sr-Latn&quot;.
989 # For more information, see
990 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
991 },
992 ],
993 &quot;fullMatchingImages&quot;: [ # Fully matching images from the Internet.
994 # Can include resized copies of the query image.
995 { # Metadata for online images.
996 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
997 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
998 },
999 ],
1000 &quot;webEntities&quot;: [ # Deduced entities from similar images on the Internet.
1001 { # Entity deduced from similar images on the Internet.
1002 &quot;entityId&quot;: &quot;A String&quot;, # Opaque entity ID.
1003 &quot;description&quot;: &quot;A String&quot;, # Canonical description of the entity, in English.
1004 &quot;score&quot;: 3.14, # Overall relevancy score for the entity.
1005 # Not normalized and not comparable across different image queries.
1006 },
1007 ],
1008 &quot;pagesWithMatchingImages&quot;: [ # Web pages containing the matching images from the Internet.
1009 { # Metadata for web pages.
1010 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the web page.
1011 &quot;partialMatchingImages&quot;: [ # Partial matching images on the page.
1012 # Those images are similar enough to share some key-point features. For
1013 # example an original image will likely have partial matching for its
1014 # crops.
1015 { # Metadata for online images.
1016 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
1017 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
1018 },
1019 ],
1020 &quot;url&quot;: &quot;A String&quot;, # The result web page URL.
1021 &quot;pageTitle&quot;: &quot;A String&quot;, # Title for the web page, may contain HTML markups.
1022 &quot;fullMatchingImages&quot;: [ # Fully matching images on the page.
1023 # Can include resized copies of the query image.
1024 { # Metadata for online images.
1025 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
1026 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
1027 },
1028 ],
1029 },
1030 ],
1031 &quot;partialMatchingImages&quot;: [ # Partial matching images from the Internet.
1032 # Those images are similar enough to share some key-point features. For
1033 # example an original image will likely have partial matching for its crops.
1034 { # Metadata for online images.
1035 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
1036 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
1037 },
1038 ],
1039 },
            &quot;safeSearchAnnotation&quot;: { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
                # methods over safe-search verticals (for example, adult, spoof, medical,
                # violence).
              &quot;adult&quot;: &quot;A String&quot;, # Represents the adult content likelihood for the image. Adult content may
                  # contain elements such as nudity, pornographic images or cartoons, or
                  # sexual activities.
              &quot;spoof&quot;: &quot;A String&quot;, # Spoof likelihood. The likelihood that a modification
                  # was made to the image&#x27;s canonical version to make it appear
                  # funny or offensive.
              &quot;medical&quot;: &quot;A String&quot;, # Likelihood that this is a medical image.
              &quot;racy&quot;: &quot;A String&quot;, # Likelihood that the request image contains racy content. Racy content may
                  # include (but is not limited to) skimpy or sheer clothing, strategically
                  # covered nudity, lewd or provocative poses, or close-ups of sensitive
                  # body areas.
              &quot;violence&quot;: &quot;A String&quot;, # Likelihood that this image contains violent content.
            },
            &quot;landmarkAnnotations&quot;: [ # If present, landmark detection has completed successfully.
              { # Set of detected entity features.
                &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                    # `description` is expressed.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                    # for `LABEL_DETECTION` features.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                },
                &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                    # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                    # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                    # detected distant towering building, even though the confidence that
                    # there is a tower in each image may be the same. Range [0, 1].
                &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                    # fields, such as a score or string that qualifies the entity.
                  { # A `Property` consists of a user-supplied name/value pair.
                    &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                    &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                    &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                  },
                ],
                &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
                &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                    # `LocationInfo` elements can be present because one location may
                    # indicate the location of the scene in the image, and another location
                    # may indicate the location of the place where the image was taken.
                    # Location information is usually present for landmarks.
                  { # Detected entity location information.
                    &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                        # of doubles representing degrees latitude and degrees longitude. Unless
                        # specified otherwise, this must conform to the
                        # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                        # standard&lt;/a&gt;. Values must be within normalized ranges.
                      &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                      &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    },
                  },
                ],
                &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                    # [Google Knowledge Graph Search
                    # API](https://developers.google.com/knowledge-graph/).
                &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                    # The accuracy of the entity detection in an image.
                    # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                    # this field represents the confidence that there is a tower in the query
                    # image. Range [0, 1].
              },
            ],
            &quot;faceAnnotations&quot;: [ # If present, face detection has completed successfully.
              { # A face annotation object contains the results of face detection.
                &quot;underExposedLikelihood&quot;: &quot;A String&quot;, # Under-exposed likelihood.
                &quot;panAngle&quot;: 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
                    # pointing relative to the vertical plane perpendicular to the image. Range
                    # [-180,180].
                &quot;detectionConfidence&quot;: 3.14, # Detection confidence. Range [0, 1].
                &quot;blurredLikelihood&quot;: &quot;A String&quot;, # Blurred likelihood.
                &quot;headwearLikelihood&quot;: &quot;A String&quot;, # Headwear likelihood.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                    # are in the original image&#x27;s scale.
                    # The bounding box is computed to &quot;frame&quot; the face in accordance with human
                    # expectations. It is based on the landmarker results.
                    # Note that one or more x and/or y coordinates may not be generated in the
                    # `BoundingPoly` (the polygon will be unbounded) if only a partial face
                    # appears in the image to be annotated.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                },
                &quot;rollAngle&quot;: 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
                    # of the face relative to the image vertical about the axis perpendicular to
                    # the face. Range [-180,180].
                &quot;sorrowLikelihood&quot;: &quot;A String&quot;, # Sorrow likelihood.
                &quot;tiltAngle&quot;: 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
                    # pointing relative to the image&#x27;s horizontal plane. Range [-180,180].
                &quot;fdBoundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
                    # `boundingPoly`, and encloses only the skin part of the face. Typically, it
                    # is used to eliminate the face from any image analysis that detects the
                    # &quot;amount of skin&quot; visible in an image. It is not based on the
                    # landmarker results, only on the initial face detection, hence
                    # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                },
                &quot;angerLikelihood&quot;: &quot;A String&quot;, # Anger likelihood.
                &quot;landmarks&quot;: [ # Detected face landmarks.
                  { # A face-specific landmark (for example, a face feature).
                    &quot;type&quot;: &quot;A String&quot;, # Face landmark type.
                    &quot;position&quot;: { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
                        # A valid Position must have both x and y coordinates.
                        # The position coordinates are in the same scale as the original image.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;z&quot;: 3.14, # Z coordinate (or depth).
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  },
                ],
                &quot;surpriseLikelihood&quot;: &quot;A String&quot;, # Surprise likelihood.
                &quot;landmarkingConfidence&quot;: 3.14, # Face landmarking confidence. Range [0, 1].
                &quot;joyLikelihood&quot;: &quot;A String&quot;, # Joy likelihood.
              },
            ],
            &quot;cropHintsAnnotation&quot;: { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
              &quot;cropHints&quot;: [ # Crop hint results.
                { # Single crop hint that is used to generate a new crop when serving an image.
                  &quot;confidence&quot;: 3.14, # Confidence of this being a salient region. Range [0, 1].
                  &quot;importanceFraction&quot;: 3.14, # Fraction of importance of this salient region with respect to the original
                      # image.
                  &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
                      # box are in the original image&#x27;s scale.
                    &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                      { # A vertex represents a 2D point in the image.
                        # NOTE: the normalized vertex coordinates are relative to the original image
                        # and range from 0 to 1.
                        &quot;x&quot;: 3.14, # X coordinate.
                        &quot;y&quot;: 3.14, # Y coordinate.
                      },
                    ],
                    &quot;vertices&quot;: [ # The bounding polygon vertices.
                      { # A vertex represents a 2D point in the image.
                        # NOTE: the vertex coordinates are in the same scale as the original image.
                        &quot;y&quot;: 42, # Y coordinate.
                        &quot;x&quot;: 42, # X coordinate.
                      },
                    ],
                  },
                },
              ],
            },
            &quot;labelAnnotations&quot;: [ # If present, label detection has completed successfully.
              { # Set of detected entity features.
                &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
                    # `description` is expressed.
                &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                    # for `LABEL_DETECTION` features.
                  &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the normalized vertex coordinates are relative to the original image
                      # and range from 0 to 1.
                      &quot;x&quot;: 3.14, # X coordinate.
                      &quot;y&quot;: 3.14, # Y coordinate.
                    },
                  ],
                  &quot;vertices&quot;: [ # The bounding polygon vertices.
                    { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                      &quot;y&quot;: 42, # Y coordinate.
                      &quot;x&quot;: 42, # X coordinate.
                    },
                  ],
                },
                &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
                &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                    # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
                    # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
                    # detected distant towering building, even though the confidence that
                    # there is a tower in each image may be the same. Range [0, 1].
                &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
                    # fields, such as a score or string that qualifies the entity.
                  { # A `Property` consists of a user-supplied name/value pair.
                    &quot;value&quot;: &quot;A String&quot;, # Value of the property.
                    &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
                    &quot;name&quot;: &quot;A String&quot;, # Name of the property.
                  },
                ],
                &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
                &quot;locations&quot;: [ # The location information for the detected entity. Multiple
                    # `LocationInfo` elements can be present because one location may
                    # indicate the location of the scene in the image, and another location
                    # may indicate the location of the place where the image was taken.
                    # Location information is usually present for landmarks.
                  { # Detected entity location information.
                    &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                        # of doubles representing degrees latitude and degrees longitude. Unless
                        # specified otherwise, this must conform to the
                        # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
                        # standard&lt;/a&gt;. Values must be within normalized ranges.
                      &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                      &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                    },
                  },
                ],
                &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
                    # [Google Knowledge Graph Search
                    # API](https://developers.google.com/knowledge-graph/).
                &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
                    # The accuracy of the entity detection in an image.
                    # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
                    # this field represents the confidence that there is a tower in the query
                    # image. Range [0, 1].
              },
            ],
          },
        ],
        &quot;inputConfig&quot;: { # The desired input location and metadata. # Information about the file for which this response is generated.
          &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
            &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
                # Google Cloud Storage object. Wildcards are not currently supported.
          },
          &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
              # &quot;image/gif&quot; are supported. Wildcards are not supported.
          &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
              # Note: As with all `bytes` fields, protobuffers use a pure binary
              # representation, whereas JSON representations use base64.
              #
              # Currently, this field only works for BatchAnnotateFiles requests. It does
              # not work for AsyncBatchAnnotateFiles requests.
        },
        &quot;totalPages&quot;: 42, # This field gives the total number of pages in the file.
      },
    ],
  }</pre>
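<p>Example: a minimal, hypothetical sketch (not part of the generated reference above) of calling this method on a local PDF. It assumes the <code>google-api-python-client</code> package is installed and application-default credentials are configured; the file name is a placeholder.</p>
<pre>import base64

from googleapiclient import discovery

# Build a client for this API surface (vision v1p2beta1).
service = discovery.build(&#x27;vision&#x27;, &#x27;v1p2beta1&#x27;)

# JSON requests carry `bytes` fields as base64 (see `inputConfig.content` above).
with open(&#x27;document.pdf&#x27;, &#x27;rb&#x27;) as f:  # placeholder file name
    content = base64.b64encode(f.read()).decode(&#x27;utf-8&#x27;)

body = {
    &#x27;requests&#x27;: [{
        &#x27;inputConfig&#x27;: {&#x27;content&#x27;: content, &#x27;mimeType&#x27;: &#x27;application/pdf&#x27;},
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;DOCUMENT_TEXT_DETECTION&#x27;}],
        # The service extracts at most 5 pages; `pages` selects which ones.
        &#x27;pages&#x27;: [1, 2],
    }],
}
response = service.files().annotate(body=body).execute()</pre>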
</div>

<div class="method">
    <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</code>
  <pre>Run asynchronous image detection and annotation for a list of generic
files, such as PDF files, which may contain multiple pages and multiple
images per page. Progress and results can be retrieved through the
`google.longrunning.Operations` interface.
`Operation.metadata` contains `OperationMetadata` (metadata).
`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).

Args:
  body: object, The request body.
    The object takes the form of:

{ # Multiple async file annotation requests are batched into a single service
    # call.
  &quot;requests&quot;: [ # Required. Individual async file annotation requests for this batch.
    { # An offline file annotation request.
      &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
        &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
          &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
              # Google Cloud Storage object. Wildcards are not currently supported.
        },
        &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
            # &quot;image/gif&quot; are supported. Wildcards are not supported.
        &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
            # Note: As with all `bytes` fields, protobuffers use a pure binary
            # representation, whereas JSON representations use base64.
            #
            # Currently, this field only works for BatchAnnotateFiles requests. It does
            # not work for AsyncBatchAnnotateFiles requests.
      },
      &quot;features&quot;: [ # Required. Requested features.
        { # The type of Google Cloud Vision API detection to perform, and the maximum
            # number of results to return for that type. Multiple `Feature` objects can
            # be specified in the `features` list.
          &quot;type&quot;: &quot;A String&quot;, # The feature type.
          &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
              # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
          &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
              # Supported values: &quot;builtin/stable&quot; (the default if unset) and
              # &quot;builtin/latest&quot;.
        },
      ],
      &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
        &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
          &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
              # of the image. For example, if the desired aspect ratio is 4/3, the
              # corresponding float value should be 1.33333. If not specified, the
              # best possible crop is returned. The number of provided aspect ratios is
              # limited to a maximum of 16; any aspect ratios provided after the 16th are
              # ignored.
            3.14,
          ],
        },
        &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
          &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
              # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
              # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
              # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
              # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
              # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
              # migrate existing products to these categories as well.
            &quot;A String&quot;,
          ],
          &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
              # on Product labels. We currently support an AND of ORs of key-value
              # expressions, where each expression within an OR must have the same key. An
              # &#x27;=&#x27; should be used to connect the key and value.
              #
              # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
              # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
              # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
          &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
              #
              # Format is:
              # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
          &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
              # If it is not specified, system discretion will be applied.
            &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
              { # A vertex represents a 2D point in the image.
                # NOTE: the normalized vertex coordinates are relative to the original image
                # and range from 0 to 1.
                &quot;x&quot;: 3.14, # X coordinate.
                &quot;y&quot;: 3.14, # Y coordinate.
              },
            ],
            &quot;vertices&quot;: [ # The bounding polygon vertices.
              { # A vertex represents a 2D point in the image.
                # NOTE: the vertex coordinates are in the same scale as the original image.
                &quot;y&quot;: 42, # Y coordinate.
                &quot;x&quot;: 42, # X coordinate.
              },
            ],
          },
        },
        &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
            # yields the best results since it enables automatic language detection. For
            # languages based on the Latin alphabet, setting `language_hints` is not
            # needed. In rare cases, when the language of the text in the image is known,
            # setting a hint will help get better results (although it will be a
            # significant hindrance if the hint is wrong). Text detection returns an
            # error if one or more of the specified languages is not one of the
            # [supported languages](https://cloud.google.com/vision/docs/languages).
          &quot;A String&quot;,
        ],
        &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
          &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
        },
        &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
          &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
          },
          &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
              # standard&lt;/a&gt;. Values must be within normalized ranges.
            &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
          },
        },
      },
      &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
        &quot;gcsDestination&quot;: { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
          &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI prefix where the results will be stored. Results
              # will be in JSON format and preceded by their corresponding input URI prefix.
              # This field can represent either a gcs file prefix or a gcs directory. In
              # either case, the uri should be unique, because to get all of the output
              # files you will need to do a wildcard gcs search on the uri prefix
              # you provide.
              #
              # Examples:
              #
              # * File Prefix: gs://bucket-name/here/filenameprefix The output files
              #   will be created in gs://bucket-name/here/ and the names of the
              #   output files will begin with &quot;filenameprefix&quot;.
              #
              # * Directory Prefix: gs://bucket-name/some/location/ The output files
              #   will be created in gs://bucket-name/some/location/ and the names of the
              #   output files could be anything because there was no filename prefix
              #   specified.
              #
              # If there are multiple outputs, each response is still an AnnotateFileResponse,
              # each of which contains some subset of the full list of AnnotateImageResponse.
              # Multiple outputs can happen if, for example, the output JSON is too large
              # and overflows into multiple sharded files.
        },
        &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
            # Google Cloud Storage.
            # The valid range is [1, 100]. If not specified, the default value is 20.
            #
            # For example, for one pdf file with 100 pages, 100 response protos will
            # be generated. If `batch_size` = 20, then 5 json files, each
            # containing 20 response protos, will be written under the prefix
            # `gcs_destination`.`uri`.
            #
            # Currently, batch_size only applies to GcsDestination, with potential future
            # support for other output configurations.
      },
    },
  ],
  &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
      #
      # Format: `projects/{project-id}/locations/{location-id}`.
      #
      # If no parent is specified, a region will be chosen automatically.
      #
      # Supported location-ids:
      # `us`: USA country only,
      # `asia`: East asia areas, like Japan, Taiwan,
      # `eu`: The European Union.
      #
      # Example: `projects/project-A/locations/eu`.
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

  { # This resource represents a long-running operation that is the result of a
      # network API call.
    &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
        # different programming environments, including REST APIs and RPC APIs. It is
        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
        # three pieces of data: error code, error message, and error details.
        #
        # You can find out more about this error model and how to work with it in the
        # [API Design Guide](https://cloud.google.com/apis/design/errors).
      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
          # user-facing error message should be localized and sent in the
          # google.rpc.Status.details field, or localized by the client.
      &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
          # message types for APIs to use.
        {
          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
        },
      ],
    },
    &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically
        # contains progress information and common metadata such as create time.
        # Some services might not provide such metadata. Any method that returns a
        # long-running operation should document the metadata type, if any.
      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
    },
    &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
        # If `true`, the operation is completed, and either `error` or `response` is
        # available.
    &quot;response&quot;: { # The normal response of the operation in case of success. If the original
        # method returns no data on success, such as `Delete`, the response is
        # `google.protobuf.Empty`. If the original method is standard
        # `Get`/`Create`/`Update`, the response should be the resource. For other
        # methods, the response should have the type `XxxResponse`, where `Xxx`
        # is the original method name. For example, if the original method name
        # is `TakeSnapshot()`, the inferred response type is
        # `TakeSnapshotResponse`.
      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
    },
    &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
        # originally returns it. If you use the default HTTP mapping, the
        # `name` should be a resource name ending with `operations/{unique_id}`.
  }</pre>
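<p>Example: a minimal, hypothetical sketch (not part of the generated reference above) of submitting an async request for a PDF stored in Cloud Storage. The bucket and object names are placeholders, and application-default credentials are assumed. The returned operation name can be tracked through the <code>google.longrunning.Operations</code> interface, and the sharded result JSON files appear under the <code>gcs_destination</code> uri prefix, as described above.</p>
<pre>from googleapiclient import discovery

# Build a client for this API surface (vision v1p2beta1).
service = discovery.build(&#x27;vision&#x27;, &#x27;v1p2beta1&#x27;)

body = {
    &#x27;requests&#x27;: [{
        &#x27;inputConfig&#x27;: {
            &#x27;gcsSource&#x27;: {&#x27;uri&#x27;: &#x27;gs://my-bucket/input/scan.pdf&#x27;},  # placeholder
            &#x27;mimeType&#x27;: &#x27;application/pdf&#x27;,
        },
        &#x27;features&#x27;: [{&#x27;type&#x27;: &#x27;DOCUMENT_TEXT_DETECTION&#x27;}],
        &#x27;outputConfig&#x27;: {
            &#x27;gcsDestination&#x27;: {&#x27;uri&#x27;: &#x27;gs://my-bucket/output/scan-&#x27;},  # placeholder
            &#x27;batchSize&#x27;: 20,  # up to 20 response protos per output JSON shard
        },
    }],
}
operation = service.files().asyncBatchAnnotate(body=body).execute()
# The operation name identifies the long-running job; track it through the
# Operations interface, then list the output prefix with a wildcard gcs
# search to collect the result shards.
print(operation[&#x27;name&#x27;])</pre>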
</div>

</body></html>