blob: 7ce32e5133c54642ccf625043e0e723cd952dc10 [file] [log] [blame]
<html><body>
<style>

/* Reset margins/padding and inherit typography on common elements. */
body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  /* Fixed: the generic family must be hyphenated "sans-serif"; the original
     "sans serif" parsed as two unknown family names, losing the fallback. */
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  /* Fixed: "2 em" (space between number and unit) is invalid CSS, so the
     whole declaration was dropped by the parser; intended indent is 2em. */
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>
75<h1><a href="vision_v1p2beta1.html">Cloud Vision API</a> . <a href="vision_v1p2beta1.files.html">files</a></h1>
76<h2>Instance Methods</h2>
77<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070078 <code><a href="#annotate">annotate(body=None, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -070079<p class="firstline">Service that performs image detection and annotation for a batch of files.</p>
80<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070081 <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -070082<p class="firstline">Run asynchronous image detection and annotation for a list of generic</p>
83<h3>Method Details</h3>
84<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -070085 <code class="details" id="annotate">annotate(body=None, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -070086 <pre>Service that performs image detection and annotation for a batch of files.
Now only &quot;application/pdf&quot;, &quot;image/tiff&quot; and &quot;image/gif&quot; are supported.

This service will extract at most 5 (customers can specify which 5 in
AnnotateFileRequest.pages) frames (gif) or pages (pdf or tiff) from each
file provided and perform detection and annotation for each image
extracted.

94Args:
Dan O'Mearadd494642020-05-01 07:42:23 -070095 body: object, The request body.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -070096 The object takes the form of:
97
98{ # A list of requests to annotate files using the BatchAnnotateFiles API.
Bu Sun Kim65020912020-05-20 12:08:20 -070099 &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
100 #
101 # Format: `projects/{project-id}/locations/{location-id}`.
102 #
103 # If no parent is specified, a region will be chosen automatically.
104 #
105 # Supported location-ids:
106 # `us`: USA country only,
107 # `asia`: East asia areas, like Japan, Taiwan,
108 # `eu`: The European Union.
109 #
110 # Example: `projects/project-A/locations/eu`.
111 &quot;requests&quot;: [ # Required. The list of file annotation requests. Right now we support only one
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700112 # AnnotateFileRequest in BatchAnnotateFilesRequest.
113 { # A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700114 &quot;features&quot;: [ # Required. Requested features.
115 { # The type of Google Cloud Vision API detection to perform, and the maximum
116 # number of results to return for that type. Multiple `Feature` objects can
117 # be specified in the `features` list.
118 &quot;type&quot;: &quot;A String&quot;, # The feature type.
119 &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
120 # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
121 &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
122 # Supported values: &quot;builtin/stable&quot; (the default if unset) and
123 # &quot;builtin/latest&quot;.
124 },
125 ],
Bu Sun Kim65020912020-05-20 12:08:20 -0700126 &quot;pages&quot;: [ # Pages of the file to perform image annotation.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700127 #
128 # Pages starts from 1, we assume the first page of the file is page 1.
129 # At most 5 pages are supported per request. Pages can be negative.
130 #
131 # Page 1 means the first page.
132 # Page 2 means the second page.
133 # Page -1 means the last page.
134 # Page -2 means the second to the last page.
135 #
136 # If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
137 #
138 # If this field is empty, by default the service performs image annotation
139 # for the first 5 pages of the file.
140 42,
141 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700142 &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
143 &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
144 &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
145 # of the image. For example, if the desired aspect ratio is 4/3, the
146 # corresponding float value should be 1.33333. If not specified, the
147 # best possible crop is returned. The number of provided aspect ratios is
148 # limited to a maximum of 16; any aspect ratios provided after the 16th are
149 # ignored.
150 3.14,
151 ],
152 },
153 &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
154 &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
155 # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
156 # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
157 # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
158 # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
159 # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
160 # migrate existing products to these categories as well.
161 &quot;A String&quot;,
162 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700163 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
164 # If it is not specified, system discretion will be applied.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700165 &quot;vertices&quot;: [ # The bounding polygon vertices.
166 { # A vertex represents a 2D point in the image.
167 # NOTE: the vertex coordinates are in the same scale as the original image.
168 &quot;x&quot;: 42, # X coordinate.
169 &quot;y&quot;: 42, # Y coordinate.
170 },
171 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700172 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
173 { # A vertex represents a 2D point in the image.
174 # NOTE: the normalized vertex coordinates are relative to the original image
175 # and range from 0 to 1.
176 &quot;x&quot;: 3.14, # X coordinate.
177 &quot;y&quot;: 3.14, # Y coordinate.
178 },
179 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700180 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700181 &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
182 #
183 # Format is:
184 # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
185 &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
186 # on Product labels. We currently support an AND of OR of key-value
187 # expressions, where each expression within an OR must have the same key. An
188 # &#x27;=&#x27; should be used to connect the key and value.
189 #
190 # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
191 # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
192 # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700193 },
194 &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
195 # yields the best results since it enables automatic language detection. For
196 # languages based on the Latin alphabet, setting `language_hints` is not
197 # needed. In rare cases, when the language of the text in the image is known,
198 # setting a hint will help get better results (although it will be a
199 # significant hindrance if the hint is wrong). Text detection returns an
200 # error if one or more of the specified languages is not one of the
201 # [supported languages](https://cloud.google.com/vision/docs/languages).
202 &quot;A String&quot;,
203 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700204 &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
205 &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
206 # of doubles representing degrees latitude and degrees longitude. Unless
207 # specified otherwise, this must conform to the
208 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
209 # standard&lt;/a&gt;. Values must be within normalized ranges.
210 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
211 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
212 },
213 &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
214 # of doubles representing degrees latitude and degrees longitude. Unless
215 # specified otherwise, this must conform to the
216 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
217 # standard&lt;/a&gt;. Values must be within normalized ranges.
218 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
219 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
220 },
221 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700222 &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
223 &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
224 },
225 },
226 &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
227 &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
228 # Note: As with all `bytes` fields, protobuffers use a pure binary
229 # representation, whereas JSON representations use base64.
230 #
231 # Currently, this field only works for BatchAnnotateFiles requests. It does
232 # not work for AsyncBatchAnnotateFiles requests.
233 &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
234 # &quot;image/gif&quot; are supported. Wildcards are not supported.
235 &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
236 &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
237 # Google Cloud Storage object. Wildcards are not currently supported.
238 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700239 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700240 },
241 ],
242 }
243
244 x__xgafv: string, V1 error format.
245 Allowed values
246 1 - v1 error format
247 2 - v2 error format
248
249Returns:
250 An object of the form:
251
252 { # A list of file annotation responses.
Bu Sun Kim65020912020-05-20 12:08:20 -0700253 &quot;responses&quot;: [ # The list of file annotation responses, each response corresponding to each
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700254 # AnnotateFileRequest in BatchAnnotateFilesRequest.
255 { # Response to a single file annotation request. A file may contain one or more
256 # images, which individually have their own responses.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700257 &quot;inputConfig&quot;: { # The desired input location and metadata. # Information about the file for which this response is generated.
258 &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
259 # Note: As with all `bytes` fields, protobuffers use a pure binary
260 # representation, whereas JSON representations use base64.
261 #
262 # Currently, this field only works for BatchAnnotateFiles requests. It does
263 # not work for AsyncBatchAnnotateFiles requests.
264 &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
265 # &quot;image/gif&quot; are supported. Wildcards are not supported.
266 &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
267 &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
268 # Google Cloud Storage object. Wildcards are not currently supported.
269 },
270 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700271 &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the failed request. The
272 # `responses` field will not be set in this case.
273 # different programming environments, including REST APIs and RPC APIs. It is
274 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
275 # three pieces of data: error code, error message, and error details.
276 #
277 # You can find out more about this error model and how to work with it in the
278 # [API Design Guide](https://cloud.google.com/apis/design/errors).
279 &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
Bu Sun Kim65020912020-05-20 12:08:20 -0700280 &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
281 # message types for APIs to use.
282 {
283 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
284 },
285 ],
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700286 &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
287 # user-facing error message should be localized and sent in the
288 # google.rpc.Status.details field, or localized by the client.
Dan O'Mearadd494642020-05-01 07:42:23 -0700289 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700290 &quot;totalPages&quot;: 42, # This field gives the total number of pages in the file.
Bu Sun Kim65020912020-05-20 12:08:20 -0700291 &quot;responses&quot;: [ # Individual responses to images found within the file. This field will be
Dan O'Mearadd494642020-05-01 07:42:23 -0700292 # empty if the `error` field is set.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700293 { # Response to an image annotation request.
Bu Sun Kim65020912020-05-20 12:08:20 -0700294 &quot;productSearchResults&quot;: { # Results for a product search request. # If present, product search has completed successfully.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700295 &quot;indexTime&quot;: &quot;A String&quot;, # Timestamp of the index which provided these results. Products added to the
296 # product set and products removed from the product set after this time are
297 # not reflected in the current results.
Bu Sun Kim65020912020-05-20 12:08:20 -0700298 &quot;results&quot;: [ # List of results, one for each product match.
299 { # Information about a product.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700300 &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
301 # 1 (full confidence).
Bu Sun Kim65020912020-05-20 12:08:20 -0700302 &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
303 # to the query.
304 &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700305 &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
306 # 4096 characters long.
307 &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
308 #
309 # Format is:
310 # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
311 #
312 # This field is ignored when creating a product.
313 &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
314 # characters long.
315 &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
316 # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
317 # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
318 # not be used for new products.
Bu Sun Kim65020912020-05-20 12:08:20 -0700319 &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
320 # constraints can be specified based on the product_labels.
321 #
322 # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
323 # strings with integer values can match a range-based restriction which is
324 # to be supported soon.
325 #
326 # Multiple values can be assigned to the same key. One product may have up to
327 # 500 product_labels.
328 #
329 # Notice that the total number of distinct product_labels over all products
330 # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
331 # will refuse to work for that ProductSet.
332 { # A product label represented as a key-value pair.
Bu Sun Kim65020912020-05-20 12:08:20 -0700333 &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
334 # exceed 128 bytes.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700335 &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
336 # cannot exceed 128 bytes.
Bu Sun Kim65020912020-05-20 12:08:20 -0700337 },
338 ],
339 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700340 },
341 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700342 &quot;productGroupedResults&quot;: [ # List of results grouped by products detected in the query image. Each entry
343 # corresponds to one bounding polygon in the query image, and contains the
344 # matching products specific to that region. There may be duplicate product
345 # matches in the union of all the per-product results.
346 { # Information about the products similar to a single product in a query
347 # image.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700348 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700349 &quot;vertices&quot;: [ # The bounding polygon vertices.
350 { # A vertex represents a 2D point in the image.
351 # NOTE: the vertex coordinates are in the same scale as the original image.
352 &quot;x&quot;: 42, # X coordinate.
353 &quot;y&quot;: 42, # Y coordinate.
354 },
355 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700356 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
357 { # A vertex represents a 2D point in the image.
358 # NOTE: the normalized vertex coordinates are relative to the original image
359 # and range from 0 to 1.
360 &quot;x&quot;: 3.14, # X coordinate.
361 &quot;y&quot;: 3.14, # Y coordinate.
362 },
363 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700364 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700365 &quot;objectAnnotations&quot;: [ # List of generic predictions for the object in the bounding box.
366 { # Prediction for what the object in the bounding box is.
367 &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
368 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
369 # information, see
370 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
371 &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
372 &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
373 },
374 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700375 &quot;results&quot;: [ # List of results, one for each product match.
376 { # Information about a product.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700377 &quot;score&quot;: 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
378 # 1 (full confidence).
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700379 &quot;image&quot;: &quot;A String&quot;, # The resource name of the image from the product that is the closest match
380 # to the query.
381 &quot;product&quot;: { # A Product contains ReferenceImages. # The Product.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700382 &quot;displayName&quot;: &quot;A String&quot;, # The user-provided name for this Product. Must not be empty. Must be at most
383 # 4096 characters long.
384 &quot;name&quot;: &quot;A String&quot;, # The resource name of the product.
385 #
386 # Format is:
387 # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
388 #
389 # This field is ignored when creating a product.
390 &quot;description&quot;: &quot;A String&quot;, # User-provided metadata to be stored with this product. Must be at most 4096
391 # characters long.
392 &quot;productCategory&quot;: &quot;A String&quot;, # Immutable. The category for the product identified by the reference image. This should
393 # be either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, or &quot;toys-v2&quot;. The legacy categories
394 # &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported, but these should
395 # not be used for new products.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700396 &quot;productLabels&quot;: [ # Key-value pairs that can be attached to a product. At query time,
397 # constraints can be specified based on the product_labels.
398 #
399 # Note that integer values can be provided as strings, e.g. &quot;1199&quot;. Only
400 # strings with integer values can match a range-based restriction which is
401 # to be supported soon.
402 #
403 # Multiple values can be assigned to the same key. One product may have up to
404 # 500 product_labels.
405 #
406 # Notice that the total number of distinct product_labels over all products
407 # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
408 # will refuse to work for that ProductSet.
409 { # A product label represented as a key-value pair.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700410 &quot;key&quot;: &quot;A String&quot;, # The key of the label attached to the product. Cannot be empty and cannot
411 # exceed 128 bytes.
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700412 &quot;value&quot;: &quot;A String&quot;, # The value of the label attached to the product. Cannot be empty and
413 # cannot exceed 128 bytes.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700414 },
415 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700416 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700417 },
418 ],
419 },
420 ],
Bu Sun Kim65020912020-05-20 12:08:20 -0700421 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700422 &quot;textAnnotations&quot;: [ # If present, text (OCR) detection has completed successfully.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700423 { # Set of detected entity features.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700424 &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
425 # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
426 # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
427 # detected distant towering building, even though the confidence that
428 # there is a tower in each image may be the same. Range [0, 1].
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700429 &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
430 # `description` is expressed.
Bu Sun Kim65020912020-05-20 12:08:20 -0700431 &quot;locations&quot;: [ # The location information for the detected entity. Multiple
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700432 # `LocationInfo` elements can be present because one location may
433 # indicate the location of the scene in the image, and another location
434 # may indicate the location of the place where the image was taken.
435 # Location information is usually present for landmarks.
436 { # Detected entity location information.
Bu Sun Kim65020912020-05-20 12:08:20 -0700437 &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700438 # of doubles representing degrees latitude and degrees longitude. Unless
439 # specified otherwise, this must conform to the
Bu Sun Kim65020912020-05-20 12:08:20 -0700440 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
Dan O'Mearadd494642020-05-01 07:42:23 -0700441 # standard&lt;/a&gt;. Values must be within normalized ranges.
Bu Sun Kim65020912020-05-20 12:08:20 -0700442 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
443 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700444 },
445 },
446 ],
Bu Sun Kim65020912020-05-20 12:08:20 -0700447 &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
448 # [Google Knowledge Graph Search
449 # API](https://developers.google.com/knowledge-graph/).
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700450 &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
Bu Sun Kim65020912020-05-20 12:08:20 -0700451 &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
452 # The accuracy of the entity detection in an image.
453 # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
454 # this field represents the confidence that there is a tower in the query
455 # image. Range [0, 1].
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700456 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
457 # for `LABEL_DETECTION` features.
458 &quot;vertices&quot;: [ # The bounding polygon vertices.
459 { # A vertex represents a 2D point in the image.
460 # NOTE: the vertex coordinates are in the same scale as the original image.
461 &quot;x&quot;: 42, # X coordinate.
462 &quot;y&quot;: 42, # Y coordinate.
463 },
464 ],
465 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
466 { # A vertex represents a 2D point in the image.
467 # NOTE: the normalized vertex coordinates are relative to the original image
468 # and range from 0 to 1.
469 &quot;x&quot;: 3.14, # X coordinate.
470 &quot;y&quot;: 3.14, # Y coordinate.
471 },
472 ],
473 },
474 &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
475 # fields, such a score or string that qualifies the entity.
476 { # A `Property` consists of a user-supplied name/value pair.
477 &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
478 &quot;value&quot;: &quot;A String&quot;, # Value of the property.
479 &quot;name&quot;: &quot;A String&quot;, # Name of the property.
480 },
481 ],
482 &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700483 },
484 ],
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700485 &quot;safeSearchAnnotation&quot;: { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
486 # methods over safe-search verticals (for example, adult, spoof, medical,
487 # violence).
488 &quot;racy&quot;: &quot;A String&quot;, # Likelihood that the request image contains racy content. Racy content may
489 # include (but is not limited to) skimpy or sheer clothing, strategically
490 # covered nudity, lewd or provocative poses, or close-ups of sensitive
491 # body areas.
492 &quot;medical&quot;: &quot;A String&quot;, # Likelihood that this is a medical image.
493 &quot;adult&quot;: &quot;A String&quot;, # Represents the adult content likelihood for the image. Adult content may
494 # contain elements such as nudity, pornographic images or cartoons, or
495 # sexual activities.
496 &quot;violence&quot;: &quot;A String&quot;, # Likelihood that this image contains violent content.
497 &quot;spoof&quot;: &quot;A String&quot;, # Spoof likelihood. The likelihood that a modification
498 # was made to the image&#x27;s canonical version to make it appear
499 # funny or offensive.
500 },
501 &quot;webDetection&quot;: { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
502 &quot;fullMatchingImages&quot;: [ # Fully matching images from the Internet.
503 # Can include resized copies of the query image.
504 { # Metadata for online images.
505 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
506 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
507 },
508 ],
509 &quot;bestGuessLabels&quot;: [ # The service&#x27;s best guess as to the topic of the request image.
510 # Inferred from similar images on the open web.
511 { # Label to provide extra metadata for the web detection.
512 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code for `label`, such as &quot;en-US&quot; or &quot;sr-Latn&quot;.
513 # For more information, see
514 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
515 &quot;label&quot;: &quot;A String&quot;, # Label for extra metadata.
516 },
517 ],
518 &quot;visuallySimilarImages&quot;: [ # The visually similar image results.
519 { # Metadata for online images.
520 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
521 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
522 },
523 ],
524 &quot;partialMatchingImages&quot;: [ # Partial matching images from the Internet.
525 # Those images are similar enough to share some key-point features. For
526 # example an original image will likely have partial matching for its crops.
527 { # Metadata for online images.
528 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
529 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
530 },
531 ],
532 &quot;webEntities&quot;: [ # Deduced entities from similar images on the Internet.
533 { # Entity deduced from similar images on the Internet.
534 &quot;entityId&quot;: &quot;A String&quot;, # Opaque entity ID.
535 &quot;score&quot;: 3.14, # Overall relevancy score for the entity.
536 # Not normalized and not comparable across different image queries.
537 &quot;description&quot;: &quot;A String&quot;, # Canonical description of the entity, in English.
538 },
539 ],
540 &quot;pagesWithMatchingImages&quot;: [ # Web pages containing the matching images from the Internet.
541 { # Metadata for web pages.
542 &quot;partialMatchingImages&quot;: [ # Partial matching images on the page.
543 # Those images are similar enough to share some key-point features. For
544 # example an original image will likely have partial matching for its
545 # crops.
546 { # Metadata for online images.
547 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
548 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
549 },
550 ],
551 &quot;url&quot;: &quot;A String&quot;, # The result web page URL.
552 &quot;fullMatchingImages&quot;: [ # Fully matching images on the page.
553 # Can include resized copies of the query image.
554 { # Metadata for online images.
555 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the image.
556 &quot;url&quot;: &quot;A String&quot;, # The result image URL.
557 },
558 ],
559 &quot;score&quot;: 3.14, # (Deprecated) Overall relevancy score for the web page.
560 &quot;pageTitle&quot;: &quot;A String&quot;, # Title for the web page, may contain HTML markups.
561 },
562 ],
563 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700564 &quot;imagePropertiesAnnotation&quot;: { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
565 &quot;dominantColors&quot;: { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
566 &quot;colors&quot;: [ # RGB color values with their score and pixel fraction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700567 { # Color information consists of RGB channels, score, and the fraction of
568 # the image that the color occupies in the image.
Bu Sun Kim65020912020-05-20 12:08:20 -0700569 &quot;pixelFraction&quot;: 3.14, # The fraction of pixels the color occupies in the image.
570 # Value in range [0, 1].
571 &quot;color&quot;: { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700572 # for simplicity of conversion to/from color representations in various
573 # languages over compactness; for example, the fields of this representation
Bu Sun Kim65020912020-05-20 12:08:20 -0700574 # can be trivially provided to the constructor of &quot;java.awt.Color&quot; in Java; it
575 # can also be trivially provided to UIColor&#x27;s &quot;+colorWithRed:green:blue:alpha&quot;
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700576 # method in iOS; and, with just a little work, it can be easily formatted into
Bu Sun Kim65020912020-05-20 12:08:20 -0700577 # a CSS &quot;rgba()&quot; string in JavaScript, as well.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700578 #
579 # Note: this proto does not carry information about the absolute color space
580 # that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB,
581 # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
582 # space.
583 #
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700584 # Note: when color equality needs to be decided, implementations, unless
585 # documented otherwise, will treat two colors to be equal if all their red,
586 # green, blue and alpha values each differ by at most 1e-5.
587 #
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700588 # Example (Java):
589 #
590 # import com.google.type.Color;
591 #
592 # // ...
593 # public static java.awt.Color fromProto(Color protocolor) {
594 # float alpha = protocolor.hasAlpha()
595 # ? protocolor.getAlpha().getValue()
596 # : 1.0;
597 #
598 # return new java.awt.Color(
599 # protocolor.getRed(),
600 # protocolor.getGreen(),
601 # protocolor.getBlue(),
602 # alpha);
603 # }
604 #
605 # public static Color toProto(java.awt.Color color) {
606 # float red = (float) color.getRed();
607 # float green = (float) color.getGreen();
608 # float blue = (float) color.getBlue();
609 # float denominator = 255.0;
610 # Color.Builder resultBuilder =
611 # Color
612 # .newBuilder()
613 # .setRed(red / denominator)
614 # .setGreen(green / denominator)
615 # .setBlue(blue / denominator);
616 # int alpha = color.getAlpha();
617 # if (alpha != 255) {
618 # result.setAlpha(
619 # FloatValue
620 # .newBuilder()
621 # .setValue(((float) alpha) / denominator)
622 # .build());
623 # }
624 # return resultBuilder.build();
625 # }
626 # // ...
627 #
628 # Example (iOS / Obj-C):
629 #
630 # // ...
631 # static UIColor* fromProto(Color* protocolor) {
632 # float red = [protocolor red];
633 # float green = [protocolor green];
634 # float blue = [protocolor blue];
635 # FloatValue* alpha_wrapper = [protocolor alpha];
636 # float alpha = 1.0;
637 # if (alpha_wrapper != nil) {
638 # alpha = [alpha_wrapper value];
639 # }
640 # return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
641 # }
642 #
643 # static Color* toProto(UIColor* color) {
644 # CGFloat red, green, blue, alpha;
Dan O'Mearadd494642020-05-01 07:42:23 -0700645 # if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700646 # return nil;
647 # }
648 # Color* result = [[Color alloc] init];
649 # [result setRed:red];
650 # [result setGreen:green];
651 # [result setBlue:blue];
Dan O'Mearadd494642020-05-01 07:42:23 -0700652 # if (alpha &lt;= 0.9999) {
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700653 # [result setAlpha:floatWrapperWithValue(alpha)];
654 # }
655 # [result autorelease];
656 # return result;
657 # }
658 # // ...
659 #
660 # Example (JavaScript):
661 #
662 # // ...
663 #
664 # var protoToCssColor = function(rgb_color) {
665 # var redFrac = rgb_color.red || 0.0;
666 # var greenFrac = rgb_color.green || 0.0;
667 # var blueFrac = rgb_color.blue || 0.0;
668 # var red = Math.floor(redFrac * 255);
669 # var green = Math.floor(greenFrac * 255);
670 # var blue = Math.floor(blueFrac * 255);
671 #
Bu Sun Kim65020912020-05-20 12:08:20 -0700672 # if (!(&#x27;alpha&#x27; in rgb_color)) {
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700673 # return rgbToCssColor_(red, green, blue);
674 # }
675 #
676 # var alphaFrac = rgb_color.alpha.value || 0.0;
Bu Sun Kim65020912020-05-20 12:08:20 -0700677 # var rgbParams = [red, green, blue].join(&#x27;,&#x27;);
678 # return [&#x27;rgba(&#x27;, rgbParams, &#x27;,&#x27;, alphaFrac, &#x27;)&#x27;].join(&#x27;&#x27;);
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700679 # };
680 #
681 # var rgbToCssColor_ = function(red, green, blue) {
Dan O'Mearadd494642020-05-01 07:42:23 -0700682 # var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700683 # var hexString = rgbNumber.toString(16);
684 # var missingZeros = 6 - hexString.length;
Bu Sun Kim65020912020-05-20 12:08:20 -0700685 # var resultBuilder = [&#x27;#&#x27;];
Dan O'Mearadd494642020-05-01 07:42:23 -0700686 # for (var i = 0; i &lt; missingZeros; i++) {
Bu Sun Kim65020912020-05-20 12:08:20 -0700687 # resultBuilder.push(&#x27;0&#x27;);
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700688 # }
689 # resultBuilder.push(hexString);
Bu Sun Kim65020912020-05-20 12:08:20 -0700690 # return resultBuilder.join(&#x27;&#x27;);
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700691 # };
692 #
693 # // ...
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700694 &quot;red&quot;: 3.14, # The amount of red in the color as a value in the interval [0, 1].
Bu Sun Kim65020912020-05-20 12:08:20 -0700695 &quot;alpha&quot;: 3.14, # The fraction of this color that should be applied to the pixel. That is,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700696 # the final pixel color is defined by the equation:
697 #
698 # pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
699 #
700 # This means that a value of 1.0 corresponds to a solid color, whereas
701 # a value of 0.0 corresponds to a completely transparent color. This
702 # uses a wrapper message rather than a simple float scalar so that it is
703 # possible to distinguish between a default value and the value being unset.
704 # If omitted, this color object is to be rendered as a solid color
705 # (as if the alpha value had been explicitly given with a value of 1.0).
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700706 &quot;blue&quot;: 3.14, # The amount of blue in the color as a value in the interval [0, 1].
707 &quot;green&quot;: 3.14, # The amount of green in the color as a value in the interval [0, 1].
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700708 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700709 &quot;score&quot;: 3.14, # Image-specific score for this color. Value in range [0, 1].
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700710 },
711 ],
712 },
713 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -0700714 &quot;cropHintsAnnotation&quot;: { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
715 &quot;cropHints&quot;: [ # Crop hint results.
716 { # Single crop hint that is used to generate a new crop when serving an image.
717 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
718 # box are in the original image&#x27;s scale.
719 &quot;vertices&quot;: [ # The bounding polygon vertices.
720 { # A vertex represents a 2D point in the image.
721 # NOTE: the vertex coordinates are in the same scale as the original image.
722 &quot;x&quot;: 42, # X coordinate.
723 &quot;y&quot;: 42, # Y coordinate.
724 },
725 ],
726 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
727 { # A vertex represents a 2D point in the image.
728 # NOTE: the normalized vertex coordinates are relative to the original image
729 # and range from 0 to 1.
730 &quot;x&quot;: 3.14, # X coordinate.
731 &quot;y&quot;: 3.14, # Y coordinate.
732 },
733 ],
734 },
735 &quot;importanceFraction&quot;: 3.14, # Fraction of importance of this salient region with respect to the original
736 # image.
737 &quot;confidence&quot;: 3.14, # Confidence of this being a salient region. Range [0, 1].
738 },
739 ],
740 },
741 &quot;fullTextAnnotation&quot;: { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
742 # completed successfully.
743 # This annotation provides the structural hierarchy for the OCR detected
744 # text.
745 # The hierarchy of an OCR extracted text structure is like this:
746 # TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
747 # Each structural component, starting from Page, may further have their own
748 # properties. Properties describe detected languages, breaks etc. Please refer
749 # to the TextAnnotation.TextProperty message definition below for more
750 # detail.
751 &quot;text&quot;: &quot;A String&quot;, # UTF-8 text detected on the pages.
752 &quot;pages&quot;: [ # List of pages detected by OCR.
753 { # Detected page from OCR.
754 &quot;blocks&quot;: [ # List of blocks of text, images etc on this page.
755 { # Logical element on the page.
756 &quot;blockType&quot;: &quot;A String&quot;, # Detected block type (text, image etc) for this block.
757 &quot;paragraphs&quot;: [ # List of paragraphs in this block (if this blocks is of type text).
758 { # Structural unit of text representing a number of words in certain order.
759 &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
760 &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the paragraph.
761 &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
762 &quot;type&quot;: &quot;A String&quot;, # Detected break type.
763 &quot;isPrefix&quot;: True or False, # True if break prepends the element.
764 },
765 &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
766 { # Detected language for a structural component.
767 &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
768 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
769 # information, see
770 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
771 },
772 ],
773 },
774 &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
775 # The vertices are in the order of top-left, top-right, bottom-right,
776 # bottom-left. When a rotation of the bounding box is detected the rotation
777 # is represented as around the top-left corner as defined when the text is
778 # read in the &#x27;natural&#x27; orientation.
779 # For example:
780 # * when the text is horizontal it might look like:
781 # 0----1
782 # | |
783 # 3----2
784 # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
785 # 2----3
786 # | |
787 # 1----0
788 # and the vertex order will still be (0, 1, 2, 3).
789 &quot;vertices&quot;: [ # The bounding polygon vertices.
790 { # A vertex represents a 2D point in the image.
791 # NOTE: the vertex coordinates are in the same scale as the original image.
792 &quot;x&quot;: 42, # X coordinate.
793 &quot;y&quot;: 42, # Y coordinate.
794 },
795 ],
796 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
797 { # A vertex represents a 2D point in the image.
798 # NOTE: the normalized vertex coordinates are relative to the original image
799 # and range from 0 to 1.
800 &quot;x&quot;: 3.14, # X coordinate.
801 &quot;y&quot;: 3.14, # Y coordinate.
802 },
803 ],
804 },
805 &quot;words&quot;: [ # List of all words in this paragraph.
806 { # A word representation.
807 &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the word. Range [0, 1].
808 &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the word.
809 # The vertices are in the order of top-left, top-right, bottom-right,
810 # bottom-left. When a rotation of the bounding box is detected the rotation
811 # is represented as around the top-left corner as defined when the text is
812 # read in the &#x27;natural&#x27; orientation.
813 # For example:
814 # * when the text is horizontal it might look like:
815 # 0----1
816 # | |
817 # 3----2
818 # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
819 # 2----3
820 # | |
821 # 1----0
822 # and the vertex order will still be (0, 1, 2, 3).
823 &quot;vertices&quot;: [ # The bounding polygon vertices.
824 { # A vertex represents a 2D point in the image.
825 # NOTE: the vertex coordinates are in the same scale as the original image.
826 &quot;x&quot;: 42, # X coordinate.
827 &quot;y&quot;: 42, # Y coordinate.
828 },
829 ],
830 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
831 { # A vertex represents a 2D point in the image.
832 # NOTE: the normalized vertex coordinates are relative to the original image
833 # and range from 0 to 1.
834 &quot;x&quot;: 3.14, # X coordinate.
835 &quot;y&quot;: 3.14, # Y coordinate.
836 },
837 ],
838 },
839 &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the word.
840 &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
841 &quot;type&quot;: &quot;A String&quot;, # Detected break type.
842 &quot;isPrefix&quot;: True or False, # True if break prepends the element.
843 },
844 &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
845 { # Detected language for a structural component.
846 &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
847 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
848 # information, see
849 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
850 },
851 ],
852 },
853 &quot;symbols&quot;: [ # List of symbols in the word.
854 # The order of the symbols follows the natural reading order.
855 { # A single symbol representation.
856 &quot;confidence&quot;: 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
857 &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the symbol.
858 &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
859 &quot;type&quot;: &quot;A String&quot;, # Detected break type.
860 &quot;isPrefix&quot;: True or False, # True if break prepends the element.
861 },
862 &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
863 { # Detected language for a structural component.
864 &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
865 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
866 # information, see
867 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
868 },
869 ],
870 },
871 &quot;text&quot;: &quot;A String&quot;, # The actual UTF-8 representation of the symbol.
872 &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
873 # The vertices are in the order of top-left, top-right, bottom-right,
874 # bottom-left. When a rotation of the bounding box is detected the rotation
875 # is represented as around the top-left corner as defined when the text is
876 # read in the &#x27;natural&#x27; orientation.
877 # For example:
878 # * when the text is horizontal it might look like:
879 # 0----1
880 # | |
881 # 3----2
882 # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
883 # 2----3
884 # | |
885 # 1----0
886 # and the vertex order will still be (0, 1, 2, 3).
887 &quot;vertices&quot;: [ # The bounding polygon vertices.
888 { # A vertex represents a 2D point in the image.
889 # NOTE: the vertex coordinates are in the same scale as the original image.
890 &quot;x&quot;: 42, # X coordinate.
891 &quot;y&quot;: 42, # Y coordinate.
892 },
893 ],
894 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
895 { # A vertex represents a 2D point in the image.
896 # NOTE: the normalized vertex coordinates are relative to the original image
897 # and range from 0 to 1.
898 &quot;x&quot;: 3.14, # X coordinate.
899 &quot;y&quot;: 3.14, # Y coordinate.
900 },
901 ],
902 },
903 },
904 ],
905 },
906 ],
907 },
908 ],
909 &quot;boundingBox&quot;: { # A bounding polygon for the detected image annotation. # The bounding box for the block.
910 # The vertices are in the order of top-left, top-right, bottom-right,
911 # bottom-left. When a rotation of the bounding box is detected the rotation
912 # is represented as around the top-left corner as defined when the text is
913 # read in the &#x27;natural&#x27; orientation.
914 # For example:
915 #
916 # * when the text is horizontal it might look like:
917 #
918 # 0----1
919 # | |
920 # 3----2
921 #
922 # * when it&#x27;s rotated 180 degrees around the top-left corner it becomes:
923 #
924 # 2----3
925 # | |
926 # 1----0
927 #
928 # and the vertex order will still be (0, 1, 2, 3).
929 &quot;vertices&quot;: [ # The bounding polygon vertices.
930 { # A vertex represents a 2D point in the image.
931 # NOTE: the vertex coordinates are in the same scale as the original image.
932 &quot;x&quot;: 42, # X coordinate.
933 &quot;y&quot;: 42, # Y coordinate.
934 },
935 ],
936 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
937 { # A vertex represents a 2D point in the image.
938 # NOTE: the normalized vertex coordinates are relative to the original image
939 # and range from 0 to 1.
940 &quot;x&quot;: 3.14, # X coordinate.
941 &quot;y&quot;: 3.14, # Y coordinate.
942 },
943 ],
944 },
945 &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the block. Range [0, 1].
946 &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected for the block.
947 &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
948 &quot;type&quot;: &quot;A String&quot;, # Detected break type.
949 &quot;isPrefix&quot;: True or False, # True if break prepends the element.
950 },
951 &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
952 { # Detected language for a structural component.
953 &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
954 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
955 # information, see
956 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
957 },
958 ],
959 },
960 },
961 ],
962 &quot;property&quot;: { # Additional information detected on the structural component. # Additional information detected on the page.
963 &quot;detectedBreak&quot;: { # Detected start or end of a structural component. # Detected start or end of a text segment.
964 &quot;type&quot;: &quot;A String&quot;, # Detected break type.
965 &quot;isPrefix&quot;: True or False, # True if break prepends the element.
966 },
967 &quot;detectedLanguages&quot;: [ # A list of detected languages together with confidence.
968 { # Detected language for a structural component.
969 &quot;confidence&quot;: 3.14, # Confidence of detected language. Range [0, 1].
970 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
971 # information, see
972 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
973 },
974 ],
975 },
976 &quot;width&quot;: 42, # Page width. For PDFs the unit is points. For images (including
977 # TIFFs) the unit is pixels.
978 &quot;confidence&quot;: 3.14, # Confidence of the OCR results on the page. Range [0, 1].
979 &quot;height&quot;: 42, # Page height. For PDFs the unit is points. For images (including
980 # TIFFs) the unit is pixels.
981 },
982 ],
983 },
984 &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
985 # Note that filled-in image annotations are guaranteed to be
986 # correct, even when `error` is set.
987 # different programming environments, including REST APIs and RPC APIs. It is
988 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
989 # three pieces of data: error code, error message, and error details.
990 #
991 # You can find out more about this error model and how to work with it in the
992 # [API Design Guide](https://cloud.google.com/apis/design/errors).
993 &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
994 &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
995 # message types for APIs to use.
996 {
997 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
998 },
999 ],
1000 &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
1001 # user-facing error message should be localized and sent in the
1002 # google.rpc.Status.details field, or localized by the client.
1003 },
1004 &quot;localizedObjectAnnotations&quot;: [ # If present, localized object detection has completed successfully.
1005 # This will be sorted descending by confidence score.
1006 { # Set of detected objects with bounding boxes.
1007 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
1008 &quot;vertices&quot;: [ # The bounding polygon vertices.
1009 { # A vertex represents a 2D point in the image.
1010 # NOTE: the vertex coordinates are in the same scale as the original image.
1011 &quot;x&quot;: 42, # X coordinate.
1012 &quot;y&quot;: 42, # Y coordinate.
1013 },
1014 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001015 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1016 { # A vertex represents a 2D point in the image.
1017 # NOTE: the normalized vertex coordinates are relative to the original image
1018 # and range from 0 to 1.
1019 &quot;x&quot;: 3.14, # X coordinate.
1020 &quot;y&quot;: 3.14, # Y coordinate.
1021 },
1022 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001023 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001024 &quot;name&quot;: &quot;A String&quot;, # Object name, expressed in its `language_code` language.
1025 &quot;mid&quot;: &quot;A String&quot;, # Object ID that should align with EntityAnnotation mid.
1026 &quot;languageCode&quot;: &quot;A String&quot;, # The BCP-47 language code, such as &quot;en-US&quot; or &quot;sr-Latn&quot;. For more
1027 # information, see
1028 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
1029 &quot;score&quot;: 3.14, # Score of the result. Range [0, 1].
1030 },
1031 ],
1032 &quot;labelAnnotations&quot;: [ # If present, label detection has completed successfully.
1033 { # Set of detected entity features.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001034 &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1035 # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
1036 # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
1037 # detected distant towering building, even though the confidence that
1038 # there is a tower in each image may be the same. Range [0, 1].
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001039 &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
1040 # `description` is expressed.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001041 &quot;locations&quot;: [ # The location information for the detected entity. Multiple
1042 # `LocationInfo` elements can be present because one location may
1043 # indicate the location of the scene in the image, and another location
1044 # may indicate the location of the place where the image was taken.
1045 # Location information is usually present for landmarks.
1046 { # Detected entity location information.
1047 &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1048 # of doubles representing degrees latitude and degrees longitude. Unless
1049 # specified otherwise, this must conform to the
1050 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1051 # standard&lt;/a&gt;. Values must be within normalized ranges.
1052 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1053 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1054 },
1055 },
1056 ],
1057 &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
1058 # [Google Knowledge Graph Search
1059 # API](https://developers.google.com/knowledge-graph/).
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001060 &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001061 &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
1062 # The accuracy of the entity detection in an image.
1063 # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
1064 # this field represents the confidence that there is a tower in the query
1065 # image. Range [0, 1].
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001066 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1067 # for `LABEL_DETECTION` features.
1068 &quot;vertices&quot;: [ # The bounding polygon vertices.
1069 { # A vertex represents a 2D point in the image.
1070 # NOTE: the vertex coordinates are in the same scale as the original image.
1071 &quot;x&quot;: 42, # X coordinate.
1072 &quot;y&quot;: 42, # Y coordinate.
1073 },
1074 ],
1075 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1076 { # A vertex represents a 2D point in the image.
1077 # NOTE: the normalized vertex coordinates are relative to the original image
1078 # and range from 0 to 1.
1079 &quot;x&quot;: 3.14, # X coordinate.
1080 &quot;y&quot;: 3.14, # Y coordinate.
1081 },
1082 ],
1083 },
1084 &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
1085 # fields, such as a score or string that qualifies the entity.
1086 { # A `Property` consists of a user-supplied name/value pair.
1087 &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
1088 &quot;value&quot;: &quot;A String&quot;, # Value of the property.
1089 &quot;name&quot;: &quot;A String&quot;, # Name of the property.
1090 },
1091 ],
1092 &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
1093 },
1094 ],
1095 &quot;logoAnnotations&quot;: [ # If present, logo detection has completed successfully.
1096 { # Set of detected entity features.
1097 &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1098 # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
1099 # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
1100 # detected distant towering building, even though the confidence that
1101 # there is a tower in each image may be the same. Range [0, 1].
1102 &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
1103 # `description` is expressed.
1104 &quot;locations&quot;: [ # The location information for the detected entity. Multiple
1105 # `LocationInfo` elements can be present because one location may
1106 # indicate the location of the scene in the image, and another location
1107 # may indicate the location of the place where the image was taken.
1108 # Location information is usually present for landmarks.
1109 { # Detected entity location information.
1110 &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1111 # of doubles representing degrees latitude and degrees longitude. Unless
1112 # specified otherwise, this must conform to the
1113 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1114 # standard&lt;/a&gt;. Values must be within normalized ranges.
1115 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1116 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1117 },
1118 },
1119 ],
1120 &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
1121 # [Google Knowledge Graph Search
1122 # API](https://developers.google.com/knowledge-graph/).
1123 &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
1124 &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
1125 # The accuracy of the entity detection in an image.
1126 # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
1127 # this field represents the confidence that there is a tower in the query
1128 # image. Range [0, 1].
1129 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1130 # for `LABEL_DETECTION` features.
1131 &quot;vertices&quot;: [ # The bounding polygon vertices.
1132 { # A vertex represents a 2D point in the image.
1133 # NOTE: the vertex coordinates are in the same scale as the original image.
1134 &quot;x&quot;: 42, # X coordinate.
1135 &quot;y&quot;: 42, # Y coordinate.
1136 },
1137 ],
1138 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1139 { # A vertex represents a 2D point in the image.
1140 # NOTE: the normalized vertex coordinates are relative to the original image
1141 # and range from 0 to 1.
1142 &quot;x&quot;: 3.14, # X coordinate.
1143 &quot;y&quot;: 3.14, # Y coordinate.
1144 },
1145 ],
1146 },
1147 &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
1148 # fields, such as a score or string that qualifies the entity.
1149 { # A `Property` consists of a user-supplied name/value pair.
1150 &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
1151 &quot;value&quot;: &quot;A String&quot;, # Value of the property.
1152 &quot;name&quot;: &quot;A String&quot;, # Name of the property.
1153 },
1154 ],
1155 &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001156 },
1157 ],
1158 &quot;context&quot;: { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
1159 # comes from.
1160 # information about the source of that image.
1161 &quot;uri&quot;: &quot;A String&quot;, # The URI of the file used to produce the image.
1162 &quot;pageNumber&quot;: 42, # If the file was a PDF or TIFF, this field gives the page number within
1163 # the file used to produce the image.
1164 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001165 &quot;faceAnnotations&quot;: [ # If present, face detection has completed successfully.
1166 { # A face annotation object contains the results of face detection.
1167 &quot;surpriseLikelihood&quot;: &quot;A String&quot;, # Surprise likelihood.
1168 &quot;headwearLikelihood&quot;: &quot;A String&quot;, # Headwear likelihood.
1169 &quot;angerLikelihood&quot;: &quot;A String&quot;, # Anger likelihood.
1170 &quot;detectionConfidence&quot;: 3.14, # Detection confidence. Range [0, 1].
1171 &quot;landmarkingConfidence&quot;: 3.14, # Face landmarking confidence. Range [0, 1].
1172 &quot;blurredLikelihood&quot;: &quot;A String&quot;, # Blurred likelihood.
1173 &quot;tiltAngle&quot;: 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
1174 # pointing relative to the image&#x27;s horizontal plane. Range [-180,180].
1175 &quot;sorrowLikelihood&quot;: &quot;A String&quot;, # Sorrow likelihood.
1176 &quot;panAngle&quot;: 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
1177 # pointing relative to the vertical plane perpendicular to the image. Range
1178 # [-180,180].
1179 &quot;landmarks&quot;: [ # Detected face landmarks.
1180 { # A face-specific landmark (for example, a face feature).
1181 &quot;position&quot;: { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
1182 # A valid Position must have both x and y coordinates.
1183 # The position coordinates are in the same scale as the original image.
1184 &quot;z&quot;: 3.14, # Z coordinate (or depth).
1185 &quot;y&quot;: 3.14, # Y coordinate.
1186 &quot;x&quot;: 3.14, # X coordinate.
1187 },
1188 &quot;type&quot;: &quot;A String&quot;, # Face landmark type.
1189 },
1190 ],
1191 &quot;rollAngle&quot;: 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
1192 # of the face relative to the image vertical about the axis perpendicular to
1193 # the face. Range [-180,180].
1194 &quot;underExposedLikelihood&quot;: &quot;A String&quot;, # Under-exposed likelihood.
1195 &quot;joyLikelihood&quot;: &quot;A String&quot;, # Joy likelihood.
1196 &quot;fdBoundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
1197 # `boundingPoly`, and encloses only the skin part of the face. Typically, it
1198 # is used to eliminate the face from any image analysis that detects the
1199 # &quot;amount of skin&quot; visible in an image. It is not based on the
1200 # landmarker results, only on the initial face detection, hence
1201 # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
1202 &quot;vertices&quot;: [ # The bounding polygon vertices.
1203 { # A vertex represents a 2D point in the image.
1204 # NOTE: the vertex coordinates are in the same scale as the original image.
1205 &quot;x&quot;: 42, # X coordinate.
1206 &quot;y&quot;: 42, # Y coordinate.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001207 },
1208 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001209 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1210 { # A vertex represents a 2D point in the image.
1211 # NOTE: the normalized vertex coordinates are relative to the original image
1212 # and range from 0 to 1.
1213 &quot;x&quot;: 3.14, # X coordinate.
1214 &quot;y&quot;: 3.14, # Y coordinate.
1215 },
1216 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001217 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001218 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
1219 # are in the original image&#x27;s scale.
1220 # The bounding box is computed to &quot;frame&quot; the face in accordance with human
1221 # expectations. It is based on the landmarker results.
1222 # Note that one or more x and/or y coordinates may not be generated in the
1223 # `BoundingPoly` (the polygon will be unbounded) if only a partial face
1224 # appears in the image to be annotated.
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001225 &quot;vertices&quot;: [ # The bounding polygon vertices.
1226 { # A vertex represents a 2D point in the image.
1227 # NOTE: the vertex coordinates are in the same scale as the original image.
1228 &quot;x&quot;: 42, # X coordinate.
1229 &quot;y&quot;: 42, # Y coordinate.
1230 },
1231 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001232 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1233 { # A vertex represents a 2D point in the image.
1234 # NOTE: the normalized vertex coordinates are relative to the original image
1235 # and range from 0 to 1.
1236 &quot;x&quot;: 3.14, # X coordinate.
1237 &quot;y&quot;: 3.14, # Y coordinate.
1238 },
1239 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001240 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001241 },
1242 ],
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001243 &quot;landmarkAnnotations&quot;: [ # If present, landmark detection has completed successfully.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001244 { # Set of detected entity features.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001245 &quot;topicality&quot;: 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
1246 # image. For example, the relevancy of &quot;tower&quot; is likely higher to an image
1247 # containing the detected &quot;Eiffel Tower&quot; than to an image containing a
1248 # detected distant towering building, even though the confidence that
1249 # there is a tower in each image may be the same. Range [0, 1].
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001250 &quot;locale&quot;: &quot;A String&quot;, # The language code for the locale in which the entity textual
1251 # `description` is expressed.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001252 &quot;locations&quot;: [ # The location information for the detected entity. Multiple
1253 # `LocationInfo` elements can be present because one location may
1254 # indicate the location of the scene in the image, and another location
1255 # may indicate the location of the place where the image was taken.
1256 # Location information is usually present for landmarks.
1257 { # Detected entity location information.
1258 &quot;latLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
1259 # of doubles representing degrees latitude and degrees longitude. Unless
1260 # specified otherwise, this must conform to the
1261 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1262 # standard&lt;/a&gt;. Values must be within normalized ranges.
1263 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1264 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1265 },
1266 },
1267 ],
1268 &quot;mid&quot;: &quot;A String&quot;, # Opaque entity ID. Some IDs may be available in
1269 # [Google Knowledge Graph Search
1270 # API](https://developers.google.com/knowledge-graph/).
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001271 &quot;description&quot;: &quot;A String&quot;, # Entity textual description, expressed in its `locale` language.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001272 &quot;confidence&quot;: 3.14, # **Deprecated. Use `score` instead.**
1273 # The accuracy of the entity detection in an image.
1274 # For example, for an image in which the &quot;Eiffel Tower&quot; entity is detected,
1275 # this field represents the confidence that there is a tower in the query
1276 # image. Range [0, 1].
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001277 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
1278 # for `LABEL_DETECTION` features.
1279 &quot;vertices&quot;: [ # The bounding polygon vertices.
1280 { # A vertex represents a 2D point in the image.
1281 # NOTE: the vertex coordinates are in the same scale as the original image.
1282 &quot;x&quot;: 42, # X coordinate.
1283 &quot;y&quot;: 42, # Y coordinate.
1284 },
1285 ],
1286 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1287 { # A vertex represents a 2D point in the image.
1288 # NOTE: the normalized vertex coordinates are relative to the original image
1289 # and range from 0 to 1.
1290 &quot;x&quot;: 3.14, # X coordinate.
1291 &quot;y&quot;: 3.14, # Y coordinate.
1292 },
1293 ],
1294 },
1295 &quot;properties&quot;: [ # Some entities may have optional user-supplied `Property` (name/value)
1296 # fields, such as a score or string that qualifies the entity.
1297 { # A `Property` consists of a user-supplied name/value pair.
1298 &quot;uint64Value&quot;: &quot;A String&quot;, # Value of numeric properties.
1299 &quot;value&quot;: &quot;A String&quot;, # Value of the property.
1300 &quot;name&quot;: &quot;A String&quot;, # Name of the property.
1301 },
1302 ],
1303 &quot;score&quot;: 3.14, # Overall score of the result. Range [0, 1].
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001304 },
1305 ],
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001306 },
1307 ],
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001308 },
1309 ],
1310 }</pre>
1311</div>
1312
1313<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07001314 <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001315 <pre>Run asynchronous image detection and annotation for a list of generic
1316files, such as PDF files, which may contain multiple pages and multiple
1317images per page. Progress and results can be retrieved through the
1318`google.longrunning.Operations` interface.
1319`Operation.metadata` contains `OperationMetadata` (metadata).
1320`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
1321
1322Args:
Dan O'Mearadd494642020-05-01 07:42:23 -07001323 body: object, The request body.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001324 The object takes the form of:
1325
1326{ # Multiple async file annotation requests are batched into a single service
1327 # call.
Bu Sun Kim65020912020-05-20 12:08:20 -07001328 &quot;requests&quot;: [ # Required. Individual async file annotation requests for this batch.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001329 { # An offline file annotation request.
Bu Sun Kim65020912020-05-20 12:08:20 -07001330 &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
Bu Sun Kim65020912020-05-20 12:08:20 -07001331 &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
1332 &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001333 # of the image. For example, if the desired aspect ratio is 4/3, the
1334 # corresponding float value should be 1.33333. If not specified, the
1335 # best possible crop is returned. The number of provided aspect ratios is
1336 # limited to a maximum of 16; any aspect ratios provided after the 16th are
1337 # ignored.
1338 3.14,
1339 ],
1340 },
Bu Sun Kim65020912020-05-20 12:08:20 -07001341 &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
Bu Sun Kim65020912020-05-20 12:08:20 -07001342 &quot;productCategories&quot;: [ # The list of product categories to search in. Currently, we only consider
1343 # the first category, and either &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;, &quot;toys-v2&quot;,
1344 # &quot;packagedgoods-v1&quot;, or &quot;general-v1&quot; should be specified. The legacy
1345 # categories &quot;homegoods&quot;, &quot;apparel&quot;, and &quot;toys&quot; are still supported but will
1346 # be deprecated. For new products, please use &quot;homegoods-v2&quot;, &quot;apparel-v2&quot;,
1347 # or &quot;toys-v2&quot; for better product search accuracy. It is recommended to
1348 # migrate existing products to these categories as well.
1349 &quot;A String&quot;,
1350 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001351 &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
1352 # If it is not specified, system discretion will be applied.
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001353 &quot;vertices&quot;: [ # The bounding polygon vertices.
1354 { # A vertex represents a 2D point in the image.
1355 # NOTE: the vertex coordinates are in the same scale as the original image.
1356 &quot;x&quot;: 42, # X coordinate.
1357 &quot;y&quot;: 42, # Y coordinate.
1358 },
1359 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001360 &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
1361 { # A vertex represents a 2D point in the image.
1362 # NOTE: the normalized vertex coordinates are relative to the original image
1363 # and range from 0 to 1.
1364 &quot;x&quot;: 3.14, # X coordinate.
1365 &quot;y&quot;: 3.14, # Y coordinate.
1366 },
1367 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001368 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001369 &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
1370 #
1371 # Format is:
1372 # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
1373 &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
1374 # on Product labels. We currently support an AND of OR of key-value
1375 # expressions, where each expression within an OR must have the same key. An
1376 # &#x27;=&#x27; should be used to connect the key and value.
1377 #
1378 # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
1379 # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
1380 # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001381 },
1382 &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
1383 # yields the best results since it enables automatic language detection. For
1384 # languages based on the Latin alphabet, setting `language_hints` is not
1385 # needed. In rare cases, when the language of the text in the image is known,
1386 # setting a hint will help get better results (although it will be a
1387 # significant hindrance if the hint is wrong). Text detection returns an
1388 # error if one or more of the specified languages is not one of the
1389 # [supported languages](https://cloud.google.com/vision/docs/languages).
1390 &quot;A String&quot;,
1391 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001392 &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
1393 &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
1394 # of doubles representing degrees latitude and degrees longitude. Unless
1395 # specified otherwise, this must conform to the
1396 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1397 # standard&lt;/a&gt;. Values must be within normalized ranges.
1398 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1399 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1400 },
1401 &quot;minLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
1402 # of doubles representing degrees latitude and degrees longitude. Unless
1403 # specified otherwise, this must conform to the
1404 # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
1405 # standard&lt;/a&gt;. Values must be within normalized ranges.
1406 &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
1407 &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
1408 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001409 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001410 &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
1411 &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
1412 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001413 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001414 &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
1415 &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
1416 # Note: As with all `bytes` fields, protobuffers use a pure binary
1417 # representation, whereas JSON representations use base64.
1418 #
1419 # Currently, this field only works for BatchAnnotateFiles requests. It does
1420 # not work for AsyncBatchAnnotateFiles requests.
1421 &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
1422 # &quot;image/gif&quot; are supported. Wildcards are not supported.
1423 &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
1424 &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
1425 # Google Cloud Storage object. Wildcards are not currently supported.
1426 },
1427 },
1428 &quot;features&quot;: [ # Required. Requested features.
1429 { # The type of Google Cloud Vision API detection to perform, and the maximum
1430 # number of results to return for that type. Multiple `Feature` objects can
1431 # be specified in the `features` list.
1432 &quot;type&quot;: &quot;A String&quot;, # The feature type.
1433 &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
1434 # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
1435 &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
1436 # Supported values: &quot;builtin/stable&quot; (the default if unset) and
1437 # &quot;builtin/latest&quot;.
1438 },
1439 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07001440 &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001441 &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
1442 # Google Cloud Storage.
1443 # The valid range is [1, 100]. If not specified, the default value is 20.
1444 #
1445 # For example, for one pdf file with 100 pages, 100 response protos will
1446 # be generated. If `batch_size` = 20, then 5 json files each
1447 # containing 20 response protos will be written under the prefix
1448 # `gcs_destination`.`uri`.
1449 #
1450 # Currently, batch_size only applies to GcsDestination, with potential future
1451 # support for other output configurations.
Bu Sun Kim65020912020-05-20 12:08:20 -07001452 &quot;gcsDestination&quot;: { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
1453 &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI prefix where the results will be stored. Results
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001454 # will be in JSON format and preceded by its corresponding input URI prefix.
1455 # This field can either represent a gcs file prefix or gcs directory. In
1456 # either case, the uri should be unique because in order to get all of the
1457 # output files, you will need to do a wildcard gcs search on the uri prefix
1458 # you provide.
1459 #
1460 # Examples:
1461 #
1462 # * File Prefix: gs://bucket-name/here/filenameprefix The output files
1463 # will be created in gs://bucket-name/here/ and the names of the
Bu Sun Kim65020912020-05-20 12:08:20 -07001464 # output files will begin with &quot;filenameprefix&quot;.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001465 #
1466 # * Directory Prefix: gs://bucket-name/some/location/ The output files
1467 # will be created in gs://bucket-name/some/location/ and the names of the
1468 # output files could be anything because there was no filename prefix
1469 # specified.
1470 #
1471 # If multiple outputs, each response is still AnnotateFileResponse, each of
1472 # which contains some subset of the full list of AnnotateImageResponse.
1473 # Multiple outputs can happen if, for example, the output JSON is too large
1474 # and overflows into multiple sharded files.
1475 },
1476 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001477 },
1478 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07001479 &quot;parent&quot;: &quot;A String&quot;, # Optional. Target project and location to make a call.
Dan O'Mearadd494642020-05-01 07:42:23 -07001480 #
1481 # Format: `projects/{project-id}/locations/{location-id}`.
1482 #
1483 # If no parent is specified, a region will be chosen automatically.
1484 #
1485 # Supported location-ids:
1486 # `us`: USA country only,
1487 # `asia`: East asia areas, like Japan, Taiwan,
1488 # `eu`: The European Union.
1489 #
1490 # Example: `projects/project-A/locations/eu`.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001491 }
1492
1493 x__xgafv: string, V1 error format.
1494 Allowed values
1495 1 - v1 error format
1496 2 - v2 error format
1497
1498Returns:
1499 An object of the form:
1500
1501 { # This resource represents a long-running operation that is the result of a
1502 # network API call.
Bu Sun Kim65020912020-05-20 12:08:20 -07001503 &quot;response&quot;: { # The normal response of the operation in case of success. If the original
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001504 # method returns no data on success, such as `Delete`, the response is
1505 # `google.protobuf.Empty`. If the original method is standard
1506 # `Get`/`Create`/`Update`, the response should be the resource. For other
1507 # methods, the response should have the type `XxxResponse`, where `Xxx`
1508 # is the original method name. For example, if the original method name
1509 # is `TakeSnapshot()`, the inferred response type is
1510 # `TakeSnapshotResponse`.
Bu Sun Kim65020912020-05-20 12:08:20 -07001511 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001512 },
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001513 &quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically
1514 # contains progress information and common metadata such as create time.
1515 # Some services might not provide such metadata. Any method that returns a
1516 # long-running operation should document the metadata type, if any.
1517 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
1518 },
Bu Sun Kim65020912020-05-20 12:08:20 -07001519 &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001520 # originally returns it. If you use the default HTTP mapping, the
1521 # `name` should be a resource name ending with `operations/{unique_id}`.
Bu Sun Kimd059ad82020-07-22 17:02:09 -07001522 &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
1523 # different programming environments, including REST APIs and RPC APIs. It is
1524 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
1525 # three pieces of data: error code, error message, and error details.
1526 #
1527 # You can find out more about this error model and how to work with it in the
1528 # [API Design Guide](https://cloud.google.com/apis/design/errors).
1529 &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
1530 &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
1531 # message types for APIs to use.
1532 {
1533 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
1534 },
1535 ],
1536 &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
1537 # user-facing error message should be localized and sent in the
1538 # google.rpc.Status.details field, or localized by the client.
1539 },
1540 &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
1541 # If `true`, the operation is completed, and either `error` or `response` is
1542 # available.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001543 }</pre>
1544</div>
1545
1546</body></html>