1<html><body>
2<style>
3
4body, h1, h2, h3, div, span, p, pre, a {
5 margin: 0;
6 padding: 0;
7 border: 0;
8 font-weight: inherit;
9 font-style: inherit;
10 font-size: 100%;
11 font-family: inherit;
12 vertical-align: baseline;
13}
14
15body {
16 font-size: 13px;
17 padding: 1em;
18}
19
20h1 {
21 font-size: 26px;
22 margin-bottom: 1em;
23}
24
25h2 {
26 font-size: 24px;
27 margin-bottom: 1em;
28}
29
30h3 {
31 font-size: 20px;
32 margin-bottom: 1em;
33 margin-top: 1em;
34}
35
36pre, code {
37 line-height: 1.5;
38 font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
39}
40
41pre {
42 margin-top: 0.5em;
43}
44
45h1, h2, h3, p {
46 font-family: Arial, sans-serif;
47}
48
49h1, h2, h3 {
50 border-bottom: solid #CCC 1px;
51}
52
53.toc_element {
54 margin-top: 0.5em;
55}
56
57.firstline {
58 margin-left: 2em;
59}
60
61.method {
62 margin-top: 1em;
63 border: solid 1px #CCC;
64 padding: 1em;
65 background: #EEE;
66}
67
68.details {
69 font-weight: bold;
70 font-size: 14px;
71}
72
73</style>
74
75<h1><a href="vision_v1.html">Google Cloud Vision API</a> . <a href="vision_v1.images.html">images</a></h1>
76<h2>Instance Methods</h2>
77<p class="toc_element">
78 <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
79<p class="firstline">Run image detection and annotation for a batch of images.</p>
80<h3>Method Details</h3>
81<div class="method">
82 <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
83 <pre>Run image detection and annotation for a batch of images.
84
85Args:
86 body: object, The request body. (required)
87 The object takes the form of:
88
89{ # Multiple image annotation requests are batched into a single service call.
90 "requests": [ # Individual image annotation requests for this batch.
91 { # Request for performing Google Cloud Vision API tasks over a user-provided
92 # image, with user-requested features.
93 "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
94 "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # lat/long rectangle that specifies the location of the image.
95 "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
96 # of doubles representing degrees latitude and degrees longitude. Unless
97 # specified otherwise, this must conform to the
98 # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
99 # standard</a>. Values must be within normalized ranges.
100 #
101 # Example of normalization code in Python:
102 #
103 # def NormalizeLongitude(longitude):
104 # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
105 # q, r = divmod(longitude, 360.0)
106 # if r > 180.0 or (r == 180.0 and q <= -1.0):
107 # return r - 360.0
108 # return r
109 #
110 # def NormalizeLatLng(latitude, longitude):
111 # """Wraps decimal degrees latitude and longitude to
112 # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
113 # r = latitude % 360.0
114 # if r <= 90.0:
115 # return r, NormalizeLongitude(longitude)
116 # elif r >= 270.0:
117 # return r - 360, NormalizeLongitude(longitude)
118 # else:
119 # return 180 - r, NormalizeLongitude(longitude + 180.0)
120 #
121 # assert 180.0 == NormalizeLongitude(180.0)
122 # assert -180.0 == NormalizeLongitude(-180.0)
123 # assert -179.0 == NormalizeLongitude(181.0)
124 # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
125 # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
126 # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
127 # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
128 # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
129 # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
130 # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
131 # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
132 # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
133 # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
134 #
135 # The code in logs/storage/validator/logs_validator_traits.cc treats this type
136 # as if it were annotated as ST_LOCATION.
137 "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
138 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
139 },
140 "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
141 # of doubles representing degrees latitude and degrees longitude. Unless
142 # specified otherwise, this must conform to the
143 # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
144 # standard</a>. Values must be within normalized ranges.
145 #
146 # Example of normalization code in Python:
147 #
148 # def NormalizeLongitude(longitude):
149 # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
150 # q, r = divmod(longitude, 360.0)
151 # if r > 180.0 or (r == 180.0 and q <= -1.0):
152 # return r - 360.0
153 # return r
154 #
155 # def NormalizeLatLng(latitude, longitude):
156 # """Wraps decimal degrees latitude and longitude to
157 # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
158 # r = latitude % 360.0
159 # if r <= 90.0:
160 # return r, NormalizeLongitude(longitude)
161 # elif r >= 270.0:
162 # return r - 360, NormalizeLongitude(longitude)
163 # else:
164 # return 180 - r, NormalizeLongitude(longitude + 180.0)
165 #
166 # assert 180.0 == NormalizeLongitude(180.0)
167 # assert -180.0 == NormalizeLongitude(-180.0)
168 # assert -179.0 == NormalizeLongitude(181.0)
169 # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
170 # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
171 # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
172 # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
173 # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
174 # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
175 # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
176 # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
177 # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
178 # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
179 #
180 # The code in logs/storage/validator/logs_validator_traits.cc treats this type
181 # as if it were annotated as ST_LOCATION.
182 "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
183 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
184 },
185 },
186 "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
187 # yields the best results since it enables automatic language detection. For
188 # languages based on the Latin alphabet, setting `language_hints` is not
189 # needed. In rare cases, when the language of the text in the image is known,
190 # setting a hint will help get better results (although it will be a
191 # significant hindrance if the hint is wrong). Text detection returns an
192 # error if one or more of the specified languages is not one of the
193 # [supported languages](/vision/docs/languages).
194 "A String",
195 ],
196 "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
197 "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
198 # of the image. For example, if the desired aspect ratio is 4/3, the
199 # corresponding float value should be 1.33333. If not specified, the
200 # best possible crop is returned. The number of provided aspect ratios is
201 # limited to a maximum of 16; any aspect ratios provided after the 16th are
202 # ignored.
203 3.14,
204 ],
205 },
206 },
207 "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
208 "content": "A String", # Image content, represented as a stream of bytes.
209 # Note: as with all `bytes` fields, protobuffers use a pure binary
210 # representation, whereas JSON representations use base64.
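 # For example (illustrative), in Python: base64.b64encode(image_bytes).decode('utf-8').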
211 "source": { # External image source (Google Cloud Storage image location). # Google Cloud Storage image location. If both `content` and `source`
212 # are provided for an image, `content` takes precedence and is
213 # used to perform the image annotation request.
214 "gcsImageUri": "A String", # NOTE: For new code `image_uri` below is preferred.
215 # Google Cloud Storage image URI, which must be in the following form:
216 # `gs://bucket_name/object_name` (for details, see
217 # [Google Cloud Storage Request
218 # URIs](https://cloud.google.com/storage/docs/reference-uris)).
219 # NOTE: Cloud Storage object versioning is not supported.
220 "imageUri": "A String", # Image URI which supports:
221 # 1) Google Cloud Storage image URI, which must be in the following form:
222 # `gs://bucket_name/object_name` (for details, see
223 # [Google Cloud Storage Request
224 # URIs](https://cloud.google.com/storage/docs/reference-uris)).
225 # NOTE: Cloud Storage object versioning is not supported.
226 # 2) Publicly accessible image HTTP/HTTPS URL.
227 # This is preferred over the legacy `gcs_image_uri` above. When both
228 # `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
229 # precedence.
230 },
231 },
232 "features": [ # Requested features.
233 { # Users describe the type of Google Cloud Vision API tasks to perform over
234 # images by using *Feature*s. Each Feature indicates a type of image
235 # detection task to perform. Features encode the Cloud Vision API
236 # vertical to operate on and the number of top-scoring results to return.
237 "type": "A String", # The feature type.
238 "maxResults": 42, # Maximum number of results of this type.
239 },
240 ],
241 },
242 ],
243 }
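
A minimal `body` value might look like the following (an illustrative sketch; the
feature type, `maxResults` value, and Cloud Storage URI are placeholders):

  { "requests": [
      { "image": { "source": { "imageUri": "gs://bucket_name/object_name" } },
        "features": [ { "type": "LABEL_DETECTION", "maxResults": 5 } ],
      },
    ],
  }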
244
245 x__xgafv: string, V1 error format.
246 Allowed values
247 1 - v1 error format
248 2 - v2 error format
249
250Returns:
251 An object of the form:
252
253 { # Response to a batch image annotation request.
254 "responses": [ # Individual responses to image annotation requests within the batch.
255 { # Response to an image annotation request.
256 "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
257 # methods over safe-search verticals (for example, adult, spoof, medical,
258 # violence).
259 "spoof": "A String", # Spoof likelihood. The likelihood that a modification
260 # was made to the image's canonical version to make it appear
261 # funny or offensive.
262 "violence": "A String", # Violence likelihood.
263 "medical": "A String", # Likelihood that this is a medical image.
264 "adult": "A String", # Represents the adult content likelihood for the image.
265 },
266 "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
267 { # Set of detected entity features.
268 "confidence": 3.14, # The accuracy of the entity detection in an image.
269 # For example, for an image in which the "Eiffel Tower" entity is detected,
270 # this field represents the confidence that there is a tower in the query
271 # image. Range [0, 1].
272 "description": "A String", # Entity textual description, expressed in its `locale` language.
273 "locale": "A String", # The language code for the locale in which the entity textual
274 # `description` is expressed.
275 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
276 # image. For example, the relevancy of "tower" is likely higher to an image
277 # containing the detected "Eiffel Tower" than to an image containing a
278 # detected distant towering building, even though the confidence that
279 # there is a tower in each image may be the same. Range [0, 1].
280 "locations": [ # The location information for the detected entity. Multiple
281 # `LocationInfo` elements can be present because one location may
282 # indicate the location of the scene in the image, and another location
283 # may indicate the location of the place where the image was taken.
284 # Location information is usually present for landmarks.
285 { # Detected entity location information.
286 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
287 # of doubles representing degrees latitude and degrees longitude. Unless
288 # specified otherwise, this must conform to the
289 # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
290 # standard</a>. Values must be within normalized ranges.
291 #
292 # Example of normalization code in Python:
293 #
294 # def NormalizeLongitude(longitude):
295 # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
296 # q, r = divmod(longitude, 360.0)
297 # if r > 180.0 or (r == 180.0 and q <= -1.0):
298 # return r - 360.0
299 # return r
300 #
301 # def NormalizeLatLng(latitude, longitude):
302 # """Wraps decimal degrees latitude and longitude to
303 # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
304 # r = latitude % 360.0
305 # if r <= 90.0:
306 # return r, NormalizeLongitude(longitude)
307 # elif r >= 270.0:
308 # return r - 360, NormalizeLongitude(longitude)
309 # else:
310 # return 180 - r, NormalizeLongitude(longitude + 180.0)
311 #
312 # assert 180.0 == NormalizeLongitude(180.0)
313 # assert -180.0 == NormalizeLongitude(-180.0)
314 # assert -179.0 == NormalizeLongitude(181.0)
315 # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
316 # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
317 # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
318 # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
319 # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
320 # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
321 # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
322 # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
323 # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
324 # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
325 #
326 # The code in logs/storage/validator/logs_validator_traits.cc treats this type
327 # as if it were annotated as ST_LOCATION.
328 "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
329 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
330 },
331 },
332 ],
333 "mid": "A String", # Opaque entity ID. Some IDs may be available in
334 # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
335 "score": 3.14, # Overall score of the result. Range [0, 1].
336 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
337 # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
338 # are produced for the entire text detected in an image region, followed by
339 # `boundingPoly`s for each word within the detected text.
340 "vertices": [ # The bounding polygon vertices.
341 { # A vertex represents a 2D point in the image.
342 # NOTE: the vertex coordinates are in the same scale as the original image.
343 "y": 42, # Y coordinate.
344 "x": 42, # X coordinate.
345 },
346 ],
347 },
348 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
349 # fields, such as a score or string that qualifies the entity.
350 { # A `Property` consists of a user-supplied name/value pair.
351 "uint64Value": "A String", # Value of numeric properties.
352 "name": "A String", # Name of the property.
353 "value": "A String", # Value of the property.
354 },
355 ],
356 },
357 ],
358 "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
359 "webEntities": [ # Deduced entities from similar images on the Internet.
360 { # Entity deduced from similar images on the Internet.
361 "entityId": "A String", # Opaque entity ID.
362 "score": 3.14, # Overall relevancy score for the entity.
363 # Not normalized and not comparable across different image queries.
364 "description": "A String", # Canonical description of the entity, in English.
365 },
366 ],
367 "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
368 { # Metadata for web pages.
369 "url": "A String", # The result web page URL.
370 "score": 3.14, # Overall relevancy score for the web page.
371 # Not normalized and not comparable across different image queries.
372 },
373 ],
374 "visuallySimilarImages": [ # The visually similar image results.
375 { # Metadata for online images.
376 "url": "A String", # The result image URL.
377 "score": 3.14, # Overall relevancy score for the image.
378 # Not normalized and not comparable across different image queries.
379 },
380 ],
381 "partialMatchingImages": [ # Partial matching images from the Internet.
382 # Those images are similar enough to share some key-point features. For
383 # example, an original image will likely have partial matching for its crops.
384 { # Metadata for online images.
385 "url": "A String", # The result image URL.
386 "score": 3.14, # Overall relevancy score for the image.
387 # Not normalized and not comparable across different image queries.
388 },
389 ],
390 "fullMatchingImages": [ # Fully matching images from the Internet.
391 # They're definite near-duplicates and most often a copy of the query image with
392 # merely a size change.
393 { # Metadata for online images.
394 "url": "A String", # The result image URL.
395 "score": 3.14, # Overall relevancy score for the image.
396 # Not normalized and not comparable across different image queries.
397 },
398 ],
399 },
400 "error": { # The `Status` type defines a logical error model that is suitable for different # If set, represents the error message for the operation.
401 # Note that filled-in image annotations are guaranteed to be
402 # correct, even when `error` is set.
403 # programming environments, including REST APIs and RPC APIs. It is used by
404 # [gRPC](https://github.com/grpc). The error model is designed to be:
405 #
406 # - Simple to use and understand for most users
407 # - Flexible enough to meet unexpected needs
408 #
409 # # Overview
410 #
411 # The `Status` message contains three pieces of data: error code, error message,
412 # and error details. The error code should be an enum value of
413 # google.rpc.Code, but it may accept additional error codes if needed. The
414 # error message should be a developer-facing English message that helps
415 # developers *understand* and *resolve* the error. If a localized user-facing
416 # error message is needed, put the localized message in the error details or
417 # localize it in the client. The optional error details may contain arbitrary
418 # information about the error. There is a predefined set of error detail types
419 # in the package `google.rpc` which can be used for common error conditions.
420 #
421 # # Language mapping
422 #
423 # The `Status` message is the logical representation of the error model, but it
424 # is not necessarily the actual wire format. When the `Status` message is
425 # exposed in different client libraries and different wire protocols, it can be
426 # mapped differently. For example, it will likely be mapped to some exceptions
427 # in Java, but more likely mapped to some error codes in C.
428 #
429 # # Other uses
430 #
431 # The error model and the `Status` message can be used in a variety of
432 # environments, either with or without APIs, to provide a
433 # consistent developer experience across different environments.
434 #
435 # Example uses of this error model include:
436 #
437 # - Partial errors. If a service needs to return partial errors to the client,
438 # it may embed the `Status` in the normal response to indicate the partial
439 # errors.
440 #
441 # - Workflow errors. A typical workflow has multiple steps. Each step may
442 # have a `Status` message for error reporting purpose.
443 #
444 # - Batch operations. If a client uses batch request and batch response, the
445 # `Status` message should be used directly inside batch response, one for
446 # each error sub-response.
447 #
448 # - Asynchronous operations. If an API call embeds asynchronous operation
449 # results in its response, the status of those operations should be
450 # represented directly using the `Status` message.
451 #
452 # - Logging. If some API errors are stored in logs, the message `Status` could
453 # be used directly after any stripping needed for security/privacy reasons.
454 "message": "A String", # A developer-facing error message, which should be in English. Any
455 # user-facing error message should be localized and sent in the
456 # google.rpc.Status.details field, or localized by the client.
457 "code": 42, # The status code, which should be an enum value of google.rpc.Code.
458 "details": [ # A list of messages that carry the error details. There will be a
459 # common set of message types for APIs to use.
460 {
461 "a_key": "", # Properties of the object. Contains field @type with type URL.
462 },
463 ],
464 },
465 "labelAnnotations": [ # If present, label detection has completed successfully.
466 { # Set of detected entity features.
467 "confidence": 3.14, # The accuracy of the entity detection in an image.
468 # For example, for an image in which the "Eiffel Tower" entity is detected,
469 # this field represents the confidence that there is a tower in the query
470 # image. Range [0, 1].
471 "description": "A String", # Entity textual description, expressed in its `locale` language.
472 "locale": "A String", # The language code for the locale in which the entity textual
473 # `description` is expressed.
474 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
475 # image. For example, the relevancy of "tower" is likely higher to an image
476 # containing the detected "Eiffel Tower" than to an image containing a
477 # detected distant towering building, even though the confidence that
478 # there is a tower in each image may be the same. Range [0, 1].
479 "locations": [ # The location information for the detected entity. Multiple
480 # `LocationInfo` elements can be present because one location may
481 # indicate the location of the scene in the image, and another location
482 # may indicate the location of the place where the image was taken.
483 # Location information is usually present for landmarks.
484 { # Detected entity location information.
485 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
486 # of doubles representing degrees latitude and degrees longitude. Unless
487 # specified otherwise, this must conform to the
488 # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
489 # standard</a>. Values must be within normalized ranges.
490 #
491 # Example of normalization code in Python:
492 #
493 # def NormalizeLongitude(longitude):
494 # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
495 # q, r = divmod(longitude, 360.0)
496 # if r > 180.0 or (r == 180.0 and q <= -1.0):
497 # return r - 360.0
498 # return r
499 #
500 # def NormalizeLatLng(latitude, longitude):
501 # """Wraps decimal degrees latitude and longitude to
502 # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
503 # r = latitude % 360.0
504 # if r <= 90.0:
505 # return r, NormalizeLongitude(longitude)
506 # elif r >= 270.0:
507 # return r - 360, NormalizeLongitude(longitude)
508 # else:
509 # return 180 - r, NormalizeLongitude(longitude + 180.0)
510 #
511 # assert 180.0 == NormalizeLongitude(180.0)
512 # assert -180.0 == NormalizeLongitude(-180.0)
513 # assert -179.0 == NormalizeLongitude(181.0)
514 # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
515 # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
516 # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
517 # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
518 # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
519 # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
520 # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
521 # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
522 # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
523 # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
524 #
525 # The code in logs/storage/validator/logs_validator_traits.cc treats this type
526 # as if it were annotated as ST_LOCATION.
527 "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
528 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
529 },
530 },
531 ],
532 "mid": "A String", # Opaque entity ID. Some IDs may be available in
533 # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
534 "score": 3.14, # Overall score of the result. Range [0, 1].
535 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
536 # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
537 # are produced for the entire text detected in an image region, followed by
538 # `boundingPoly`s for each word within the detected text.
539 "vertices": [ # The bounding polygon vertices.
540 { # A vertex represents a 2D point in the image.
541 # NOTE: the vertex coordinates are in the same scale as the original image.
542 "y": 42, # Y coordinate.
543 "x": 42, # X coordinate.
544 },
545 ],
546 },
547 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
548 # fields, such as a score or string that qualifies the entity.
549 { # A `Property` consists of a user-supplied name/value pair.
550 "uint64Value": "A String", # Value of numeric properties.
551 "name": "A String", # Name of the property.
552 "value": "A String", # Value of the property.
553 },
554 ],
555 },
556 ],
557 "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
558 "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
559 "colors": [ # RGB color values with their score and pixel fraction.
560 { # Color information consists of RGB channels, score, and the fraction of
561 # the image that the color occupies in the image.
562 "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
563 # for simplicity of conversion to/from color representations in various
564 # languages over compactness; for example, the fields of this representation
565 # can be trivially provided to the constructor of "java.awt.Color" in Java; it
566 # can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
567 # method in iOS; and, with just a little work, it can be easily formatted into
568 # a CSS "rgba()" string in JavaScript, as well. Here are some examples:
569 #
570 # Example (Java):
571 #
572 # import com.google.type.Color;
573 #
574 # // ...
575 # public static java.awt.Color fromProto(Color protocolor) {
576 # float alpha = protocolor.hasAlpha()
577 # ? protocolor.getAlpha().getValue()
578 # : 1.0;
579 #
580 # return new java.awt.Color(
581 # protocolor.getRed(),
582 # protocolor.getGreen(),
583 # protocolor.getBlue(),
584 # alpha);
585 # }
586 #
587 # public static Color toProto(java.awt.Color color) {
588 # float red = (float) color.getRed();
589 # float green = (float) color.getGreen();
590 # float blue = (float) color.getBlue();
591 # float denominator = 255.0;
592 # Color.Builder resultBuilder =
593 # Color
594 # .newBuilder()
595 # .setRed(red / denominator)
596 # .setGreen(green / denominator)
597 # .setBlue(blue / denominator);
598 # int alpha = color.getAlpha();
599 # if (alpha != 255) {
600 # result.setAlpha(
601 # FloatValue
602 # .newBuilder()
603 # .setValue(((float) alpha) / denominator)
604 # .build());
605 # }
606 # return resultBuilder.build();
607 # }
608 # // ...
609 #
610 # Example (iOS / Obj-C):
611 #
612 # // ...
613 # static UIColor* fromProto(Color* protocolor) {
614 # float red = [protocolor red];
615 # float green = [protocolor green];
616 # float blue = [protocolor blue];
617 # FloatValue* alpha_wrapper = [protocolor alpha];
618 # float alpha = 1.0;
619 # if (alpha_wrapper != nil) {
620 # alpha = [alpha_wrapper value];
621 # }
622 # return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
623 # }
624 #
625 # static Color* toProto(UIColor* color) {
626 # CGFloat red, green, blue, alpha;
627 # if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
628 # return nil;
629 # }
630 # Color* result = [Color alloc] init];
631 # [result setRed:red];
632 # [result setGreen:green];
633 # [result setBlue:blue];
634 # if (alpha <= 0.9999) {
635 # [result setAlpha:floatWrapperWithValue(alpha)];
636 # }
637 # [result autorelease];
638 # return result;
639 # }
640 # // ...
641 #
642 # Example (JavaScript):
643 #
644 # // ...
645 #
646 # var protoToCssColor = function(rgb_color) {
647 # var redFrac = rgb_color.red || 0.0;
648 # var greenFrac = rgb_color.green || 0.0;
649 # var blueFrac = rgb_color.blue || 0.0;
650 # var red = Math.floor(redFrac * 255);
651 # var green = Math.floor(greenFrac * 255);
652 # var blue = Math.floor(blueFrac * 255);
653 #
654 # if (!('alpha' in rgb_color)) {
655 # return rgbToCssColor_(red, green, blue);
656 # }
657 #
658 # var alphaFrac = rgb_color.alpha.value || 0.0;
659 # var rgbParams = [red, green, blue].join(',');
660 # return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
661 # };
662 #
663 # var rgbToCssColor_ = function(red, green, blue) {
664 # var rgbNumber = new Number((red << 16) | (green << 8) | blue);
665 # var hexString = rgbNumber.toString(16);
666 # var missingZeros = 6 - hexString.length;
667 # var resultBuilder = ['#'];
668 # for (var i = 0; i < missingZeros; i++) {
669 # resultBuilder.push('0');
670 # }
671 # resultBuilder.push(hexString);
672 # return resultBuilder.join('');
673 # };
674 #
675 # // ...
676 "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
677 "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
678 # the final pixel color is defined by the equation:
679 #
680 # pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
681 #
682 # This means that a value of 1.0 corresponds to a solid color, whereas
683 # a value of 0.0 corresponds to a completely transparent color. This
684 # uses a wrapper message rather than a simple float scalar so that it is
685 # possible to distinguish between a default value and the value being unset.
686 # If omitted, this color object is to be rendered as a solid color
687 # (as if the alpha value had been explicitly given with a value of 1.0).
688 "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
689 "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
690 },
691 "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
692 # Value in range [0, 1].
693 "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
694 },
695 ],
696 },
697 },
698 "faceAnnotations": [ # If present, face detection has completed successfully.
699 { # A face annotation object contains the results of face detection.
700 "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
701 # pointing relative to the vertical plane perpendicular to the image. Range
702 # [-180,180].
703 "sorrowLikelihood": "A String", # Sorrow likelihood.
704 "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
705 "underExposedLikelihood": "A String", # Under-exposed likelihood.
706 "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
707 "joyLikelihood": "A String", # Joy likelihood.
708 "landmarks": [ # Detected face landmarks.
709 { # A face-specific landmark (for example, a face feature).
710 # Landmark positions may fall outside the bounds of the image
711 # if the face is near one or more edges of the image.
712 # Therefore it is NOT guaranteed that `0 <= x < width` or
713 # `0 <= y < height`.
714 "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
715 # A valid Position must have both x and y coordinates.
716 # The position coordinates are in the same scale as the original image.
717 "y": 3.14, # Y coordinate.
718 "x": 3.14, # X coordinate.
719 "z": 3.14, # Z coordinate (or depth).
720 },
721 "type": "A String", # Face landmark type.
722 },
723 ],
724 "surpriseLikelihood": "A String", # Surprise likelihood.
725 "blurredLikelihood": "A String", # Blurred likelihood.
726 "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
727 # pointing relative to the image's horizontal plane. Range [-180,180].
728 "angerLikelihood": "A String", # Anger likelihood.
729 "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
730 # are in the original image's scale, as returned in `ImageParams`.
731 # The bounding box is computed to "frame" the face in accordance with human
732 # expectations. It is based on the landmarker results.
733 # Note that one or more x and/or y coordinates may not be generated in the
734 # `BoundingPoly` (the polygon will be unbounded) if only a partial face
735 # appears in the image to be annotated.
736 "vertices": [ # The bounding polygon vertices.
737 { # A vertex represents a 2D point in the image.
738 # NOTE: the vertex coordinates are in the same scale as the original image.
739 "y": 42, # Y coordinate.
740 "x": 42, # X coordinate.
741 },
742 ],
743 },
744 "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
745 # of the face relative to the image vertical about the axis perpendicular to
746 # the face. Range [-180,180].
747 "headwearLikelihood": "A String", # Headwear likelihood.
748 "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
749 # `boundingPoly`, and encloses only the skin part of the face. Typically, it
750 # is used to eliminate the face from any image analysis that detects the
751 # "amount of skin" visible in an image. It is not based on the
752 # landmarker results, only on the initial face detection, hence
753 # the <code>fd</code> (face detection) prefix.
754 "vertices": [ # The bounding polygon vertices.
755 { # A vertex represents a 2D point in the image.
756 # NOTE: the vertex coordinates are in the same scale as the original image.
757 "y": 42, # Y coordinate.
758 "x": 42, # X coordinate.
759 },
760 ],
761 },
762 },
763 ],
764 "logoAnnotations": [ # If present, logo detection has completed successfully.
765 { # Set of detected entity features.
766 "confidence": 3.14, # The accuracy of the entity detection in an image.
767 # For example, for an image in which the "Eiffel Tower" entity is detected,
768 # this field represents the confidence that there is a tower in the query
769 # image. Range [0, 1].
770 "description": "A String", # Entity textual description, expressed in its `locale` language.
771 "locale": "A String", # The language code for the locale in which the entity textual
772 # `description` is expressed.
773 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
774 # image. For example, the relevancy of "tower" is likely higher to an image
775 # containing the detected "Eiffel Tower" than to an image containing a
776 # detected distant towering building, even though the confidence that
777 # there is a tower in each image may be the same. Range [0, 1].
778 "locations": [ # The location information for the detected entity. Multiple
779 # `LocationInfo` elements can be present because one location may
780 # indicate the location of the scene in the image, and another location
781 # may indicate the location of the place where the image was taken.
782 # Location information is usually present for landmarks.
783 { # Detected entity location information.
784 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
785 # of doubles representing degrees latitude and degrees longitude. Unless
786 # specified otherwise, this must conform to the
787 # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
788 # standard</a>. Values must be within normalized ranges.
789 #
790 # Example of normalization code in Python:
791 #
792 # def NormalizeLongitude(longitude):
793 # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
794 # q, r = divmod(longitude, 360.0)
795 # if r > 180.0 or (r == 180.0 and q <= -1.0):
796 # return r - 360.0
797 # return r
798 #
799 # def NormalizeLatLng(latitude, longitude):
800 # """Wraps decimal degrees latitude and longitude to
801 # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
802 # r = latitude % 360.0
803 # if r <= 90.0:
804 # return r, NormalizeLongitude(longitude)
805 # elif r >= 270.0:
806 # return r - 360, NormalizeLongitude(longitude)
807 # else:
808 # return 180 - r, NormalizeLongitude(longitude + 180.0)
809 #
810 # assert 180.0 == NormalizeLongitude(180.0)
811 # assert -180.0 == NormalizeLongitude(-180.0)
812 # assert -179.0 == NormalizeLongitude(181.0)
813 # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
814 # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
815 # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
816 # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
817 # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
818 # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
819 # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
820 # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
821 # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
822 # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
823 #
824 # The code in logs/storage/validator/logs_validator_traits.cc treats this type
825 # as if it were annotated as ST_LOCATION.
826 "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
827 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
828 },
829 },
830 ],
831 "mid": "A String", # Opaque entity ID. Some IDs may be available in
832 # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
833 "score": 3.14, # Overall score of the result. Range [0, 1].
834 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
835 # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
836 # are produced for the entire text detected in an image region, followed by
837 # `boundingPoly`s for each word within the detected text.
838 "vertices": [ # The bounding polygon vertices.
839 { # A vertex represents a 2D point in the image.
840 # NOTE: the vertex coordinates are in the same scale as the original image.
841 "y": 42, # Y coordinate.
842 "x": 42, # X coordinate.
843 },
844 ],
845 },
846 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
847 # fields, such as a score or string that qualifies the entity.
848 { # A `Property` consists of a user-supplied name/value pair.
849 "uint64Value": "A String", # Value of numeric properties.
850 "name": "A String", # Name of the property.
851 "value": "A String", # Value of the property.
852 },
853 ],
854 },
855 ],
856 "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
857 { # Set of detected entity features.
858 "confidence": 3.14, # The accuracy of the entity detection in an image.
859 # For example, for an image in which the "Eiffel Tower" entity is detected,
860 # this field represents the confidence that there is a tower in the query
861 # image. Range [0, 1].
862 "description": "A String", # Entity textual description, expressed in its `locale` language.
863 "locale": "A String", # The language code for the locale in which the entity textual
864 # `description` is expressed.
865 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
866 # image. For example, the relevancy of "tower" is likely higher to an image
867 # containing the detected "Eiffel Tower" than to an image containing a
868 # detected distant towering building, even though the confidence that
869 # there is a tower in each image may be the same. Range [0, 1].
870 "locations": [ # The location information for the detected entity. Multiple
871 # `LocationInfo` elements can be present because one location may
872 # indicate the location of the scene in the image, and another location
873 # may indicate the location of the place where the image was taken.
874 # Location information is usually present for landmarks.
875 { # Detected entity location information.
876 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
877 # of doubles representing degrees latitude and degrees longitude. Unless
878 # specified otherwise, this must conform to the
879 # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
880 # standard</a>. Values must be within normalized ranges.
881 #
882 # Example of normalization code in Python:
883 #
884 # def NormalizeLongitude(longitude):
885 # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
886 # q, r = divmod(longitude, 360.0)
887 # if r > 180.0 or (r == 180.0 and q <= -1.0):
888 # return r - 360.0
889 # return r
890 #
891 # def NormalizeLatLng(latitude, longitude):
892 # """Wraps decimal degrees latitude and longitude to
893 # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
894 # r = latitude % 360.0
895 # if r <= 90.0:
896 # return r, NormalizeLongitude(longitude)
897 # elif r >= 270.0:
898 # return r - 360, NormalizeLongitude(longitude)
899 # else:
900 # return 180 - r, NormalizeLongitude(longitude + 180.0)
901 #
902 # assert 180.0 == NormalizeLongitude(180.0)
903 # assert -180.0 == NormalizeLongitude(-180.0)
904 # assert -179.0 == NormalizeLongitude(181.0)
905 # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
906 # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
907 # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
908 # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
909 # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
910 # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
911 # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
912 # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
913 # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
914 # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
915 #
916 # The code in logs/storage/validator/logs_validator_traits.cc treats this type
917 # as if it were annotated as ST_LOCATION.
918 "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
919 "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
920 },
921 },
922 ],
923 "mid": "A String", # Opaque entity ID. Some IDs may be available in
924 # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
925 "score": 3.14, # Overall score of the result. Range [0, 1].
926 "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
927 # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
928 # are produced for the entire text detected in an image region, followed by
929 # `boundingPoly`s for each word within the detected text.
930 "vertices": [ # The bounding polygon vertices.
931 { # A vertex represents a 2D point in the image.
932 # NOTE: the vertex coordinates are in the same scale as the original image.
933 "y": 42, # Y coordinate.
934 "x": 42, # X coordinate.
935 },
936 ],
937 },
938 "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
939 # fields, such as a score or string that qualifies the entity.
940 { # A `Property` consists of a user-supplied name/value pair.
941 "uint64Value": "A String", # Value of numeric properties.
942 "name": "A String", # Name of the property.
943 "value": "A String", # Value of the property.
944 },
945 ],
946 },
947 ],
948 "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
949 # completed successfully.
950 # This annotation provides the structural hierarchy for the OCR detected
951 # text.
952 # The hierarchy of an OCR extracted text structure is like this:
953 # TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
954 # Each structural component, starting from Page, may further have their own
955 # properties. Properties describe detected languages, breaks, etc. Please
956 # refer to the google.cloud.vision.v1.TextAnnotation.TextProperty message
957 # definition below for more detail.
958 "text": "A String", # UTF-8 text detected on the pages.
959 "pages": [ # List of pages detected by OCR.
960 { # Detected page from OCR.
961 "width": 42, # Page width in pixels.
962 "property": { # Additional information detected on the structural component. # Additional information detected on the page.
963 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
964 "type": "A String", # Detected break type.
965 "isPrefix": True or False, # True if break prepends the element.
966 },
967 "detectedLanguages": [ # A list of detected languages together with confidence.
968 { # Detected language for a structural component.
969 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
970 # information, see
971 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
972 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
973 },
974 ],
975 },
976 "blocks": [ # List of blocks of text, images etc on this page.
977 { # Logical element on the page.
978 "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the block.
979 # The vertices are in the order of top-left, top-right, bottom-right,
980 # bottom-left. When a rotation of the bounding box is detected the rotation
981 # is represented as around the top-left corner as defined when the text is
982 # read in the 'natural' orientation.
983 # For example:
984 # * when the text is horizontal it might look like:
985 # 0----1
986 # | |
987 # 3----2
988 # * when it's rotated 180 degrees around the top-left corner it becomes:
989 # 2----3
990 # | |
991 # 1----0
992 # and the vertex order will still be (0, 1, 2, 3).
993 "vertices": [ # The bounding polygon vertices.
994 { # A vertex represents a 2D point in the image.
995 # NOTE: the vertex coordinates are in the same scale as the original image.
996 "y": 42, # Y coordinate.
997 "x": 42, # X coordinate.
998 },
999 ],
1000 },
1001 "blockType": "A String", # Detected block type (text, image etc) for this block.
1002 "property": { # Additional information detected on the structural component. # Additional information detected for the block.
1003 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
1004 "type": "A String", # Detected break type.
1005 "isPrefix": True or False, # True if break prepends the element.
1006 },
1007 "detectedLanguages": [ # A list of detected languages together with confidence.
1008 { # Detected language for a structural component.
1009 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
1010 # information, see
1011 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
1012 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
1013 },
1014 ],
1015 },
1016 "paragraphs": [ # List of paragraphs in this block (if this blocks is of type text).
1017 { # Structural unit of text representing a number of words in certain order.
1018 "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
1019 # The vertices are in the order of top-left, top-right, bottom-right,
1020 # bottom-left. When a rotation of the bounding box is detected the rotation
1021 # is represented as around the top-left corner as defined when the text is
1022 # read in the 'natural' orientation.
1023 # For example:
1024 # * when the text is horizontal it might look like:
1025 # 0----1
1026 # | |
1027 # 3----2
1028 # * when it's rotated 180 degrees around the top-left corner it becomes:
1029 # 2----3
1030 # | |
1031 # 1----0
1032 # and the vertex order will still be (0, 1, 2, 3).
1033 "vertices": [ # The bounding polygon vertices.
1034 { # A vertex represents a 2D point in the image.
1035 # NOTE: the vertex coordinates are in the same scale as the original image.
1036 "y": 42, # Y coordinate.
1037 "x": 42, # X coordinate.
1038 },
1039 ],
1040 },
1041 "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
1042 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
1043 "type": "A String", # Detected break type.
1044 "isPrefix": True or False, # True if break prepends the element.
1045 },
1046 "detectedLanguages": [ # A list of detected languages together with confidence.
1047 { # Detected language for a structural component.
1048 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
1049 # information, see
1050 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
1051 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
1052 },
1053 ],
1054 },
1055 "words": [ # List of words in this paragraph.
1056 { # A word representation.
1057 "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
1058 # The vertices are in the order of top-left, top-right, bottom-right,
1059 # bottom-left. When a rotation of the bounding box is detected the rotation
1060 # is represented as around the top-left corner as defined when the text is
1061 # read in the 'natural' orientation.
1062 # For example:
1063 # * when the text is horizontal it might look like:
1064 # 0----1
1065 # | |
1066 # 3----2
1067 # * when it's rotated 180 degrees around the top-left corner it becomes:
1068 # 2----3
1069 # | |
1070 # 1----0
1071 # and the vertex order will still be (0, 1, 2, 3).
1072 "vertices": [ # The bounding polygon vertices.
1073 { # A vertex represents a 2D point in the image.
1074 # NOTE: the vertex coordinates are in the same scale as the original image.
1075 "y": 42, # Y coordinate.
1076 "x": 42, # X coordinate.
1077 },
1078 ],
1079 },
1080 "symbols": [ # List of symbols in the word.
1081 # The order of the symbols follows the natural reading order.
1082 { # A single symbol representation.
1083 "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
1084 # The vertices are in the order of top-left, top-right, bottom-right,
1085 # bottom-left. When a rotation of the bounding box is detected the rotation
1086 # is represented as around the top-left corner as defined when the text is
1087 # read in the 'natural' orientation.
1088 # For example:
1089 # * when the text is horizontal it might look like:
1090 # 0----1
1091 # | |
1092 # 3----2
1093 # * when it's rotated 180 degrees around the top-left corner it becomes:
1094 # 2----3
1095 # | |
1096 # 1----0
1097 # and the vertex order will still be (0, 1, 2, 3).
1098 "vertices": [ # The bounding polygon vertices.
1099 { # A vertex represents a 2D point in the image.
1100 # NOTE: the vertex coordinates are in the same scale as the original image.
1101 "y": 42, # Y coordinate.
1102 "x": 42, # X coordinate.
1103 },
1104 ],
1105 },
1106 "text": "A String", # The actual UTF-8 representation of the symbol.
1107 "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
1108 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
1109 "type": "A String", # Detected break type.
1110 "isPrefix": True or False, # True if break prepends the element.
1111 },
1112 "detectedLanguages": [ # A list of detected languages together with confidence.
1113 { # Detected language for a structural component.
1114 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
1115 # information, see
1116 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
1117 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
1118 },
1119 ],
1120 },
1121 },
1122 ],
1123 "property": { # Additional information detected on the structural component. # Additional information detected for the word.
1124 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
1125 "type": "A String", # Detected break type.
1126 "isPrefix": True or False, # True if break prepends the element.
1127 },
1128 "detectedLanguages": [ # A list of detected languages together with confidence.
1129 { # Detected language for a structural component.
1130 "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
1131 # information, see
1132 # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
1133 "confidence": 3.14, # Confidence of detected language. Range [0, 1].
1134 },
1135 ],
1136 },
1137 },
1138 ],
1139 },
1140 ],
1141 },
1142 ],
1143 "height": 42, # Page height in pixels.
1144 },
1145 ],
1146 },
1147 "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
1148 "cropHints": [ # Crop hint results.
1149 { # Single crop hint that is used to generate a new crop when serving an image.
1150 "confidence": 3.14, # Confidence of this being a salient region. Range [0, 1].
1151 "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
1152 # box are in the original image's scale, as returned in `ImageParams`.
1153 "vertices": [ # The bounding polygon vertices.
1154 { # A vertex represents a 2D point in the image.
1155 # NOTE: the vertex coordinates are in the same scale as the original image.
1156 "y": 42, # Y coordinate.
1157 "x": 42, # X coordinate.
1158 },
1159 ],
1160 },
1161 "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
1162 # image.
1163 },
1164 ],
1165 },
1166 },
1167 ],
1168 }</pre>
1169</div>
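<h3>Example</h3>
<p>A minimal sketch of calling <code>annotate</code> with the google-api-python-client library. It assumes credentials are already available (for example, application default credentials); the file name, feature type, and <code>maxResults</code> value are illustrative.</p>
<pre>
import base64

from googleapiclient.discovery import build

# Build the Cloud Vision v1 client (picks up application default credentials).
service = build('vision', 'v1')

# Read a local image and base64-encode it; JSON requests carry `content` as base64.
with open('image.jpg', 'rb') as f:
    encoded_image = base64.b64encode(f.read()).decode('utf-8')

body = {
    'requests': [
        {
            'image': {'content': encoded_image},
            'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
        },
    ],
}

# Run image detection and annotation for the batch of images.
response = service.images().annotate(body=body).execute()

# Each element of `responses` corresponds to one request in the batch.
for label in response['responses'][0].get('labelAnnotations', []):
    print(label['description'], label['score'])
</pre>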
1170
1171</body></html>