<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1.html">Google Cloud Vision API</a> . <a href="vision_v1.images.html">images</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
<p class="firstline">Run image detection and annotation for a batch of images.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
  <pre>Run image detection and annotation for a batch of images.

Args:
  body: object, The request body. (required)
    The object takes the form of:

{ # Multiple image annotation requests are batched into a single service call.
    "requests": [ # Individual image annotation requests for this batch.
      { # Request for performing Google Cloud Vision API tasks over a user-provided
          # image, with user-requested features.
        "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
          "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # lat/long rectangle that specifies the location of the image.
            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                # standard</a>. Values must be within normalized ranges.
                #
                # Example of normalization code in Python:
                #
                #     def NormalizeLongitude(longitude):
                #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                #       q, r = divmod(longitude, 360.0)
                #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                #         return r - 360.0
                #       return r
                #
                #     def NormalizeLatLng(latitude, longitude):
                #       """Wraps decimal degrees latitude and longitude to
                #          [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                #       r = latitude % 360.0
                #       if r <= 90.0:
                #         return r, NormalizeLongitude(longitude)
                #       elif r >= 270.0:
                #         return r - 360, NormalizeLongitude(longitude)
                #       else:
                #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                #
                #     assert 180.0 == NormalizeLongitude(180.0)
                #     assert -180.0 == NormalizeLongitude(-180.0)
                #     assert -179.0 == NormalizeLongitude(181.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            },
            "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                # of doubles representing degrees latitude and degrees longitude. Unless
                # specified otherwise, this must conform to the
                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                # standard</a>. Values must be within normalized ranges.
                #
                # Example of normalization code in Python:
                #
                #     def NormalizeLongitude(longitude):
                #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                #       q, r = divmod(longitude, 360.0)
                #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                #         return r - 360.0
                #       return r
                #
                #     def NormalizeLatLng(latitude, longitude):
                #       """Wraps decimal degrees latitude and longitude to
                #          [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                #       r = latitude % 360.0
                #       if r <= 90.0:
                #         return r, NormalizeLongitude(longitude)
                #       elif r >= 270.0:
                #         return r - 360, NormalizeLongitude(longitude)
                #       else:
                #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                #
                #     assert 180.0 == NormalizeLongitude(180.0)
                #     assert -180.0 == NormalizeLongitude(-180.0)
                #     assert -179.0 == NormalizeLongitude(181.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
            },
          },
          "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
              # yields the best results since it enables automatic language detection. For
              # languages based on the Latin alphabet, setting `language_hints` is not
              # needed. In rare cases, when the language of the text in the image is known,
              # setting a hint will help get better results (although it will be a
              # significant hindrance if the hint is wrong). Text detection returns an
              # error if one or more of the specified languages is not one of the
              # [supported languages](/vision/docs/languages).
            "A String",
          ],
        },
        "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
          "content": "A String", # Image content, represented as a stream of bytes.
              # Note: as with all `bytes` fields, protobuffers use a pure binary
              # representation, whereas JSON representations use base64.
          "source": { # External image source (Google Cloud Storage image location). # Google Cloud Storage image location. If both `content` and `source`
              # are provided for an image, `content` takes precedence and is
              # used to perform the image annotation request.
            "gcsImageUri": "A String", # Google Cloud Storage image URI, which must be in the following form:
                # `gs://bucket_name/object_name` (for details, see
                # [Google Cloud Storage Request URIs](https://cloud.google.com/storage/docs/reference-uris)).
                # NOTE: Cloud Storage object versioning is not supported.
          },
        },
        "features": [ # Requested features.
          { # Users describe the type of Google Cloud Vision API tasks to perform over
              # images by using *Feature*s. Each Feature indicates a type of image
              # detection task to perform. Features encode the Cloud Vision API
              # vertical to operate on and the number of top-scoring results to return.
            "type": "A String", # The feature type.
            "maxResults": 42, # Maximum number of results of this type.
          },
        ],
      },
    ],
  }
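
  For illustration only (this example is not part of the generated schema
  above): assuming `encoded_image` already holds a base64-encoded image
  string, a minimal `body` for a single label-detection request might look
  like:

    {
      "requests": [
        {
          "image": {"content": encoded_image},
          "features": [{"type": "LABEL_DETECTION", "maxResults": 5}],
        },
      ],
    }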

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Response to a batch image annotation request.
    "responses": [ # Individual responses to image annotation requests within the batch.
      { # Response to an image annotation request.
        "safeSearchAnnotation": { # If present, safe-search annotation has completed successfully.
          "medical": "A String", # Likelihood that this is a medical image.
          "violence": "A String", # Violence likelihood.
          "spoof": "A String", # Spoof likelihood. The likelihood that a modification
              # was made to the image's canonical version to make it appear
              # funny or offensive.
          "adult": "A String", # Represents the adult content likelihood for the image.
        },
        "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
          { # Set of detected entity features.
            "confidence": 3.14, # The accuracy of the entity detection in an image.
                # For example, for an image in which the "Eiffel Tower" entity is detected,
                # this field represents the confidence that there is a tower in the query
                # image. Range [0, 1].
            "description": "A String", # Entity textual description, expressed in its `locale` language.
            "locale": "A String", # The language code for the locale in which the entity textual
                # `description` is expressed.
            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                # image. For example, the relevancy of "tower" is likely higher to an image
                # containing the detected "Eiffel Tower" than to an image containing a
                # detected distant towering building, even though the confidence that
                # there is a tower in each image may be the same. Range [0, 1].
            "mid": "A String", # Opaque entity ID. Some IDs may be available in
                # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
            "locations": [ # The location information for the detected entity. Multiple
                # `LocationInfo` elements can be present because one location may
                # indicate the location of the scene in the image, and another location
                # may indicate the location of the place where the image was taken.
                # Location information is usually present for landmarks.
              { # Detected entity location information.
                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                    # of doubles representing degrees latitude and degrees longitude. Unless
                    # specified otherwise, this must conform to the
                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                    # standard</a>. Values must be within normalized ranges.
                    #
                    # Example of normalization code in Python:
                    #
                    #     def NormalizeLongitude(longitude):
                    #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                    #       q, r = divmod(longitude, 360.0)
                    #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                    #         return r - 360.0
                    #       return r
                    #
                    #     def NormalizeLatLng(latitude, longitude):
                    #       """Wraps decimal degrees latitude and longitude to
                    #          [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                    #       r = latitude % 360.0
                    #       if r <= 90.0:
                    #         return r, NormalizeLongitude(longitude)
                    #       elif r >= 270.0:
                    #         return r - 360, NormalizeLongitude(longitude)
                    #       else:
                    #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                    #
                    #     assert 180.0 == NormalizeLongitude(180.0)
                    #     assert -180.0 == NormalizeLongitude(-180.0)
                    #     assert -179.0 == NormalizeLongitude(181.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                    #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                    #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                    #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                    #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                },
              },
            ],
            "score": 3.14, # Overall score of the result. Range [0, 1].
            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
                # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                # are produced for the entire text detected in an image region, followed by
                # `boundingPoly`s for each word within the detected text.
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
              { # A `Property` consists of a user-supplied name/value pair.
                "name": "A String", # Name of the property.
                "value": "A String", # Value of the property.
              },
            ],
          },
        ],
        "labelAnnotations": [ # If present, label detection has completed successfully.
          { # Set of detected entity features.
            "confidence": 3.14, # The accuracy of the entity detection in an image.
                # For example, for an image in which the "Eiffel Tower" entity is detected,
                # this field represents the confidence that there is a tower in the query
                # image. Range [0, 1].
            "description": "A String", # Entity textual description, expressed in its `locale` language.
            "locale": "A String", # The language code for the locale in which the entity textual
                # `description` is expressed.
            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                # image. For example, the relevancy of "tower" is likely higher to an image
                # containing the detected "Eiffel Tower" than to an image containing a
                # detected distant towering building, even though the confidence that
                # there is a tower in each image may be the same. Range [0, 1].
            "mid": "A String", # Opaque entity ID. Some IDs may be available in
                # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
            "locations": [ # The location information for the detected entity. Multiple
                # `LocationInfo` elements can be present because one location may
                # indicate the location of the scene in the image, and another location
                # may indicate the location of the place where the image was taken.
                # Location information is usually present for landmarks.
              { # Detected entity location information.
                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                    # of doubles representing degrees latitude and degrees longitude. Unless
                    # specified otherwise, this must conform to the
                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                    # standard</a>. Values must be within normalized ranges.
                    #
                    # Example of normalization code in Python:
                    #
                    #     def NormalizeLongitude(longitude):
                    #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                    #       q, r = divmod(longitude, 360.0)
                    #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                    #         return r - 360.0
                    #       return r
                    #
                    #     def NormalizeLatLng(latitude, longitude):
                    #       """Wraps decimal degrees latitude and longitude to
                    #          [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                    #       r = latitude % 360.0
                    #       if r <= 90.0:
                    #         return r, NormalizeLongitude(longitude)
                    #       elif r >= 270.0:
                    #         return r - 360, NormalizeLongitude(longitude)
                    #       else:
                    #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                    #
                    #     assert 180.0 == NormalizeLongitude(180.0)
                    #     assert -180.0 == NormalizeLongitude(-180.0)
                    #     assert -179.0 == NormalizeLongitude(181.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                    #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                    #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                    #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                    #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                },
              },
            ],
            "score": 3.14, # Overall score of the result. Range [0, 1].
            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
                # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                # are produced for the entire text detected in an image region, followed by
                # `boundingPoly`s for each word within the detected text.
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
              { # A `Property` consists of a user-supplied name/value pair.
                "name": "A String", # Name of the property.
                "value": "A String", # Value of the property.
              },
            ],
          },
        ],
        "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
          "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
            "colors": [ # RGB color values with their score and pixel fraction.
              { # Color information consists of RGB channels, score, and the fraction of
                  # the image that the color occupies in the image.
                "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                    # for simplicity of conversion to/from color representations in various
                    # languages over compactness; for example, the fields of this representation
                    # can be trivially provided to the constructor of "java.awt.Color" in Java; it
                    # can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
                    # method in iOS; and, with just a little work, it can be easily formatted into
                    # a CSS "rgba()" string in JavaScript, as well. Here are some examples:
                    #
                    # Example (Java):
                    #
                    #     import com.google.type.Color;
                    #
                    #     // ...
                    #     public static java.awt.Color fromProto(Color protocolor) {
                    #       float alpha = protocolor.hasAlpha()
                    #           ? protocolor.getAlpha().getValue()
                    #           : 1.0;
                    #
                    #       return new java.awt.Color(
                    #           protocolor.getRed(),
                    #           protocolor.getGreen(),
                    #           protocolor.getBlue(),
                    #           alpha);
                    #     }
                    #
                    #     public static Color toProto(java.awt.Color color) {
                    #       float red = (float) color.getRed();
                    #       float green = (float) color.getGreen();
                    #       float blue = (float) color.getBlue();
                    #       float denominator = 255.0;
                    #       Color.Builder resultBuilder =
                    #           Color
                    #               .newBuilder()
                    #               .setRed(red / denominator)
                    #               .setGreen(green / denominator)
                    #               .setBlue(blue / denominator);
                    #       int alpha = color.getAlpha();
                    #       if (alpha != 255) {
                    #         resultBuilder.setAlpha(
                    #             FloatValue
                    #                 .newBuilder()
                    #                 .setValue(((float) alpha) / denominator)
                    #                 .build());
                    #       }
                    #       return resultBuilder.build();
                    #     }
                    #     // ...
                    #
                    # Example (iOS / Obj-C):
                    #
                    #     // ...
                    #     static UIColor* fromProto(Color* protocolor) {
                    #       float red = [protocolor red];
                    #       float green = [protocolor green];
                    #       float blue = [protocolor blue];
                    #       FloatValue* alpha_wrapper = [protocolor alpha];
                    #       float alpha = 1.0;
                    #       if (alpha_wrapper != nil) {
                    #         alpha = [alpha_wrapper value];
                    #       }
                    #       return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                    #     }
                    #
                    #     static Color* toProto(UIColor* color) {
                    #       CGFloat red, green, blue, alpha;
                    #       if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
                    #         return nil;
                    #       }
                    #       Color* result = [[Color alloc] init];
                    #       [result setRed:red];
                    #       [result setGreen:green];
                    #       [result setBlue:blue];
                    #       if (alpha <= 0.9999) {
                    #         [result setAlpha:floatWrapperWithValue(alpha)];
                    #       }
                    #       [result autorelease];
                    #       return result;
                    #     }
                    #     // ...
                    #
                    # Example (JavaScript):
                    #
                    #     // ...
                    #
                    #     var protoToCssColor = function(rgb_color) {
                    #       var redFrac = rgb_color.red || 0.0;
                    #       var greenFrac = rgb_color.green || 0.0;
                    #       var blueFrac = rgb_color.blue || 0.0;
                    #       var red = Math.floor(redFrac * 255);
                    #       var green = Math.floor(greenFrac * 255);
                    #       var blue = Math.floor(blueFrac * 255);
                    #
                    #       if (!('alpha' in rgb_color)) {
                    #         return rgbToCssColor_(red, green, blue);
                    #       }
                    #
                    #       var alphaFrac = rgb_color.alpha.value || 0.0;
                    #       var rgbParams = [red, green, blue].join(',');
                    #       return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
                    #     };
                    #
                    #     var rgbToCssColor_ = function(red, green, blue) {
                    #       var rgbNumber = new Number((red << 16) | (green << 8) | blue);
                    #       var hexString = rgbNumber.toString(16);
                    #       var missingZeros = 6 - hexString.length;
                    #       var resultBuilder = ['#'];
                    #       for (var i = 0; i < missingZeros; i++) {
                    #         resultBuilder.push('0');
                    #       }
                    #       resultBuilder.push(hexString);
                    #       return resultBuilder.join('');
                    #     };
                    #
                    #     // ...
                  "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                  "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
                      # the final pixel color is defined by the equation:
                      #
                      #   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                      #
                      # This means that a value of 1.0 corresponds to a solid color, whereas
                      # a value of 0.0 corresponds to a completely transparent color. This
                      # uses a wrapper message rather than a simple float scalar so that it is
                      # possible to distinguish between a default value and the value being unset.
                      # If omitted, this color object is to be rendered as a solid color
                      # (as if the alpha value had been explicitly given with a value of 1.0).
                  "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
                  "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
                },
                "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
                    # Value in range [0, 1].
                "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
              },
            ],
          },
        },
        "faceAnnotations": [ # If present, face detection has completed successfully.
          { # A face annotation object contains the results of face detection.
            "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
                # pointing relative to the vertical plane perpendicular to the image. Range
                # [-180,180].
            "sorrowLikelihood": "A String", # Sorrow likelihood.
            "underExposedLikelihood": "A String", # Under-exposed likelihood.
            "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
            "joyLikelihood": "A String", # Joy likelihood.
            "landmarks": [ # Detected face landmarks.
              { # A face-specific landmark (for example, a face feature).
                  # Landmark positions may fall outside the bounds of the image
                  # if the face is near one or more edges of the image.
                  # Therefore it is NOT guaranteed that `0 <= x < width` or
                  # `0 <= y < height`.
                "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
                    # A valid Position must have both x and y coordinates.
                    # The position coordinates are in the same scale as the original image.
                  "y": 3.14, # Y coordinate.
                  "x": 3.14, # X coordinate.
                  "z": 3.14, # Z coordinate (or depth).
                },
                "type": "A String", # Face landmark type.
              },
            ],
            "surpriseLikelihood": "A String", # Surprise likelihood.
            "blurredLikelihood": "A String", # Blurred likelihood.
            "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
                # pointing relative to the image's horizontal plane. Range [-180,180].
            "angerLikelihood": "A String", # Anger likelihood.
            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                # are in the original image's scale, as returned in `ImageParams`.
                # The bounding box is computed to "frame" the face in accordance with human
                # expectations. It is based on the landmarker results.
                # Note that one or more x and/or y coordinates may not be generated in the
                # `BoundingPoly` (the polygon will be unbounded) if only a partial face
                # appears in the image to be annotated.
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
                # of the face relative to the image vertical about the axis perpendicular to
                # the face. Range [-180,180].
            "headwearLikelihood": "A String", # Headwear likelihood.
            "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
                # `boundingPoly`, and encloses only the skin part of the face. Typically, it
                # is used to eliminate the face from any image analysis that detects the
                # "amount of skin" visible in an image. It is not based on the
                # landmarker results, only on the initial face detection, hence
                # the <code>fd</code> (face detection) prefix.
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
          },
        ],
        "logoAnnotations": [ # If present, logo detection has completed successfully.
          { # Set of detected entity features.
            "confidence": 3.14, # The accuracy of the entity detection in an image.
                # For example, for an image in which the "Eiffel Tower" entity is detected,
                # this field represents the confidence that there is a tower in the query
                # image. Range [0, 1].
            "description": "A String", # Entity textual description, expressed in its `locale` language.
            "locale": "A String", # The language code for the locale in which the entity textual
                # `description` is expressed.
            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                # image. For example, the relevancy of "tower" is likely higher to an image
                # containing the detected "Eiffel Tower" than to an image containing a
                # detected distant towering building, even though the confidence that
                # there is a tower in each image may be the same. Range [0, 1].
            "mid": "A String", # Opaque entity ID. Some IDs may be available in
                # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
            "locations": [ # The location information for the detected entity. Multiple
                # `LocationInfo` elements can be present because one location may
                # indicate the location of the scene in the image, and another location
                # may indicate the location of the place where the image was taken.
                # Location information is usually present for landmarks.
              { # Detected entity location information.
                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                    # of doubles representing degrees latitude and degrees longitude. Unless
                    # specified otherwise, this must conform to the
                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                    # standard</a>. Values must be within normalized ranges.
                    #
                    # Example of normalization code in Python:
                    #
                    #     def NormalizeLongitude(longitude):
                    #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                    #       q, r = divmod(longitude, 360.0)
                    #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                    #         return r - 360.0
                    #       return r
                    #
                    #     def NormalizeLatLng(latitude, longitude):
                    #       """Wraps decimal degrees latitude and longitude to
                    #          [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                    #       r = latitude % 360.0
                    #       if r <= 90.0:
                    #         return r, NormalizeLongitude(longitude)
                    #       elif r >= 270.0:
                    #         return r - 360, NormalizeLongitude(longitude)
                    #       else:
                    #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                    #
                    #     assert 180.0 == NormalizeLongitude(180.0)
                    #     assert -180.0 == NormalizeLongitude(-180.0)
                    #     assert -179.0 == NormalizeLongitude(181.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                    #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                    #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                    #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                    #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                },
              },
            ],
            "score": 3.14, # Overall score of the result. Range [0, 1].
            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
                # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                # are produced for the entire text detected in an image region, followed by
                # `boundingPoly`s for each word within the detected text.
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
              { # A `Property` consists of a user-supplied name/value pair.
                "name": "A String", # Name of the property.
                "value": "A String", # Value of the property.
              },
            ],
          },
        ],
        "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
          { # Set of detected entity features.
            "confidence": 3.14, # The accuracy of the entity detection in an image.
                # For example, for an image in which the "Eiffel Tower" entity is detected,
                # this field represents the confidence that there is a tower in the query
                # image. Range [0, 1].
            "description": "A String", # Entity textual description, expressed in its `locale` language.
            "locale": "A String", # The language code for the locale in which the entity textual
                # `description` is expressed.
            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                # image. For example, the relevancy of "tower" is likely higher to an image
                # containing the detected "Eiffel Tower" than to an image containing a
                # detected distant towering building, even though the confidence that
                # there is a tower in each image may be the same. Range [0, 1].
            "mid": "A String", # Opaque entity ID. Some IDs may be available in
                # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
            "locations": [ # The location information for the detected entity. Multiple
                # `LocationInfo` elements can be present because one location may
                # indicate the location of the scene in the image, and another location
                # may indicate the location of the place where the image was taken.
                # Location information is usually present for landmarks.
              { # Detected entity location information.
                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                    # of doubles representing degrees latitude and degrees longitude. Unless
                    # specified otherwise, this must conform to the
                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                    # standard</a>. Values must be within normalized ranges.
                    #
                    # Example of normalization code in Python:
                    #
                    #     def NormalizeLongitude(longitude):
                    #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                    #       q, r = divmod(longitude, 360.0)
                    #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                    #         return r - 360.0
                    #       return r
                    #
                    #     def NormalizeLatLng(latitude, longitude):
                    #       """Wraps decimal degrees latitude and longitude to
                    #          [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                    #       r = latitude % 360.0
                    #       if r <= 90.0:
                    #         return r, NormalizeLongitude(longitude)
                    #       elif r >= 270.0:
                    #         return r - 360, NormalizeLongitude(longitude)
                    #       else:
                    #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                    #
                    #     assert 180.0 == NormalizeLongitude(180.0)
                    #     assert -180.0 == NormalizeLongitude(-180.0)
                    #     assert -179.0 == NormalizeLongitude(181.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                    #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                    #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                    #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                    #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                    #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                    #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                    #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                },
              },
            ],
            "score": 3.14, # Overall score of the result. Range [0, 1].
            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
                # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                # are produced for the entire text detected in an image region, followed by
                # `boundingPoly`s for each word within the detected text.
              "vertices": [ # The bounding polygon vertices.
                { # A vertex represents a 2D point in the image.
                    # NOTE: the vertex coordinates are in the same scale as the original image.
                  "y": 42, # Y coordinate.
                  "x": 42, # X coordinate.
                },
              ],
            },
            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                # fields, such as a score or string that qualifies the entity.
              { # A `Property` consists of a user-supplied name/value pair.
                "name": "A String", # Name of the property.
                "value": "A String", # Value of the property.
              },
            ],
          },
        ],
        "error": { # The `Status` type defines a logical error model that is suitable for different # If set, represents the error message for the operation.
            # Note that filled-in image annotations are guaranteed to be
            # correct, even when `error` is set.
            # programming environments, including REST APIs and RPC APIs. It is used by
            # [gRPC](https://github.com/grpc). The error model is designed to be:
            #
            # - Simple to use and understand for most users
            # - Flexible enough to meet unexpected needs
            #
            # # Overview
            #
            # The `Status` message contains three pieces of data: error code, error message,
            # and error details. The error code should be an enum value of
            # google.rpc.Code, but it may accept additional error codes if needed. The
            # error message should be a developer-facing English message that helps
            # developers *understand* and *resolve* the error. If a localized user-facing
            # error message is needed, put the localized message in the error details or
            # localize it in the client. The optional error details may contain arbitrary
            # information about the error. There is a predefined set of error detail types
            # in the package `google.rpc` which can be used for common error conditions.
            #
            # # Language mapping
            #
            # The `Status` message is the logical representation of the error model, but it
            # is not necessarily the actual wire format. When the `Status` message is
            # exposed in different client libraries and different wire protocols, it can be
            # mapped differently. For example, it will likely be mapped to some exceptions
            # in Java, but more likely mapped to some error codes in C.
            #
            # # Other uses
            #
            # The error model and the `Status` message can be used in a variety of
            # environments, either with or without APIs, to provide a
            # consistent developer experience across different environments.
            #
            # Example uses of this error model include:
            #
            # - Partial errors. If a service needs to return partial errors to the client,
            #   it may embed the `Status` in the normal response to indicate the partial
            #   errors.
            #
            # - Workflow errors. A typical workflow has multiple steps. Each step may
            #   have a `Status` message for error reporting purpose.
            #
            # - Batch operations. If a client uses batch request and batch response, the
            #   `Status` message should be used directly inside batch response, one for
            #   each error sub-response.
            #
            # - Asynchronous operations. If an API call embeds asynchronous operation
            #   results in its response, the status of those operations should be
            #   represented directly using the `Status` message.
            #
            # - Logging. If some API errors are stored in logs, the message `Status` could
            #   be used directly after any stripping needed for security/privacy reasons.
          "message": "A String", # A developer-facing error message, which should be in English. Any
              # user-facing error message should be localized and sent in the
              # google.rpc.Status.details field, or localized by the client.
          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
          "details": [ # A list of messages that carry the error details. There will be a
              # common set of message types for APIs to use.
            {
              "a_key": "", # Properties of the object. Contains field @type with type URL.
            },
          ],
        },
      },
    ],
  }</pre>
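<p>For illustration only, a minimal sketch of calling this method with the Python client library. It assumes application-default credentials are configured and that a local file named <code>image.jpg</code> exists; the feature type and <code>maxResults</code> value are example choices, not requirements of the API.</p>
<pre>import base64

from googleapiclient.discovery import build

# Build the Vision v1 client (assumes default credentials are available).
service = build('vision', 'v1')

# The `content` field expects base64-encoded image bytes.
with open('image.jpg', 'rb') as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode('utf-8')

body = {
    'requests': [{
        'image': {'content': encoded_image},
        'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
    }],
}

# Execute the batch annotation request and print any label annotations.
response = service.images().annotate(body=body).execute()
for label in response.get('responses', [{}])[0].get('labelAnnotations', []):
    print(label.get('description'), label.get('score'))
</pre>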
</div>

</body></html>