<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="vision_v1.html">Google Cloud Vision API</a> . <a href="vision_v1.images.html">images</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
<p class="firstline">Run image detection and annotation for a batch of images.</p>
<h3>Method Details</h3>
<div class="method">
  <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
  <pre>Run image detection and annotation for a batch of images.

Args:
  body: object, The request body. (required)
    The object takes the form of:

{ # Multiple image annotation requests are batched into a single service call.
  "requests": [ # Individual image annotation requests for this batch.
    { # Request for performing Google Cloud Vision API tasks over a user-provided
        # image, with user-requested features.
      "imageContext": { # Image context. # Additional context that may accompany the image.
        "latLongRect": { # Rectangle determined by min and max LatLng pairs. # Lat/long rectangle that specifies the location of the image (see the example sketch below).
          "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
              # standard</a>. Values must be within normalized ranges.
              #
              # Example of normalization code in Python:
              #
              #     def NormalizeLongitude(longitude):
              #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
              #       q, r = divmod(longitude, 360.0)
              #       if r > 180.0 or (r == 180.0 and q <= -1.0):
              #         return r - 360.0
              #       return r
              #
              #     def NormalizeLatLng(latitude, longitude):
              #       """Wraps decimal degrees latitude and longitude to
              #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
              #       r = latitude % 360.0
              #       if r <= 90.0:
              #         return r, NormalizeLongitude(longitude)
              #       elif r >= 270.0:
              #         return r - 360, NormalizeLongitude(longitude)
              #       else:
              #         return 180 - r, NormalizeLongitude(longitude + 180.0)
              #
              #     assert 180.0 == NormalizeLongitude(180.0)
              #     assert -180.0 == NormalizeLongitude(-180.0)
              #     assert -179.0 == NormalizeLongitude(181.0)
              #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
              #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
              #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
              #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
              #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
              #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
              #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
              #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
              #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
              #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
            "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
          },
          "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
              # of doubles representing degrees latitude and degrees longitude. Unless
              # specified otherwise, this must conform to the
              # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
              # standard</a>. Values must be within normalized ranges.
              #
              # Example of normalization code in Python:
              #
              #     def NormalizeLongitude(longitude):
              #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
              #       q, r = divmod(longitude, 360.0)
              #       if r > 180.0 or (r == 180.0 and q <= -1.0):
              #         return r - 360.0
              #       return r
              #
              #     def NormalizeLatLng(latitude, longitude):
              #       """Wraps decimal degrees latitude and longitude to
              #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
              #       r = latitude % 360.0
              #       if r <= 90.0:
              #         return r, NormalizeLongitude(longitude)
              #       elif r >= 270.0:
              #         return r - 360, NormalizeLongitude(longitude)
              #       else:
              #         return 180 - r, NormalizeLongitude(longitude + 180.0)
              #
              #     assert 180.0 == NormalizeLongitude(180.0)
              #     assert -180.0 == NormalizeLongitude(-180.0)
              #     assert -179.0 == NormalizeLongitude(181.0)
              #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
              #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
              #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
              #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
              #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
              #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
              #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
              #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
              #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
              #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
            "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
            "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
          },
        },
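        # For example, a hypothetical latLongRect roughly bounding Paris
        # (the coordinates below are illustrative, not part of this reference):
        #
        #     "latLongRect": {
        #       "minLatLng": {"latitude": 48.80, "longitude": 2.22},
        #       "maxLatLng": {"latitude": 48.91, "longitude": 2.45},
        #     },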
        "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
            # yields the best results since it enables automatic language detection. For
            # languages based on the Latin alphabet, setting `language_hints` is not
            # needed. In rare cases, when the language of the text in the image is known,
            # setting a hint will help get better results (although it will be a
            # significant hindrance if the hint is wrong). Text detection returns an
            # error if one or more of the specified languages is not one of the
            # [supported languages](/translate/v2/translate-reference#supported_languages).
          "A String",
        ],
      },
      "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
        "content": "A String", # Image content, represented as a stream of bytes.
            # Note: as with all `bytes` fields, protocol buffers use a pure binary
            # representation, whereas JSON representations use base64.
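            #
            # For example, a minimal Python sketch (the file name is an
            # illustrative assumption) of producing base64 `content` for a
            # JSON request:
            #
            #     import base64
            #
            #     with open('image.jpg', 'rb') as f:
            #       content = base64.b64encode(f.read()).decode('utf-8')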
        "source": { # External image source (Google Cloud Storage image location). # Google Cloud Storage image location. If both 'content' and 'source'
            # are filled for an image, 'content' takes precedence and it will be
            # used for performing the image annotation request.
          "gcsImageUri": "A String", # Google Cloud Storage image URI. It must be in the following form:
              # `gs://bucket_name/object_name`. For more
              # details, please see: https://cloud.google.com/storage/docs/reference-uris.
              # NOTE: Cloud Storage object versioning is not supported!
        },
      },
      "features": [ # Requested features (an example sketch follows this list).
        { # The <em>Feature</em> indicates what type of image detection task to perform.
            # Users describe the type of Google Cloud Vision API tasks to perform over
            # images by using <em>Feature</em>s. Features encode the Cloud Vision API
            # vertical to operate on and the number of top-scoring results to return.
          "type": "A String", # The feature type.
          "maxResults": 42, # Maximum number of results of this type.
        },
      ],
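      # For example, a hypothetical features list requesting labels and OCR
      # (the maxResults values are illustrative):
      #
      #     "features": [
      #       {"type": "LABEL_DETECTION", "maxResults": 10},
      #       {"type": "TEXT_DETECTION", "maxResults": 1},
      #     ],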
    },
  ],
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Response to a batch image annotation request.
      "responses": [ # Individual responses to image annotation requests within the batch.
        { # Response to an image annotation request.
          "safeSearchAnnotation": { # Set of features pertaining to the image, computed by various computer vision # If present, safe-search annotation completed successfully.
              # methods over safe-search verticals (for example, adult, spoof, medical,
              # violence).
            "medical": "A String", # Likelihood this is a medical image.
            "violence": "A String", # Violence likelihood.
            "spoof": "A String", # Spoof likelihood. The likelihood that an obvious modification
                # was made to the image's canonical version to make it appear
                # funny or offensive.
            "adult": "A String", # Represents the adult contents likelihood for the image.
          },
          "textAnnotations": [ # If present, text (OCR) detection completed successfully.
            { # Set of detected entity features.
              "confidence": 3.14, # The accuracy of the entity detection in an image.
                  # For example, for an image containing 'Eiffel Tower,' this field represents
                  # the confidence that there is a tower in the query image. Range [0, 1].
              "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
              "locale": "A String", # The language code for the locale in which the entity textual
                  # <code>description</code> (next field) is expressed.
              "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of 'tower' to an image containing
                  # 'Eiffel Tower' is likely higher than an image containing a distant towering
                  # building, though the confidence that there is a tower may be the same.
                  # Range [0, 1].
              "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
                  # For more details on KG please see:
                  # https://developers.google.com/knowledge-graph/
              "locations": [ # The location information for the detected entity. Multiple
                  # <code>LocationInfo</code> elements can be present since one location may
                  # indicate the location of the scene in the query image, and another the
                  # location of the place where the query image was taken. Location information
                  # is usually present for landmarks.
                { # Detected entity location information.
                  "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                      # standard</a>. Values must be within normalized ranges.
                      #
                      # Example of normalization code in Python:
                      #
                      #     def NormalizeLongitude(longitude):
                      #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                      #       q, r = divmod(longitude, 360.0)
                      #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                      #         return r - 360.0
                      #       return r
                      #
                      #     def NormalizeLatLng(latitude, longitude):
                      #       """Wraps decimal degrees latitude and longitude to
                      #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                      #       r = latitude % 360.0
                      #       if r <= 90.0:
                      #         return r, NormalizeLongitude(longitude)
                      #       elif r >= 270.0:
                      #         return r - 360, NormalizeLongitude(longitude)
                      #       else:
                      #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                      #
                      #     assert 180.0 == NormalizeLongitude(180.0)
                      #     assert -180.0 == NormalizeLongitude(-180.0)
                      #     assert -179.0 == NormalizeLongitude(181.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                      #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                      #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                      #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                      #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                    "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                    "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                  },
                },
              ],
              "score": 3.14, # Overall score of the result. Range [0, 1].
              "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
                  # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                  # are produced for the entire text detected in an image region, followed by
                  # `boundingPoly`s for each word within the detected text.
                "vertices": [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    "y": 42, # Y coordinate.
                    "x": 42, # X coordinate.
                  },
                ],
              },
              "properties": [ # Some entities can have additional optional <code>Property</code> fields.
                  # For example a different kind of score or string that qualifies the entity.
                { # Arbitrary name/value pair.
                  "name": "A String", # Name of the property.
                  "value": "A String", # Value of the property.
                },
              ],
            },
          ],
          "labelAnnotations": [ # If present, label detection completed successfully.
            { # Set of detected entity features.
              "confidence": 3.14, # The accuracy of the entity detection in an image.
                  # For example, for an image containing 'Eiffel Tower,' this field represents
                  # the confidence that there is a tower in the query image. Range [0, 1].
              "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
              "locale": "A String", # The language code for the locale in which the entity textual
                  # <code>description</code> (next field) is expressed.
              "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of 'tower' to an image containing
                  # 'Eiffel Tower' is likely higher than an image containing a distant towering
                  # building, though the confidence that there is a tower may be the same.
                  # Range [0, 1].
              "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
                  # For more details on KG please see:
                  # https://developers.google.com/knowledge-graph/
              "locations": [ # The location information for the detected entity. Multiple
                  # <code>LocationInfo</code> elements can be present since one location may
                  # indicate the location of the scene in the query image, and another the
                  # location of the place where the query image was taken. Location information
                  # is usually present for landmarks.
                { # Detected entity location information.
                  "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                      # standard</a>. Values must be within normalized ranges.
                      #
                      # Example of normalization code in Python:
                      #
                      #     def NormalizeLongitude(longitude):
                      #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                      #       q, r = divmod(longitude, 360.0)
                      #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                      #         return r - 360.0
                      #       return r
                      #
                      #     def NormalizeLatLng(latitude, longitude):
                      #       """Wraps decimal degrees latitude and longitude to
                      #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                      #       r = latitude % 360.0
                      #       if r <= 90.0:
                      #         return r, NormalizeLongitude(longitude)
                      #       elif r >= 270.0:
                      #         return r - 360, NormalizeLongitude(longitude)
                      #       else:
                      #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                      #
                      #     assert 180.0 == NormalizeLongitude(180.0)
                      #     assert -180.0 == NormalizeLongitude(-180.0)
                      #     assert -179.0 == NormalizeLongitude(181.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                      #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                      #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                      #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                      #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                    "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                    "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                  },
                },
              ],
              "score": 3.14, # Overall score of the result. Range [0, 1].
              "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
                  # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                  # are produced for the entire text detected in an image region, followed by
                  # `boundingPoly`s for each word within the detected text.
                "vertices": [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    "y": 42, # Y coordinate.
                    "x": 42, # X coordinate.
                  },
                ],
              },
              "properties": [ # Some entities can have additional optional <code>Property</code> fields.
                  # For example a different kind of score or string that qualifies the entity.
                { # Arbitrary name/value pair.
                  "name": "A String", # Name of the property.
                  "value": "A String", # Value of the property.
                },
              ],
            },
          ],
          "imagePropertiesAnnotation": { # Stores image properties (e.g. dominant colors). # If present, image properties were extracted successfully.
            "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
              "colors": [ # RGB color values, with their score and pixel fraction.
                { # Color information consists of RGB channels, score and fraction of
                    # image the color occupies in the image.
                  "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                      # for simplicity of conversion to/from color representations in various
                      # languages over compactness; for example, the fields of this representation
                      # can be trivially provided to the constructor of "java.awt.Color" in Java; it
                      # can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
                      # method in iOS; and, with just a little work, it can be easily formatted into
                      # a CSS "rgba()" string in JavaScript, as well. Here are some examples:
                      #
                      # Example (Java):
                      #
                      #     import com.google.type.Color;
                      #
                      #     // ...
                      #     public static java.awt.Color fromProto(Color protocolor) {
                      #       float alpha = protocolor.hasAlpha()
                      #           ? protocolor.getAlpha().getValue()
                      #           : 1.0f;
                      #
                      #       return new java.awt.Color(
                      #           protocolor.getRed(),
                      #           protocolor.getGreen(),
                      #           protocolor.getBlue(),
                      #           alpha);
                      #     }
                      #
                      #     public static Color toProto(java.awt.Color color) {
                      #       float red = (float) color.getRed();
                      #       float green = (float) color.getGreen();
                      #       float blue = (float) color.getBlue();
                      #       float denominator = 255.0f;
                      #       Color.Builder resultBuilder =
                      #           Color
                      #               .newBuilder()
                      #               .setRed(red / denominator)
                      #               .setGreen(green / denominator)
                      #               .setBlue(blue / denominator);
                      #       int alpha = color.getAlpha();
                      #       if (alpha != 255) {
                      #         resultBuilder.setAlpha(
                      #             FloatValue
                      #                 .newBuilder()
                      #                 .setValue(((float) alpha) / denominator)
                      #                 .build());
                      #       }
                      #       return resultBuilder.build();
                      #     }
                      #     // ...
                      #
                      # Example (iOS / Obj-C):
                      #
                      #     // ...
                      #     static UIColor* fromProto(Color* protocolor) {
                      #       float red = [protocolor red];
                      #       float green = [protocolor green];
                      #       float blue = [protocolor blue];
                      #       FloatValue* alpha_wrapper = [protocolor alpha];
                      #       float alpha = 1.0;
                      #       if (alpha_wrapper != nil) {
                      #         alpha = [alpha_wrapper value];
                      #       }
                      #       return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
                      #     }
                      #
                      #     static Color* toProto(UIColor* color) {
                      #       CGFloat red, green, blue, alpha;
                      #       if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
                      #         return nil;
                      #       }
                      #       Color* result = [[Color alloc] init];
                      #       [result setRed:red];
                      #       [result setGreen:green];
                      #       [result setBlue:blue];
                      #       if (alpha <= 0.9999) {
                      #         [result setAlpha:floatWrapperWithValue(alpha)];
                      #       }
                      #       [result autorelease];
                      #       return result;
                      #     }
                      #     // ...
                      #
                      # Example (JavaScript):
                      #
                      #     // ...
                      #
                      #     var protoToCssColor = function(rgb_color) {
                      #       var redFrac = rgb_color.red || 0.0;
                      #       var greenFrac = rgb_color.green || 0.0;
                      #       var blueFrac = rgb_color.blue || 0.0;
                      #       var red = Math.floor(redFrac * 255);
                      #       var green = Math.floor(greenFrac * 255);
                      #       var blue = Math.floor(blueFrac * 255);
                      #
                      #       if (!('alpha' in rgb_color)) {
                      #         return rgbToCssColor_(red, green, blue);
                      #       }
                      #
                      #       var alphaFrac = rgb_color.alpha.value || 0.0;
                      #       var rgbParams = [red, green, blue].join(',');
                      #       return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
                      #     };
                      #
                      #     var rgbToCssColor_ = function(red, green, blue) {
                      #       var rgbNumber = new Number((red << 16) | (green << 8) | blue);
                      #       var hexString = rgbNumber.toString(16);
                      #       var missingZeros = 6 - hexString.length;
                      #       var resultBuilder = ['#'];
                      #       for (var i = 0; i < missingZeros; i++) {
                      #         resultBuilder.push('0');
                      #       }
                      #       resultBuilder.push(hexString);
                      #       return resultBuilder.join('');
                      #     };
                      #
                      #     // ...
                    "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                    "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
                        # the final pixel color is defined by the equation:
                        #
                        #     pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
                        #
                        # This means that a value of 1.0 corresponds to a solid color, whereas
                        # a value of 0.0 corresponds to a completely transparent color. This
                        # uses a wrapper message rather than a simple float scalar so that it is
                        # possible to distinguish between a default value and the value being unset.
                        # If omitted, this color object is to be rendered as a solid color
                        # (as if the alpha value had been explicitly given with a value of 1.0).
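                        #
                        # For example, with alpha = 0.25 and this color pure red (1, 0, 0)
                        # over a white background (1, 1, 1):
                        #
                        #     pixel color = 0.25 * (1, 0, 0) + 0.75 * (1, 1, 1) = (1.0, 0.75, 0.75)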
                    "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
                    "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
                  },
                  "pixelFraction": 3.14, # Stores the fraction of pixels the color occupies in the image.
                      # Value in range [0, 1].
                  "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
                },
              ],
            },
          },
          "faceAnnotations": [ # If present, face detection completed successfully.
            { # A face annotation object contains the results of face detection.
              "panAngle": 3.14, # Yaw angle. Indicates the leftward/rightward angle that the face is
                  # pointing, relative to the vertical plane perpendicular to the image. Range
                  # [-180,180].
              "sorrowLikelihood": "A String", # Sorrow likelihood.
              "underExposedLikelihood": "A String", # Under-exposed likelihood.
              "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
              "joyLikelihood": "A String", # Joy likelihood.
              "landmarks": [ # Detected face landmarks.
                { # A face-specific landmark (for example, a face feature).
                    # Landmark positions may fall outside the bounds of the image
                    # when the face is near one or more edges of the image.
                    # Therefore it is NOT guaranteed that 0 <= x < width or 0 <= y < height.
                  "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
                      # A valid Position must have both x and y coordinates.
                      # The position coordinates are in the same scale as the original image.
                    "y": 3.14, # Y coordinate.
                    "x": 3.14, # X coordinate.
                    "z": 3.14, # Z coordinate (or depth).
                  },
                  "type": "A String", # Face landmark type.
                },
              ],
              "surpriseLikelihood": "A String", # Surprise likelihood.
              "blurredLikelihood": "A String", # Blurred likelihood.
              "tiltAngle": 3.14, # Pitch angle. Indicates the upwards/downwards angle that the face is
                  # pointing relative to the image's horizontal plane. Range [-180,180].
              "angerLikelihood": "A String", # Anger likelihood.
              "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                  # are in the original image's scale, as returned in ImageParams.
                  # The bounding box is computed to "frame" the face in accordance with human
                  # expectations. It is based on the landmarker results.
                  # Note that one or more x and/or y coordinates may not be generated in the
                  # BoundingPoly (the polygon will be unbounded) if only a partial face appears in
                  # the image to be annotated.
                "vertices": [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    "y": 42, # Y coordinate.
                    "x": 42, # X coordinate.
                  },
                ],
              },
              "rollAngle": 3.14, # Roll angle. Indicates the amount of clockwise/anti-clockwise rotation of
                  # the face relative to the image vertical, about the axis perpendicular to the
                  # face. Range [-180,180].
              "headwearLikelihood": "A String", # Headwear likelihood.
              "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # This bounding polygon is tighter than the previous
                  # <code>boundingPoly</code>, and encloses only the skin part of the face.
                  # Typically, it is used to eliminate the face from any image analysis that
                  # detects the "amount of skin" visible in an image. It is not based on the
                  # landmarker results, only on the initial face detection, hence
                  # the <code>fd</code> (face detection) prefix.
                "vertices": [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    "y": 42, # Y coordinate.
                    "x": 42, # X coordinate.
                  },
                ],
              },
              "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
            },
          ],
          "logoAnnotations": [ # If present, logo detection completed successfully.
            { # Set of detected entity features.
              "confidence": 3.14, # The accuracy of the entity detection in an image.
                  # For example, for an image containing 'Eiffel Tower,' this field represents
                  # the confidence that there is a tower in the query image. Range [0, 1].
              "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
              "locale": "A String", # The language code for the locale in which the entity textual
                  # <code>description</code> (next field) is expressed.
              "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of 'tower' to an image containing
                  # 'Eiffel Tower' is likely higher than an image containing a distant towering
                  # building, though the confidence that there is a tower may be the same.
                  # Range [0, 1].
              "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
                  # For more details on KG please see:
                  # https://developers.google.com/knowledge-graph/
              "locations": [ # The location information for the detected entity. Multiple
                  # <code>LocationInfo</code> elements can be present since one location may
                  # indicate the location of the scene in the query image, and another the
                  # location of the place where the query image was taken. Location information
                  # is usually present for landmarks.
                { # Detected entity location information.
                  "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                      # standard</a>. Values must be within normalized ranges.
                      #
                      # Example of normalization code in Python:
                      #
                      #     def NormalizeLongitude(longitude):
                      #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                      #       q, r = divmod(longitude, 360.0)
                      #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                      #         return r - 360.0
                      #       return r
                      #
                      #     def NormalizeLatLng(latitude, longitude):
                      #       """Wraps decimal degrees latitude and longitude to
                      #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                      #       r = latitude % 360.0
                      #       if r <= 90.0:
                      #         return r, NormalizeLongitude(longitude)
                      #       elif r >= 270.0:
                      #         return r - 360, NormalizeLongitude(longitude)
                      #       else:
                      #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                      #
                      #     assert 180.0 == NormalizeLongitude(180.0)
                      #     assert -180.0 == NormalizeLongitude(-180.0)
                      #     assert -179.0 == NormalizeLongitude(181.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                      #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                      #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                      #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                      #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                    "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                    "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                  },
                },
              ],
              "score": 3.14, # Overall score of the result. Range [0, 1].
              "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
                  # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                  # are produced for the entire text detected in an image region, followed by
                  # `boundingPoly`s for each word within the detected text.
                "vertices": [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    "y": 42, # Y coordinate.
                    "x": 42, # X coordinate.
                  },
                ],
              },
              "properties": [ # Some entities can have additional optional <code>Property</code> fields.
                  # For example a different kind of score or string that qualifies the entity.
                { # Arbitrary name/value pair.
                  "name": "A String", # Name of the property.
                  "value": "A String", # Value of the property.
                },
              ],
            },
          ],
          "landmarkAnnotations": [ # If present, landmark detection completed successfully.
            { # Set of detected entity features.
              "confidence": 3.14, # The accuracy of the entity detection in an image.
                  # For example, for an image containing 'Eiffel Tower,' this field represents
                  # the confidence that there is a tower in the query image. Range [0, 1].
              "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
              "locale": "A String", # The language code for the locale in which the entity textual
                  # <code>description</code> (next field) is expressed.
              "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                  # image. For example, the relevancy of 'tower' to an image containing
                  # 'Eiffel Tower' is likely higher than an image containing a distant towering
                  # building, though the confidence that there is a tower may be the same.
                  # Range [0, 1].
              "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
                  # For more details on KG please see:
                  # https://developers.google.com/knowledge-graph/
              "locations": [ # The location information for the detected entity. Multiple
                  # <code>LocationInfo</code> elements can be present since one location may
                  # indicate the location of the scene in the query image, and another the
                  # location of the place where the query image was taken. Location information
                  # is usually present for landmarks.
                { # Detected entity location information.
                  "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
                      # of doubles representing degrees latitude and degrees longitude. Unless
                      # specified otherwise, this must conform to the
                      # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
                      # standard</a>. Values must be within normalized ranges.
                      #
                      # Example of normalization code in Python:
                      #
                      #     def NormalizeLongitude(longitude):
                      #       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
                      #       q, r = divmod(longitude, 360.0)
                      #       if r > 180.0 or (r == 180.0 and q <= -1.0):
                      #         return r - 360.0
                      #       return r
                      #
                      #     def NormalizeLatLng(latitude, longitude):
                      #       """Wraps decimal degrees latitude and longitude to
                      #       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
                      #       r = latitude % 360.0
                      #       if r <= 90.0:
                      #         return r, NormalizeLongitude(longitude)
                      #       elif r >= 270.0:
                      #         return r - 360, NormalizeLongitude(longitude)
                      #       else:
                      #         return 180 - r, NormalizeLongitude(longitude + 180.0)
                      #
                      #     assert 180.0 == NormalizeLongitude(180.0)
                      #     assert -180.0 == NormalizeLongitude(-180.0)
                      #     assert -179.0 == NormalizeLongitude(181.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
                      #     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
                      #     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
                      #     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
                      #     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
                      #     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
                      #     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
                      #     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
                    "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                    "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                  },
                },
              ],
              "score": 3.14, # Overall score of the result. Range [0, 1].
              "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
                  # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
                  # are produced for the entire text detected in an image region, followed by
                  # `boundingPoly`s for each word within the detected text.
                "vertices": [ # The bounding polygon vertices.
                  { # A vertex represents a 2D point in the image.
                      # NOTE: the vertex coordinates are in the same scale as the original image.
                    "y": 42, # Y coordinate.
                    "x": 42, # X coordinate.
                  },
                ],
              },
              "properties": [ # Some entities can have additional optional <code>Property</code> fields.
                  # For example a different kind of score or string that qualifies the entity.
                { # Arbitrary name/value pair.
                  "name": "A String", # Name of the property.
                  "value": "A String", # Value of the property.
                },
              ],
            },
          ],
          "error": { # The `Status` type defines a logical error model that is suitable for different # If set, represents the error message for the operation.
              # Note that filled-in image annotations are guaranteed to be
              # correct, even when <code>error</code> is non-empty.
              # programming environments, including REST APIs and RPC APIs. It is used by
              # [gRPC](https://github.com/grpc). The error model is designed to be:
              #
              # - Simple to use and understand for most users
              # - Flexible enough to meet unexpected needs
              #
              # # Overview
              #
              # The `Status` message contains three pieces of data: error code, error message,
              # and error details. The error code should be an enum value of
              # google.rpc.Code, but it may accept additional error codes if needed. The
              # error message should be a developer-facing English message that helps
              # developers *understand* and *resolve* the error. If a localized user-facing
              # error message is needed, put the localized message in the error details or
              # localize it in the client. The optional error details may contain arbitrary
              # information about the error. There is a predefined set of error detail types
              # in the package `google.rpc` which can be used for common error conditions.
              #
              # # Language mapping
              #
              # The `Status` message is the logical representation of the error model, but it
              # is not necessarily the actual wire format. When the `Status` message is
              # exposed in different client libraries and different wire protocols, it can be
              # mapped differently. For example, it will likely be mapped to some exceptions
              # in Java, but more likely mapped to some error codes in C.
              #
              # # Other uses
              #
              # The error model and the `Status` message can be used in a variety of
              # environments, either with or without APIs, to provide a
              # consistent developer experience across different environments.
              #
              # Example uses of this error model include:
              #
              # - Partial errors. If a service needs to return partial errors to the client,
              #   it may embed the `Status` in the normal response to indicate the partial
              #   errors.
              #
              # - Workflow errors. A typical workflow has multiple steps. Each step may
              #   have a `Status` message for error reporting purpose.
              #
              # - Batch operations. If a client uses batch request and batch response, the
              #   `Status` message should be used directly inside batch response, one for
              #   each error sub-response.
              #
              # - Asynchronous operations. If an API call embeds asynchronous operation
              #   results in its response, the status of those operations should be
              #   represented directly using the `Status` message.
              #
              # - Logging. If some API errors are stored in logs, the message `Status` could
              #   be used directly after any stripping needed for security/privacy reasons.
            "message": "A String", # A developer-facing error message, which should be in English. Any
                # user-facing error message should be localized and sent in the
                # google.rpc.Status.details field, or localized by the client.
            "code": 42, # The status code, which should be an enum value of google.rpc.Code.
            "details": [ # A list of messages that carry the error details. There will be a
                # common set of message types for APIs to use.
              {
                "a_key": "", # Properties of the object. Contains field @type with type URL.
              },
            ],
          },
        },
      ],
    }</pre>
</div>
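
<h3>Example</h3>
<p>A minimal illustrative sketch of calling this method with the
google-api-python-client library. The API key, file name, and feature choice
below are placeholder assumptions rather than part of this reference;
authentication can equally use OAuth 2.0 credentials.</p>
<pre>import base64

from googleapiclient.discovery import build

# Build a client for the Cloud Vision v1 API ('YOUR_API_KEY' is a placeholder).
service = build('vision', 'v1', developerKey='YOUR_API_KEY')

# JSON requests carry image bytes as base64 (see the `content` field above).
with open('image.jpg', 'rb') as image_file:
  encoded_image = base64.b64encode(image_file.read()).decode('utf-8')

body = {
  'requests': [{
    'image': {'content': encoded_image},
    'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
  }],
}

response = service.images().annotate(body=body).execute()

# Each entry in 'responses' corresponds to one request; errors are per image.
for result in response.get('responses', []):
  if 'error' in result:
    print('Error: %s' % result['error'].get('message'))
  else:
    for label in result.get('labelAnnotations', []):
      print('%s (score: %.2f)' % (label['description'], label['score']))</pre>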

</body></html>