Update docs
diff --git a/docs/dyn/vision_v1.images.html b/docs/dyn/vision_v1.images.html
new file mode 100644
index 0000000..6a4fded
--- /dev/null
+++ b/docs/dyn/vision_v1.images.html
@@ -0,0 +1,869 @@
+<html><body>
+<style>
+
+body, h1, h2, h3, div, span, p, pre, a {
+ margin: 0;
+ padding: 0;
+ border: 0;
+ font-weight: inherit;
+ font-style: inherit;
+ font-size: 100%;
+ font-family: inherit;
+ vertical-align: baseline;
+}
+
+body {
+ font-size: 13px;
+ padding: 1em;
+}
+
+h1 {
+ font-size: 26px;
+ margin-bottom: 1em;
+}
+
+h2 {
+ font-size: 24px;
+ margin-bottom: 1em;
+}
+
+h3 {
+ font-size: 20px;
+ margin-bottom: 1em;
+ margin-top: 1em;
+}
+
+pre, code {
+ line-height: 1.5;
+ font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
+}
+
+pre {
+ margin-top: 0.5em;
+}
+
+h1, h2, h3, p {
+  font-family: Arial, sans-serif;
+}
+
+h1, h2, h3 {
+ border-bottom: solid #CCC 1px;
+}
+
+.toc_element {
+ margin-top: 0.5em;
+}
+
+.firstline {
+  margin-left: 2em;
+}
+
+.method {
+ margin-top: 1em;
+ border: solid 1px #CCC;
+ padding: 1em;
+ background: #EEE;
+}
+
+.details {
+ font-weight: bold;
+ font-size: 14px;
+}
+
+</style>
+
+<h1><a href="vision_v1.html">Google Cloud Vision API</a> . <a href="vision_v1.images.html">images</a></h1>
+<h2>Instance Methods</h2>
+<p class="toc_element">
+ <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
+<p class="firstline">Run image detection and annotation for a batch of images.</p>
+<h3>Method Details</h3>
+<div class="method">
+ <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
+ <pre>Run image detection and annotation for a batch of images.
+
+Args:
+ body: object, The request body. (required)
+ The object takes the form of:
+
+{ # Multiple image annotation requests are batched into a single service call.
+ "requests": [ # Individual image annotation requests for this batch.
+ { # Request for performing Google Cloud Vision API tasks over a user-provided
+ # image, with user-requested features.
+ "imageContext": { # Image context. # Additional context that may accompany the image.
+ "latLongRect": { # Rectangle determined by min and max LatLng pairs. # Lat/long rectangle that specifies the location of the image.
+ "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
+ # of doubles representing degrees latitude and degrees longitude. Unless
+ # specified otherwise, this must conform to the
+ # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+ # standard</a>. Values must be within normalized ranges.
+ #
+ # Example of normalization code in Python:
+ #
+ # def NormalizeLongitude(longitude):
+ # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+ # q, r = divmod(longitude, 360.0)
+ # if r > 180.0 or (r == 180.0 and q <= -1.0):
+ # return r - 360.0
+ # return r
+ #
+ # def NormalizeLatLng(latitude, longitude):
+ # """Wraps decimal degrees latitude and longitude to
+ # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+ # r = latitude % 360.0
+ # if r <= 90.0:
+ # return r, NormalizeLongitude(longitude)
+ # elif r >= 270.0:
+ # return r - 360, NormalizeLongitude(longitude)
+ # else:
+ # return 180 - r, NormalizeLongitude(longitude + 180.0)
+ #
+ # assert 180.0 == NormalizeLongitude(180.0)
+ # assert -180.0 == NormalizeLongitude(-180.0)
+ # assert -179.0 == NormalizeLongitude(181.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+ # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+ # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+ # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+ # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+ "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
+ # of doubles representing degrees latitude and degrees longitude. Unless
+ # specified otherwise, this must conform to the
+ # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+ # standard</a>. Values must be within normalized ranges.
+ #
+ # Example of normalization code in Python:
+ #
+ # def NormalizeLongitude(longitude):
+ # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+ # q, r = divmod(longitude, 360.0)
+ # if r > 180.0 or (r == 180.0 and q <= -1.0):
+ # return r - 360.0
+ # return r
+ #
+ # def NormalizeLatLng(latitude, longitude):
+ # """Wraps decimal degrees latitude and longitude to
+ # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+ # r = latitude % 360.0
+ # if r <= 90.0:
+ # return r, NormalizeLongitude(longitude)
+ # elif r >= 270.0:
+ # return r - 360, NormalizeLongitude(longitude)
+ # else:
+ # return 180 - r, NormalizeLongitude(longitude + 180.0)
+ #
+ # assert 180.0 == NormalizeLongitude(180.0)
+ # assert -180.0 == NormalizeLongitude(-180.0)
+ # assert -179.0 == NormalizeLongitude(181.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+ # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+ # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+ # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+ # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+ "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ },
+ "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
+ # will yield the best results as it will allow text detection to
+      # automatically detect the text language. For languages based on the Latin
+      # alphabet, a hint is not needed. In rare cases, when the language of
+ # the text in the image is known in advance, setting this hint will help get
+ # better results (although it will hurt a great deal if the hint is wrong).
+ # Text detection will return an error if one or more of the languages
+      # specified here are not supported. The exact list of supported languages is
+ # specified here:
+ # https://cloud.google.com/translate/v2/using_rest#language-params
+ "A String",
+ ],
+ },
+ "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
+ "content": "A String", # Image content, represented as a stream of bytes.
+ "source": { # External image source (Google Cloud Storage image location). # Google Cloud Storage image location. If both 'content' and 'source'
+ # are filled for an image, 'content' takes precedence and it will be
+ # used for performing the image annotation request.
+ "gcsImageUri": "A String", # Google Cloud Storage image URI. It must be in the following form:
+ # `gs://bucket_name/object_name`. For more
+ # details, please see: https://cloud.google.com/storage/docs/reference-uris.
+ # NOTE: Cloud Storage object versioning is not supported!
+ },
+ },
+ "features": [ # Requested features.
+ { # The <em>Feature</em> indicates what type of image detection task to perform.
+ # Users describe the type of Google Cloud Vision API tasks to perform over
+ # images by using <em>Feature</em>s. Features encode the Cloud Vision API
+ # vertical to operate on and the number of top-scoring results to return.
+ "type": "A String", # The feature type.
+ "maxResults": 42, # Maximum number of results of this type.
+ },
+ ],
+ },
+ ],
+ }
+
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # Response to a batch image annotation request.
+ "responses": [ # Individual responses to image annotation requests within the batch.
+ { # Response to an image annotation request.
+ "safeSearchAnnotation": { # Set of features pertaining to the image, computed by various computer vision # If present, safe-search annotation completed successfully.
+ # methods over safe-search verticals (for example, adult, spoof, medical,
+ # violence).
+ "medical": "A String", # Likelihood this is a medical image.
+ "violence": "A String", # Violence likelihood.
+ "spoof": "A String", # Spoof likelihood. The likelihood that an obvious modification
+ # was made to the image's canonical version to make it appear
+ # funny or offensive.
+ "adult": "A String", # Represents the adult contents likelihood for the image.
+ },
+ "textAnnotations": [ # If present, text (OCR) detection completed successfully.
+ { # Set of detected entity features.
+ "confidence": 3.14, # The accuracy of the entity detection in an image.
+ # For example, for an image containing 'Eiffel Tower,' this field represents
+ # the confidence that there is a tower in the query image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ "locale": "A String", # The language code for the locale in which the entity textual
+ # <code>description</code> (next field) is expressed.
+ "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+ # image. For example, the relevancy of 'tower' to an image containing
+ # 'Eiffel Tower' is likely higher than an image containing a distant towering
+ # building, though the confidence that there is a tower may be the same.
+ # Range [0, 1].
+      "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
+ # For more details on KG please see:
+ # https://developers.google.com/knowledge-graph/
+ "locations": [ # The location information for the detected entity. Multiple
+ # <code>LocationInfo</code> elements can be present since one location may
+ # indicate the location of the scene in the query image, and another the
+ # location of the place where the query image was taken. Location information
+ # is usually present for landmarks.
+ { # Detected entity location information.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ # of doubles representing degrees latitude and degrees longitude. Unless
+ # specified otherwise, this must conform to the
+ # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+ # standard</a>. Values must be within normalized ranges.
+ #
+ # Example of normalization code in Python:
+ #
+ # def NormalizeLongitude(longitude):
+ # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+ # q, r = divmod(longitude, 360.0)
+ # if r > 180.0 or (r == 180.0 and q <= -1.0):
+ # return r - 360.0
+ # return r
+ #
+ # def NormalizeLatLng(latitude, longitude):
+ # """Wraps decimal degrees latitude and longitude to
+ # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+ # r = latitude % 360.0
+ # if r <= 90.0:
+ # return r, NormalizeLongitude(longitude)
+ # elif r >= 270.0:
+ # return r - 360, NormalizeLongitude(longitude)
+ # else:
+ # return 180 - r, NormalizeLongitude(longitude + 180.0)
+ #
+ # assert 180.0 == NormalizeLongitude(180.0)
+ # assert -180.0 == NormalizeLongitude(-180.0)
+ # assert -179.0 == NormalizeLongitude(181.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+ # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+ # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+ # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+ # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+ "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ },
+ ],
+ "score": 3.14, # Overall score of the result. Range [0, 1].
+      "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not currently filled
+ # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
+ # are produced for the entire text detected in an image region, followed by
+ # `boundingPoly`s for each word within the detected text.
+ "vertices": [ # The bounding polygon vertices.
+ { # A vertex represents a 2D point in the image.
+ # NOTE: the vertex coordinates are in the same scale as the original image.
+ "y": 42, # Y coordinate.
+ "x": 42, # X coordinate.
+ },
+ ],
+ },
+ "properties": [ # Some entities can have additional optional <code>Property</code> fields.
+ # For example a different kind of score or string that qualifies the entity.
+ { # Arbitrary name/value pair.
+ "name": "A String", # Name of the property.
+ "value": "A String", # Value of the property.
+ },
+ ],
+ },
+ ],
+ "labelAnnotations": [ # If present, label detection completed successfully.
+ { # Set of detected entity features.
+ "confidence": 3.14, # The accuracy of the entity detection in an image.
+ # For example, for an image containing 'Eiffel Tower,' this field represents
+ # the confidence that there is a tower in the query image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ "locale": "A String", # The language code for the locale in which the entity textual
+ # <code>description</code> (next field) is expressed.
+ "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+ # image. For example, the relevancy of 'tower' to an image containing
+ # 'Eiffel Tower' is likely higher than an image containing a distant towering
+ # building, though the confidence that there is a tower may be the same.
+ # Range [0, 1].
+      "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
+ # For more details on KG please see:
+ # https://developers.google.com/knowledge-graph/
+ "locations": [ # The location information for the detected entity. Multiple
+ # <code>LocationInfo</code> elements can be present since one location may
+ # indicate the location of the scene in the query image, and another the
+ # location of the place where the query image was taken. Location information
+ # is usually present for landmarks.
+ { # Detected entity location information.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ # of doubles representing degrees latitude and degrees longitude. Unless
+ # specified otherwise, this must conform to the
+ # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+ # standard</a>. Values must be within normalized ranges.
+ #
+ # Example of normalization code in Python:
+ #
+ # def NormalizeLongitude(longitude):
+ # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+ # q, r = divmod(longitude, 360.0)
+ # if r > 180.0 or (r == 180.0 and q <= -1.0):
+ # return r - 360.0
+ # return r
+ #
+ # def NormalizeLatLng(latitude, longitude):
+ # """Wraps decimal degrees latitude and longitude to
+ # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+ # r = latitude % 360.0
+ # if r <= 90.0:
+ # return r, NormalizeLongitude(longitude)
+ # elif r >= 270.0:
+ # return r - 360, NormalizeLongitude(longitude)
+ # else:
+ # return 180 - r, NormalizeLongitude(longitude + 180.0)
+ #
+ # assert 180.0 == NormalizeLongitude(180.0)
+ # assert -180.0 == NormalizeLongitude(-180.0)
+ # assert -179.0 == NormalizeLongitude(181.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+ # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+ # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+ # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+ # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+ "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ },
+ ],
+ "score": 3.14, # Overall score of the result. Range [0, 1].
+      "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not currently filled
+ # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
+ # are produced for the entire text detected in an image region, followed by
+ # `boundingPoly`s for each word within the detected text.
+ "vertices": [ # The bounding polygon vertices.
+ { # A vertex represents a 2D point in the image.
+ # NOTE: the vertex coordinates are in the same scale as the original image.
+ "y": 42, # Y coordinate.
+ "x": 42, # X coordinate.
+ },
+ ],
+ },
+ "properties": [ # Some entities can have additional optional <code>Property</code> fields.
+ # For example a different kind of score or string that qualifies the entity.
+ { # Arbitrary name/value pair.
+ "name": "A String", # Name of the property.
+ "value": "A String", # Value of the property.
+ },
+ ],
+ },
+ ],
+ "imagePropertiesAnnotation": { # Stores image properties (e.g. dominant colors). # If present, image properties were extracted successfully.
+ "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
+ "colors": [ # RGB color values, with their score and pixel fraction.
+ { # Color information consists of RGB channels, score and fraction of
+ # image the color occupies in the image.
+ "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
+ # for simplicity of conversion to/from color representations in various
+ # languages over compactness; for example, the fields of this representation
+ # can be trivially provided to the constructor of "java.awt.Color" in Java; it
+ # can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha"
+ # method in iOS; and, with just a little work, it can be easily formatted into
+ # a CSS "rgba()" string in JavaScript, as well. Here are some examples:
+ #
+ # Example (Java):
+ #
+ # import com.google.type.Color;
+ #
+ # // ...
+ # public static java.awt.Color fromProto(Color protocolor) {
+ # float alpha = protocolor.hasAlpha()
+ # ? protocolor.getAlpha().getValue()
+      #           : 1.0f;
+ #
+ # return new java.awt.Color(
+ # protocolor.getRed(),
+ # protocolor.getGreen(),
+ # protocolor.getBlue(),
+ # alpha);
+ # }
+ #
+ # public static Color toProto(java.awt.Color color) {
+ # float red = (float) color.getRed();
+ # float green = (float) color.getGreen();
+ # float blue = (float) color.getBlue();
+      #       float denominator = 255.0f;
+ # Color.Builder resultBuilder =
+ # Color
+ # .newBuilder()
+ # .setRed(red / denominator)
+ # .setGreen(green / denominator)
+ # .setBlue(blue / denominator);
+ # int alpha = color.getAlpha();
+ # if (alpha != 255) {
+      #         resultBuilder.setAlpha(
+ # FloatValue
+ # .newBuilder()
+ # .setValue(((float) alpha) / denominator)
+ # .build());
+ # }
+ # return resultBuilder.build();
+ # }
+ # // ...
+ #
+ # Example (iOS / Obj-C):
+ #
+ # // ...
+ # static UIColor* fromProto(Color* protocolor) {
+ # float red = [protocolor red];
+ # float green = [protocolor green];
+ # float blue = [protocolor blue];
+ # FloatValue* alpha_wrapper = [protocolor alpha];
+ # float alpha = 1.0;
+ # if (alpha_wrapper != nil) {
+ # alpha = [alpha_wrapper value];
+ # }
+ # return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
+ # }
+ #
+ # static Color* toProto(UIColor* color) {
+ # CGFloat red, green, blue, alpha;
+ # if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
+ # return nil;
+ # }
+      #       Color* result = [[Color alloc] init];
+ # [result setRed:red];
+ # [result setGreen:green];
+ # [result setBlue:blue];
+ # if (alpha <= 0.9999) {
+ # [result setAlpha:floatWrapperWithValue(alpha)];
+ # }
+ # [result autorelease];
+ # return result;
+ # }
+ # // ...
+ #
+ # Example (JavaScript):
+ #
+ # // ...
+ #
+ # var protoToCssColor = function(rgb_color) {
+ # var redFrac = rgb_color.red || 0.0;
+ # var greenFrac = rgb_color.green || 0.0;
+ # var blueFrac = rgb_color.blue || 0.0;
+ # var red = Math.floor(redFrac * 255);
+ # var green = Math.floor(greenFrac * 255);
+ # var blue = Math.floor(blueFrac * 255);
+ #
+ # if (!('alpha' in rgb_color)) {
+ # return rgbToCssColor_(red, green, blue);
+ # }
+ #
+ # var alphaFrac = rgb_color.alpha.value || 0.0;
+ # var rgbParams = [red, green, blue].join(',');
+ # return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
+ # };
+ #
+ # var rgbToCssColor_ = function(red, green, blue) {
+ # var rgbNumber = new Number((red << 16) | (green << 8) | blue);
+ # var hexString = rgbNumber.toString(16);
+ # var missingZeros = 6 - hexString.length;
+ # var resultBuilder = ['#'];
+ # for (var i = 0; i < missingZeros; i++) {
+ # resultBuilder.push('0');
+ # }
+ # resultBuilder.push(hexString);
+ # return resultBuilder.join('');
+ # };
+ #
+ # // ...
+ "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
+ "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
+ # the final pixel color is defined by the equation:
+ #
+ # pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
+ #
+ # This means that a value of 1.0 corresponds to a solid color, whereas
+ # a value of 0.0 corresponds to a completely transparent color. This
+ # uses a wrapper message rather than a simple float scalar so that it is
+ # possible to distinguish between a default value and the value being unset.
+ # If omitted, this color object is to be rendered as a solid color
+ # (as if the alpha value had been explicitly given with a value of 1.0).
+ "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
+ "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
+ },
+ "pixelFraction": 3.14, # Stores the fraction of pixels the color occupies in the image.
+ # Value in range [0, 1].
+ "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
+ },
+ ],
+ },
+ },
+ "faceAnnotations": [ # If present, face detection completed successfully.
+ { # A face annotation object contains the results of face detection.
+ "panAngle": 3.14, # Yaw angle. Indicates the leftward/rightward angle that the face is
+ # pointing, relative to the vertical plane perpendicular to the image. Range
+ # [-180,180].
+ "sorrowLikelihood": "A String", # Sorrow likelihood.
+ "underExposedLikelihood": "A String", # Under-exposed likelihood.
+ "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
+ "joyLikelihood": "A String", # Joy likelihood.
+ "landmarks": [ # Detected face landmarks.
+ { # A face-specific landmark (for example, a face feature).
+ # Landmark positions may fall outside the bounds of the image
+ # when the face is near one or more edges of the image.
+ # Therefore it is NOT guaranteed that 0 <= x < width or 0 <= y < height.
+ "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
+ # A valid Position must have both x and y coordinates.
+ # The position coordinates are in the same scale as the original image.
+ "y": 3.14, # Y coordinate.
+ "x": 3.14, # X coordinate.
+ "z": 3.14, # Z coordinate (or depth).
+ },
+ "type": "A String", # Face landmark type.
+ },
+ ],
+ "surpriseLikelihood": "A String", # Surprise likelihood.
+ "blurredLikelihood": "A String", # Blurred likelihood.
+      "tiltAngle": 3.14, # Pitch angle. Indicates the upwards/downwards angle that the face is
+          # pointing relative to the image's horizontal plane. Range [-180,180].
+ "angerLikelihood": "A String", # Anger likelihood.
+ "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
+ # are in the original image's scale, as returned in ImageParams.
+ # The bounding box is computed to "frame" the face in accordance with human
+ # expectations. It is based on the landmarker results.
+ # Note that one or more x and/or y coordinates may not be generated in the
+ # BoundingPoly (the polygon will be unbounded) if only a partial face appears in
+ # the image to be annotated.
+ "vertices": [ # The bounding polygon vertices.
+ { # A vertex represents a 2D point in the image.
+ # NOTE: the vertex coordinates are in the same scale as the original image.
+ "y": 42, # Y coordinate.
+ "x": 42, # X coordinate.
+ },
+ ],
+ },
+      "rollAngle": 3.14, # Roll angle. Indicates the amount of clockwise/anti-clockwise rotation
+          # of the face relative to the image vertical, about the axis perpendicular
+          # to the face. Range [-180,180].
+ "headwearLikelihood": "A String", # Headwear likelihood.
+ "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # This bounding polygon is tighter than the previous
+ # <code>boundingPoly</code>, and
+ # encloses only the skin part of the face. Typically, it is used to
+ # eliminate the face from any image analysis that detects the
+ # "amount of skin" visible in an image. It is not based on the
+ # landmarker results, only on the initial face detection, hence
+ # the <code>fd</code> (face detection) prefix.
+ "vertices": [ # The bounding polygon vertices.
+ { # A vertex represents a 2D point in the image.
+ # NOTE: the vertex coordinates are in the same scale as the original image.
+ "y": 42, # Y coordinate.
+ "x": 42, # X coordinate.
+ },
+ ],
+ },
+ "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
+ },
+ ],
+ "logoAnnotations": [ # If present, logo detection completed successfully.
+ { # Set of detected entity features.
+ "confidence": 3.14, # The accuracy of the entity detection in an image.
+ # For example, for an image containing 'Eiffel Tower,' this field represents
+ # the confidence that there is a tower in the query image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ "locale": "A String", # The language code for the locale in which the entity textual
+ # <code>description</code> (next field) is expressed.
+ "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+ # image. For example, the relevancy of 'tower' to an image containing
+ # 'Eiffel Tower' is likely higher than an image containing a distant towering
+ # building, though the confidence that there is a tower may be the same.
+ # Range [0, 1].
+      "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
+ # For more details on KG please see:
+ # https://developers.google.com/knowledge-graph/
+ "locations": [ # The location information for the detected entity. Multiple
+ # <code>LocationInfo</code> elements can be present since one location may
+ # indicate the location of the scene in the query image, and another the
+ # location of the place where the query image was taken. Location information
+ # is usually present for landmarks.
+ { # Detected entity location information.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ # of doubles representing degrees latitude and degrees longitude. Unless
+ # specified otherwise, this must conform to the
+ # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+ # standard</a>. Values must be within normalized ranges.
+ #
+ # Example of normalization code in Python:
+ #
+ # def NormalizeLongitude(longitude):
+ # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+ # q, r = divmod(longitude, 360.0)
+ # if r > 180.0 or (r == 180.0 and q <= -1.0):
+ # return r - 360.0
+ # return r
+ #
+ # def NormalizeLatLng(latitude, longitude):
+ # """Wraps decimal degrees latitude and longitude to
+ # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+ # r = latitude % 360.0
+ # if r <= 90.0:
+ # return r, NormalizeLongitude(longitude)
+ # elif r >= 270.0:
+ # return r - 360, NormalizeLongitude(longitude)
+ # else:
+ # return 180 - r, NormalizeLongitude(longitude + 180.0)
+ #
+ # assert 180.0 == NormalizeLongitude(180.0)
+ # assert -180.0 == NormalizeLongitude(-180.0)
+ # assert -179.0 == NormalizeLongitude(181.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+ # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+ # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+ # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+ # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+ "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ },
+ ],
+ "score": 3.14, # Overall score of the result. Range [0, 1].
+      "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not currently filled
+ # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
+ # are produced for the entire text detected in an image region, followed by
+ # `boundingPoly`s for each word within the detected text.
+ "vertices": [ # The bounding polygon vertices.
+ { # A vertex represents a 2D point in the image.
+ # NOTE: the vertex coordinates are in the same scale as the original image.
+ "y": 42, # Y coordinate.
+ "x": 42, # X coordinate.
+ },
+ ],
+ },
+ "properties": [ # Some entities can have additional optional <code>Property</code> fields.
+ # For example a different kind of score or string that qualifies the entity.
+ { # Arbitrary name/value pair.
+ "name": "A String", # Name of the property.
+ "value": "A String", # Value of the property.
+ },
+ ],
+ },
+ ],
+ "landmarkAnnotations": [ # If present, landmark detection completed successfully.
+ { # Set of detected entity features.
+ "confidence": 3.14, # The accuracy of the entity detection in an image.
+ # For example, for an image containing 'Eiffel Tower,' this field represents
+ # the confidence that there is a tower in the query image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ "locale": "A String", # The language code for the locale in which the entity textual
+ # <code>description</code> (next field) is expressed.
+ "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+ # image. For example, the relevancy of 'tower' to an image containing
+ # 'Eiffel Tower' is likely higher than an image containing a distant towering
+ # building, though the confidence that there is a tower may be the same.
+ # Range [0, 1].
+      "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph (KG).
+ # For more details on KG please see:
+ # https://developers.google.com/knowledge-graph/
+ "locations": [ # The location information for the detected entity. Multiple
+ # <code>LocationInfo</code> elements can be present since one location may
+ # indicate the location of the scene in the query image, and another the
+ # location of the place where the query image was taken. Location information
+ # is usually present for landmarks.
+ { # Detected entity location information.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ # of doubles representing degrees latitude and degrees longitude. Unless
+ # specified otherwise, this must conform to the
+ # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+ # standard</a>. Values must be within normalized ranges.
+ #
+ # Example of normalization code in Python:
+ #
+ # def NormalizeLongitude(longitude):
+ # """Wraps decimal degrees longitude to [-180.0, 180.0]."""
+ # q, r = divmod(longitude, 360.0)
+ # if r > 180.0 or (r == 180.0 and q <= -1.0):
+ # return r - 360.0
+ # return r
+ #
+ # def NormalizeLatLng(latitude, longitude):
+ # """Wraps decimal degrees latitude and longitude to
+ # [-90.0, 90.0] and [-180.0, 180.0], respectively."""
+ # r = latitude % 360.0
+ # if r <= 90.0:
+ # return r, NormalizeLongitude(longitude)
+ # elif r >= 270.0:
+ # return r - 360, NormalizeLongitude(longitude)
+ # else:
+ # return 180 - r, NormalizeLongitude(longitude + 180.0)
+ #
+ # assert 180.0 == NormalizeLongitude(180.0)
+ # assert -180.0 == NormalizeLongitude(-180.0)
+ # assert -179.0 == NormalizeLongitude(181.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
+ # assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
+ # assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
+ # assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
+ # assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
+ # assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
+ # assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
+ # assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
+ "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+ "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+ },
+ },
+ ],
+ "score": 3.14, # Overall score of the result. Range [0, 1].
+      "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not currently filled
+ # for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
+ # are produced for the entire text detected in an image region, followed by
+ # `boundingPoly`s for each word within the detected text.
+ "vertices": [ # The bounding polygon vertices.
+ { # A vertex represents a 2D point in the image.
+ # NOTE: the vertex coordinates are in the same scale as the original image.
+ "y": 42, # Y coordinate.
+ "x": 42, # X coordinate.
+ },
+ ],
+ },
+ "properties": [ # Some entities can have additional optional <code>Property</code> fields.
+ # For example a different kind of score or string that qualifies the entity.
+ { # Arbitrary name/value pair.
+ "name": "A String", # Name of the property.
+ "value": "A String", # Value of the property.
+ },
+ ],
+ },
+ ],
+ "error": { # The `Status` type defines a logical error model that is suitable for different # If set, represents the error message for the operation.
+      # Note that filled-in image annotations are guaranteed to be
+ # correct, even when <code>error</code> is non-empty.
+ # programming environments, including REST APIs and RPC APIs. It is used by
+ # [gRPC](https://github.com/grpc). The error model is designed to be:
+ #
+ # - Simple to use and understand for most users
+ # - Flexible enough to meet unexpected needs
+ #
+ # # Overview
+ #
+ # The `Status` message contains three pieces of data: error code, error message,
+ # and error details. The error code should be an enum value of
+ # google.rpc.Code, but it may accept additional error codes if needed. The
+ # error message should be a developer-facing English message that helps
+ # developers *understand* and *resolve* the error. If a localized user-facing
+ # error message is needed, put the localized message in the error details or
+ # localize it in the client. The optional error details may contain arbitrary
+ # information about the error. There is a predefined set of error detail types
+ # in the package `google.rpc` which can be used for common error conditions.
+ #
+ # # Language mapping
+ #
+ # The `Status` message is the logical representation of the error model, but it
+ # is not necessarily the actual wire format. When the `Status` message is
+ # exposed in different client libraries and different wire protocols, it can be
+ # mapped differently. For example, it will likely be mapped to some exceptions
+ # in Java, but more likely mapped to some error codes in C.
+ #
+ # # Other uses
+ #
+ # The error model and the `Status` message can be used in a variety of
+ # environments, either with or without APIs, to provide a
+ # consistent developer experience across different environments.
+ #
+ # Example uses of this error model include:
+ #
+ # - Partial errors. If a service needs to return partial errors to the client,
+ # it may embed the `Status` in the normal response to indicate the partial
+ # errors.
+ #
+ # - Workflow errors. A typical workflow has multiple steps. Each step may
+ # have a `Status` message for error reporting purpose.
+ #
+ # - Batch operations. If a client uses batch request and batch response, the
+ # `Status` message should be used directly inside batch response, one for
+ # each error sub-response.
+ #
+ # - Asynchronous operations. If an API call embeds asynchronous operation
+ # results in its response, the status of those operations should be
+ # represented directly using the `Status` message.
+ #
+ # - Logging. If some API errors are stored in logs, the message `Status` could
+ # be used directly after any stripping needed for security/privacy reasons.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "details": [ # A list of messages that carry the error details. There will be a
+ # common set of message types for APIs to use.
+ {
+          "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ },
+ },
+ ],
+ }</pre>
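+<p>Below is a minimal usage sketch for calling this method with the Python
+client library. It is illustrative only: the API key, file name, and feature
+choice are placeholders, not part of the generated reference.</p>
+<pre>import base64
+
+from googleapiclient.discovery import build
+
+API_KEY = 'your-api-key'  # hypothetical placeholder; adapt auth to your setup
+
+service = build('vision', 'v1', developerKey=API_KEY)
+
+# The JSON transport expects image bytes as a base64-encoded string.
+with open('photo.jpg', 'rb') as f:
+    content = base64.b64encode(f.read()).decode('utf-8')
+
+body = {
+    'requests': [{
+        'image': {'content': content},
+        'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
+    }],
+}
+
+response = service.images().annotate(body=body).execute()
+
+resp = response['responses'][0]
+if 'error' in resp:
+    # Per the schema above, a non-empty `error` carries a google.rpc.Status.
+    raise RuntimeError(resp['error'].get('message', 'annotation failed'))
+for label in resp.get('labelAnnotations', []):
+    print('%s (%.2f)' % (label['description'], label['score']))</pre>
+<p>To annotate an object stored in Google Cloud Storage instead of inline
+bytes, replace the 'image' value with
+{'source': {'gcsImageUri': 'gs://bucket_name/object_name'}}.</p>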
+</div>
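+<p>The <code>color</code> objects under <code>dominantColors</code> use
+fractional RGBA channels, as the Java, Objective-C, and JavaScript examples
+above illustrate. The sketch below is a Python analogue (illustrative only,
+not part of the client library); it assumes the REST JSON response, where
+<code>alpha</code> arrives as a plain number rather than a wrapper message.</p>
+<pre>def proto_to_css_color(rgb_color):
+    """Formats a `color` object from dominantColors as a CSS color string."""
+    red = int(rgb_color.get('red', 0.0) * 255)
+    green = int(rgb_color.get('green', 0.0) * 255)
+    blue = int(rgb_color.get('blue', 0.0) * 255)
+    alpha = rgb_color.get('alpha')
+    if alpha is None:
+        # No alpha: render as a solid hex color, matching the JS example.
+        return '#%02x%02x%02x' % (red, green, blue)
+    return 'rgba(%d,%d,%d,%g)' % (red, green, blue, alpha)</pre>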
+
+</body></html>
\ No newline at end of file