Release v1.6.0 (#324)
* Update version and changelog for v1.6.0
* Update docs
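For reference, the regenerated `vision_v1` docs below describe the `images().annotate()` request and response shape (`image.source.gcsImageUri`, `imageContext.languageHints`, `features`, and the various `*Annotations` response fields). A minimal usage sketch against that surface follows; the API key, bucket/object names, and the particular feature types are illustrative assumptions, not part of this change:

```python
# Sketch only: calls vision_v1 images.annotate with the request shape
# documented in the regenerated docs below. API_KEY and the GCS URI are
# placeholders.
from googleapiclient.discovery import build

API_KEY = 'your-api-key'  # assumed; OAuth2 credentials work as well
service = build('vision', 'v1', developerKey=API_KEY)

body = {
    'requests': [{
        'image': {
            # If both `content` and `source` are provided, `content` takes
            # precedence and is used for the annotation request.
            'source': {'gcsImageUri': 'gs://bucket_name/object_name'},
        },
        'features': [
            {'type': 'LABEL_DETECTION', 'maxResults': 5},
            {'type': 'SAFE_SEARCH_DETECTION'},
        ],
        'imageContext': {
            # Optional OCR language hints; must be supported languages.
            'languageHints': ['en'],
        },
    }],
}

response = service.images().annotate(body=body).execute()
for resp in response.get('responses', []):
    if 'error' in resp:
        # Annotations that are present remain valid even when `error` is set.
        print('Partial failure:', resp['error'].get('message'))
    for label in resp.get('labelAnnotations', []):
        print(label['description'], label['score'])
```

The field names used in the request body and when reading the response match the schema shown in the regenerated doc below.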
diff --git a/docs/dyn/vision_v1.images.html b/docs/dyn/vision_v1.images.html
index 7f5d55a..7db68f0 100644
--- a/docs/dyn/vision_v1.images.html
+++ b/docs/dyn/vision_v1.images.html
@@ -90,8 +90,8 @@
"requests": [ # Individual image annotation requests for this batch.
{ # Request for performing Google Cloud Vision API tasks over a user-provided
# image, with user-requested features.
- "imageContext": { # Image context. # Additional context that may accompany the image.
- "latLongRect": { # Rectangle determined by min and max LatLng pairs. # Lat/long rectangle that specifies the location of the image.
+ "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
+ "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # lat/long rectangle that specifies the location of the image.
"minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
@@ -184,8 +184,7 @@
# setting a hint will help get better results (although it will be a
# significant hindrance if the hint is wrong). Text detection returns an
# error if one or more of the specified languages is not one of the
- # [supported
- # languages](/translate/v2/translate-reference#supported_languages).
+ # [supported languages](/vision/docs/languages).
"A String",
],
},
@@ -193,19 +192,19 @@
"content": "A String", # Image content, represented as a stream of bytes.
# Note: as with all `bytes` fields, protobuffers use a pure binary
# representation, whereas JSON representations use base64.
- "source": { # External image source (Google Cloud Storage image location). # Google Cloud Storage image location. If both 'content' and 'source'
- # are filled for an image, 'content' takes precedence and it will be
- # used for performing the image annotation request.
- "gcsImageUri": "A String", # Google Cloud Storage image URI. It must be in the following form:
- # `gs://bucket_name/object_name`. For more
- # details, please see: https://cloud.google.com/storage/docs/reference-uris.
- # NOTE: Cloud Storage object versioning is not supported!
+ "source": { # External image source (Google Cloud Storage image location). # Google Cloud Storage image location. If both `content` and `source`
+ # are provided for an image, `content` takes precedence and is
+ # used to perform the image annotation request.
+ "gcsImageUri": "A String", # Google Cloud Storage image URI, which must be in the following form:
+ # `gs://bucket_name/object_name` (for details, see
+ # [Google Cloud Storage Request URIs](https://cloud.google.com/storage/docs/reference-uris)).
+ # NOTE: Cloud Storage object versioning is not supported.
},
},
"features": [ # Requested features.
- { # The <em>Feature</em> indicates what type of image detection task to perform.
- # Users describe the type of Google Cloud Vision API tasks to perform over
- # images by using <em>Feature</em>s. Features encode the Cloud Vision API
+ { # Users describe the type of Google Cloud Vision API tasks to perform over
+ # images by using *Feature*s. Each Feature indicates a type of image
+ # detection task to perform. Features encode the Cloud Vision API
# vertical to operate on and the number of top-scoring results to return.
"type": "A String", # The feature type.
"maxResults": 42, # Maximum number of results of this type.
@@ -226,39 +225,37 @@
{ # Response to a batch image annotation request.
"responses": [ # Individual responses to image annotation requests within the batch.
{ # Response to an image annotation request.
- "safeSearchAnnotation": { # Set of features pertaining to the image, computed by various computer vision # If present, safe-search annotation completed successfully.
- # methods over safe-search verticals (for example, adult, spoof, medical,
- # violence).
- "medical": "A String", # Likelihood this is a medical image.
+ "safeSearchAnnotation": { # If present, safe-search annotation has completed successfully.
+ "medical": "A String", # Likelihood that this is a medical image.
"violence": "A String", # Violence likelihood.
- "spoof": "A String", # Spoof likelihood. The likelihood that an obvious modification
+ "spoof": "A String", # Spoof likelihood. The likelihood that an modification
# was made to the image's canonical version to make it appear
# funny or offensive.
- "adult": "A String", # Represents the adult contents likelihood for the image.
+ "adult": "A String", # Represents the adult content likelihood for the image.
},
- "textAnnotations": [ # If present, text (OCR) detection completed successfully.
+ "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
{ # Set of detected entity features.
"confidence": 3.14, # The accuracy of the entity detection in an image.
- # For example, for an image containing 'Eiffel Tower,' this field represents
- # the confidence that there is a tower in the query image. Range [0, 1].
- "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ # For example, for an image in which the "Eiffel Tower" entity is detected,
+ # this field represents the confidence that there is a tower in the query
+ # image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its `locale` language.
"locale": "A String", # The language code for the locale in which the entity textual
- # <code>description</code> (next field) is expressed.
+ # `description` is expressed.
"topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
- # image. For example, the relevancy of 'tower' to an image containing
- # 'Eiffel Tower' is likely higher than an image containing a distant towering
- # building, though the confidence that there is a tower may be the same.
- # Range [0, 1].
- "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph(KG).
- # For more details on KG please see:
- # https://developers.google.com/knowledge-graph/
+ # image. For example, the relevancy of "tower" is likely higher to an image
+ # containing the detected "Eiffel Tower" than to an image containing a
+ # detected distant towering building, even though the confidence that
+ # there is a tower in each image may be the same. Range [0, 1].
+ "mid": "A String", # Opaque entity ID. Some IDs may be available in
+ # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
"locations": [ # The location information for the detected entity. Multiple
- # <code>LocationInfo</code> elements can be present since one location may
- # indicate the location of the scene in the query image, and another the
- # location of the place where the query image was taken. Location information
- # is usually present for landmarks.
+ # `LocationInfo` elements can be present because one location may
+ # indicate the location of the scene in the image, and another location
+ # may indicate the location of the place where the image was taken.
+ # Location information is usually present for landmarks.
{ # Detected entity location information.
- "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
# <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
@@ -303,7 +300,7 @@
},
],
"score": 3.14, # Overall score of the result. Range [0, 1].
- "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
+ "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
# for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
# are produced for the entire text detected in an image region, followed by
# `boundingPoly`s for each word within the detected text.
@@ -315,38 +312,38 @@
},
],
},
- "properties": [ # Some entities can have additional optional <code>Property</code> fields.
- # For example a different kind of score or string that qualifies the entity.
- { # Arbitrary name/value pair.
+ "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+ # fields, such as a score or string that qualifies the entity.
+ { # A `Property` consists of a user-supplied name/value pair.
"name": "A String", # Name of the property.
"value": "A String", # Value of the property.
},
],
},
],
- "labelAnnotations": [ # If present, label detection completed successfully.
+ "labelAnnotations": [ # If present, label detection has completed successfully.
{ # Set of detected entity features.
"confidence": 3.14, # The accuracy of the entity detection in an image.
- # For example, for an image containing 'Eiffel Tower,' this field represents
- # the confidence that there is a tower in the query image. Range [0, 1].
- "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ # For example, for an image in which the "Eiffel Tower" entity is detected,
+ # this field represents the confidence that there is a tower in the query
+ # image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its `locale` language.
"locale": "A String", # The language code for the locale in which the entity textual
- # <code>description</code> (next field) is expressed.
+ # `description` is expressed.
"topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
- # image. For example, the relevancy of 'tower' to an image containing
- # 'Eiffel Tower' is likely higher than an image containing a distant towering
- # building, though the confidence that there is a tower may be the same.
- # Range [0, 1].
- "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph(KG).
- # For more details on KG please see:
- # https://developers.google.com/knowledge-graph/
+ # image. For example, the relevancy of "tower" is likely higher to an image
+ # containing the detected "Eiffel Tower" than to an image containing a
+ # detected distant towering building, even though the confidence that
+ # there is a tower in each image may be the same. Range [0, 1].
+ "mid": "A String", # Opaque entity ID. Some IDs may be available in
+ # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
"locations": [ # The location information for the detected entity. Multiple
- # <code>LocationInfo</code> elements can be present since one location may
- # indicate the location of the scene in the query image, and another the
- # location of the place where the query image was taken. Location information
- # is usually present for landmarks.
+ # `LocationInfo` elements can be present because one location may
+ # indicate the location of the scene in the image, and another location
+ # may indicate the location of the place where the image was taken.
+ # Location information is usually present for landmarks.
{ # Detected entity location information.
- "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
# <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
@@ -391,7 +388,7 @@
},
],
"score": 3.14, # Overall score of the result. Range [0, 1].
- "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
+ "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
# for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
# are produced for the entire text detected in an image region, followed by
# `boundingPoly`s for each word within the detected text.
@@ -403,20 +400,20 @@
},
],
},
- "properties": [ # Some entities can have additional optional <code>Property</code> fields.
- # For example a different kind of score or string that qualifies the entity.
- { # Arbitrary name/value pair.
+ "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+ # fields, such as a score or string that qualifies the entity.
+ { # A `Property` consists of a user-supplied name/value pair.
"name": "A String", # Name of the property.
"value": "A String", # Value of the property.
},
],
},
],
- "imagePropertiesAnnotation": { # Stores image properties (e.g. dominant colors). # If present, image properties were extracted successfully.
+ "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
"dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
- "colors": [ # RGB color values, with their score and pixel fraction.
- { # Color information consists of RGB channels, score and fraction of
- # image the color occupies in the image.
+ "colors": [ # RGB color values with their score and pixel fraction.
+ { # Color information consists of RGB channels, score, and the fraction of
+ # the image that the color occupies.
"color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
# for simplicity of conversion to/from color representations in various
# languages over compactness; for example, the fields of this representation
@@ -546,17 +543,17 @@
"green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
"red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
},
- "pixelFraction": 3.14, # Stores the fraction of pixels the color occupies in the image.
+ "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
# Value in range [0, 1].
"score": 3.14, # Image-specific score for this color. Value in range [0, 1].
},
],
},
},
- "faceAnnotations": [ # If present, face detection completed successfully.
+ "faceAnnotations": [ # If present, face detection has completed successfully.
{ # A face annotation object contains the results of face detection.
- "panAngle": 3.14, # Yaw angle. Indicates the leftward/rightward angle that the face is
- # pointing, relative to the vertical plane perpendicular to the image. Range
+ "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
+ # pointing relative to the vertical plane perpendicular to the image. Range
# [-180,180].
"sorrowLikelihood": "A String", # Sorrow likelihood.
"underExposedLikelihood": "A String", # Under-exposed likelihood.
@@ -565,8 +562,9 @@
"landmarks": [ # Detected face landmarks.
{ # A face-specific landmark (for example, a face feature).
# Landmark positions may fall outside the bounds of the image
- # when the face is near one or more edges of the image.
- # Therefore it is NOT guaranteed that 0 <= x < width or 0 <= y < height.
+ # if the face is near one or more edges of the image.
+ # Therefore it is NOT guaranteed that `0 <= x < width` or
+ # `0 <= y < height`.
"position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
# A valid Position must have both x and y coordinates.
# The position coordinates are in the same scale as the original image.
@@ -579,17 +577,16 @@
],
"surpriseLikelihood": "A String", # Surprise likelihood.
"blurredLikelihood": "A String", # Blurred likelihood.
- "tiltAngle": 3.14, # Pitch angle. Indicates the upwards/downwards angle that the face is
- # pointing
- # relative to the image's horizontal plane. Range [-180,180].
+ "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
+ # pointing relative to the image's horizontal plane. Range [-180,180].
"angerLikelihood": "A String", # Anger likelihood.
"boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
- # are in the original image's scale, as returned in ImageParams.
+ # are in the original image's scale, as returned in `ImageParams`.
# The bounding box is computed to "frame" the face in accordance with human
# expectations. It is based on the landmarker results.
# Note that one or more x and/or y coordinates may not be generated in the
- # BoundingPoly (the polygon will be unbounded) if only a partial face appears in
- # the image to be annotated.
+ # `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ # appears in the image to be annotated.
"vertices": [ # The bounding polygon vertices.
{ # A vertex represents a 2D point in the image.
# NOTE: the vertex coordinates are in the same scale as the original image.
@@ -598,15 +595,13 @@
},
],
},
- "rollAngle": 3.14, # Roll angle. Indicates the amount of clockwise/anti-clockwise rotation of
- # the
- # face relative to the image vertical, about the axis perpendicular to the
- # face. Range [-180,180].
+ "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ # of the face relative to the image vertical about the axis perpendicular to
+ # the face. Range [-180,180].
"headwearLikelihood": "A String", # Headwear likelihood.
- "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # This bounding polygon is tighter than the previous
- # <code>boundingPoly</code>, and
- # encloses only the skin part of the face. Typically, it is used to
- # eliminate the face from any image analysis that detects the
+ "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
+ # `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ # is used to eliminate the face from any image analysis that detects the
# "amount of skin" visible in an image. It is not based on the
# landmarker results, only on the initial face detection, hence
# the <code>fd</code> (face detection) prefix.
@@ -621,29 +616,29 @@
"landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
},
],
- "logoAnnotations": [ # If present, logo detection completed successfully.
+ "logoAnnotations": [ # If present, logo detection has completed successfully.
{ # Set of detected entity features.
"confidence": 3.14, # The accuracy of the entity detection in an image.
- # For example, for an image containing 'Eiffel Tower,' this field represents
- # the confidence that there is a tower in the query image. Range [0, 1].
- "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ # For example, for an image in which the "Eiffel Tower" entity is detected,
+ # this field represents the confidence that there is a tower in the query
+ # image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its `locale` language.
"locale": "A String", # The language code for the locale in which the entity textual
- # <code>description</code> (next field) is expressed.
+ # `description` is expressed.
"topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
- # image. For example, the relevancy of 'tower' to an image containing
- # 'Eiffel Tower' is likely higher than an image containing a distant towering
- # building, though the confidence that there is a tower may be the same.
- # Range [0, 1].
- "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph(KG).
- # For more details on KG please see:
- # https://developers.google.com/knowledge-graph/
+ # image. For example, the relevancy of "tower" is likely higher to an image
+ # containing the detected "Eiffel Tower" than to an image containing a
+ # detected distant towering building, even though the confidence that
+ # there is a tower in each image may be the same. Range [0, 1].
+ "mid": "A String", # Opaque entity ID. Some IDs may be available in
+ # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
"locations": [ # The location information for the detected entity. Multiple
- # <code>LocationInfo</code> elements can be present since one location may
- # indicate the location of the scene in the query image, and another the
- # location of the place where the query image was taken. Location information
- # is usually present for landmarks.
+ # `LocationInfo` elements can be present because one location may
+ # indicate the location of the scene in the image, and another location
+ # may indicate the location of the place where the image was taken.
+ # Location information is usually present for landmarks.
{ # Detected entity location information.
- "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
# <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
@@ -688,7 +683,7 @@
},
],
"score": 3.14, # Overall score of the result. Range [0, 1].
- "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
+ "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
# for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
# are produced for the entire text detected in an image region, followed by
# `boundingPoly`s for each word within the detected text.
@@ -700,38 +695,38 @@
},
],
},
- "properties": [ # Some entities can have additional optional <code>Property</code> fields.
- # For example a different kind of score or string that qualifies the entity.
- { # Arbitrary name/value pair.
+ "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+ # fields, such as a score or string that qualifies the entity.
+ { # A `Property` consists of a user-supplied name/value pair.
"name": "A String", # Name of the property.
"value": "A String", # Value of the property.
},
],
},
],
- "landmarkAnnotations": [ # If present, landmark detection completed successfully.
+ "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
{ # Set of detected entity features.
"confidence": 3.14, # The accuracy of the entity detection in an image.
- # For example, for an image containing 'Eiffel Tower,' this field represents
- # the confidence that there is a tower in the query image. Range [0, 1].
- "description": "A String", # Entity textual description, expressed in its <code>locale</code> language.
+ # For example, for an image in which the "Eiffel Tower" entity is detected,
+ # this field represents the confidence that there is a tower in the query
+ # image. Range [0, 1].
+ "description": "A String", # Entity textual description, expressed in its `locale` language.
"locale": "A String", # The language code for the locale in which the entity textual
- # <code>description</code> (next field) is expressed.
+ # `description` is expressed.
"topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
- # image. For example, the relevancy of 'tower' to an image containing
- # 'Eiffel Tower' is likely higher than an image containing a distant towering
- # building, though the confidence that there is a tower may be the same.
- # Range [0, 1].
- "mid": "A String", # Opaque entity ID. Some IDs might be available in Knowledge Graph(KG).
- # For more details on KG please see:
- # https://developers.google.com/knowledge-graph/
+ # image. For example, the relevancy of "tower" is likely higher to an image
+ # containing the detected "Eiffel Tower" than to an image containing a
+ # detected distant towering building, even though the confidence that
+ # there is a tower in each image may be the same. Range [0, 1].
+ "mid": "A String", # Opaque entity ID. Some IDs may be available in
+ # [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
"locations": [ # The location information for the detected entity. Multiple
- # <code>LocationInfo</code> elements can be present since one location may
- # indicate the location of the scene in the query image, and another the
- # location of the place where the query image was taken. Location information
- # is usually present for landmarks.
+ # `LocationInfo` elements can be present because one location may
+ # indicate the location of the scene in the image, and another location
+ # may indicate the location of the place where the image was taken.
+ # Location information is usually present for landmarks.
{ # Detected entity location information.
- "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Lat - long location coordinates.
+ "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
# <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
@@ -776,7 +771,7 @@
},
],
"score": 3.14, # Overall score of the result. Range [0, 1].
- "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not filled currently
+ "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Currently not produced
# for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s
# are produced for the entire text detected in an image region, followed by
# `boundingPoly`s for each word within the detected text.
@@ -788,9 +783,9 @@
},
],
},
- "properties": [ # Some entities can have additional optional <code>Property</code> fields.
- # For example a different kind of score or string that qualifies the entity.
- { # Arbitrary name/value pair.
+ "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+ # fields, such as a score or string that qualifies the entity.
+ { # A `Property` consists of a user-supplied name/value pair.
"name": "A String", # Name of the property.
"value": "A String", # Value of the property.
},
@@ -798,8 +793,8 @@
},
],
"error": { # The `Status` type defines a logical error model that is suitable for different # If set, represents the error message for the operation.
- # Note that filled-in mage annotations are guaranteed to be
- # correct, even when <code>error</code> is non-empty.
+ # Note that filled-in image annotations are guaranteed to be
+ # correct, even when `error` is set.
# programming environments, including REST APIs and RPC APIs. It is used by
# [gRPC](https://github.com/grpc). The error model is designed to be:
#