docs: update generated docs (#981)
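
The regenerated reference below documents the request and response bodies for
projects.images.annotate. As a quick orientation, here is a minimal sketch of
calling that method through this client library; the project path, bucket URI,
and requested features are placeholders, and it assumes the Vision API is
enabled and that application-default credentials are available.

    from googleapiclient.discovery import build

    # Build the Vision v1 client (assumes application-default credentials).
    service = build('vision', 'v1')

    # One AnnotateImageRequest, shaped like the structure documented below:
    # an image source plus the list of requested features.
    body = {
        'requests': [
            {
                'image': {
                    'source': {'imageUri': 'gs://my-bucket/photo.jpg'},  # placeholder
                },
                'features': [
                    {'type': 'LABEL_DETECTION', 'maxResults': 10},
                ],
            },
        ],
    }

    # 'parent' selects the target project/location; 'projects/my-project' is a
    # placeholder value.
    response = service.projects().images().annotate(
        parent='projects/my-project', body=body).execute()

    # Each labelAnnotations entry is an EntityAnnotation as documented below.
    for label in response['responses'][0].get('labelAnnotations', []):
        print(label['description'], label['score'])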

diff --git a/docs/dyn/vision_v1.projects.images.html b/docs/dyn/vision_v1.projects.images.html
index 63f30c4..1fdbf21 100644
--- a/docs/dyn/vision_v1.projects.images.html
+++ b/docs/dyn/vision_v1.projects.images.html
@@ -106,9 +106,6 @@
       { # Request for performing Google Cloud Vision API tasks over a user-provided
           # image, with user-requested features, and with context information.
         "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
-          "content": "A String", # Image content, represented as a stream of bytes.
-              # Note: As with all `bytes` fields, protobuffers use a pure binary
-              # representation, whereas JSON representations use base64.
           "source": { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
               # URL. If both `content` and `source` are provided for an image, `content`
               # takes precedence and is used to perform the image annotation request.
@@ -136,20 +133,35 @@
                 # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
                 # precedence.
           },
+          "content": "A String", # Image content, represented as a stream of bytes.
+              # Note: As with all `bytes` fields, protobuffers use a pure binary
+              # representation, whereas JSON representations use base64.
+              #
+              # Currently, this field only works for BatchAnnotateImages requests. It does
+              # not work for AsyncBatchAnnotateImages requests.
         },
-        "features": [ # Requested features.
-          { # The type of Google Cloud Vision API detection to perform, and the maximum
-              # number of results to return for that type. Multiple `Feature` objects can
-              # be specified in the `features` list.
-            "type": "A String", # The feature type.
-            "maxResults": 42, # Maximum number of results of this type. Does not apply to
-                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
-            "model": "A String", # Model to use for the feature.
-                # Supported values: "builtin/stable" (the default if unset) and
-                # "builtin/latest".
-          },
-        ],
         "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
+          "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
+            "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
+                # of doubles representing degrees latitude and degrees longitude. Unless
+                # specified otherwise, this must conform to the
+                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                # standard</a>. Values must be within normalized ranges.
+              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+            },
+            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
+                # of doubles representing degrees latitude and degrees longitude. Unless
+                # specified otherwise, this must conform to the
+                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                # standard</a>. Values must be within normalized ranges.
+              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+            },
+          },
+          "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
+            "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
+          },
           "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
               # yields the best results since it enables automatic language detection. For
               # languages based on the Latin alphabet, setting `language_hints` is not
@@ -160,50 +172,16 @@
               # [supported languages](https://cloud.google.com/vision/docs/languages).
             "A String",
           ],
-          "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
-            "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
-          },
-          "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
-            "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
-                # of doubles representing degrees latitude and degrees longitude. Unless
-                # specified otherwise, this must conform to the
-                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                # standard</a>. Values must be within normalized ranges.
-              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-            },
-            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
-                # of doubles representing degrees latitude and degrees longitude. Unless
-                # specified otherwise, this must conform to the
-                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                # standard</a>. Values must be within normalized ranges.
-              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-            },
-          },
-          "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
-            "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
-                # of the image. For example, if the desired aspect ratio is 4/3, the
-                # corresponding float value should be 1.33333.  If not specified, the
-                # best possible crop is returned. The number of provided aspect ratios is
-                # limited to a maximum of 16; any aspect ratios provided after the 16th are
-                # ignored.
-              3.14,
-            ],
-          },
           "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
-            "filter": "A String", # The filtering expression. This can be used to restrict search results based
-                # on Product labels. We currently support an AND of OR of key-value
-                # expressions, where each expression within an OR must have the same key. An
-                # '=' should be used to connect the key and value.
-                #
-                # For example, "(color = red OR color = blue) AND brand = Google" is
-                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
-                # "color: red" is not acceptable because it uses a ':' instead of an '='.
-            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
-                #
-                # Format is:
-                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+            "productCategories": [ # The list of product categories to search in. Currently, we only consider
+                # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
+                # "packagedgoods-v1", or "general-v1" should be specified. The legacy
+                # categories "homegoods", "apparel", and "toys" are still supported but will
+                # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
+                # or "toys-v2" for better product search accuracy. It is recommended to
+                # migrate existing products to these categories as well.
+              "A String",
+            ],
             "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                 # If it is not specified, system discretion will be applied.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
@@ -222,17 +200,42 @@
                 },
               ],
             },
-            "productCategories": [ # The list of product categories to search in. Currently, we only consider
-                # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
-                # "packagedgoods-v1", or "general-v1" should be specified. The legacy
-                # categories "homegoods", "apparel", and "toys" are still supported but will
-                # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
-                # or "toys-v2" for better product search accuracy. It is recommended to
-                # migrate existing products to these categories as well.
-              "A String",
+            "filter": "A String", # The filtering expression. This can be used to restrict search results based
+                # on Product labels. We currently support an AND of OR of key-value
+                # expressions, where each expression within an OR must have the same key. An
+                # '=' should be used to connect the key and value.
+                #
+                # For example, "(color = red OR color = blue) AND brand = Google" is
+                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
+                # "color: red" is not acceptable because it uses a ':' instead of an '='.
+            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
+                #
+                # Format is:
+                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+          },
+          "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
+            "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
+                # of the image. For example, if the desired aspect ratio is 4/3, the
+                # corresponding float value should be 1.33333.  If not specified, the
+                # best possible crop is returned. The number of provided aspect ratios is
+                # limited to a maximum of 16; any aspect ratios provided after the 16th are
+                # ignored.
+              3.14,
             ],
           },
         },
+        "features": [ # Requested features.
+          { # The type of Google Cloud Vision API detection to perform, and the maximum
+              # number of results to return for that type. Multiple `Feature` objects can
+              # be specified in the `features` list.
+            "maxResults": 42, # Maximum number of results of this type. Does not apply to
+                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+            "type": "A String", # The feature type.
+            "model": "A String", # Model to use for the feature.
+                # Supported values: "builtin/stable" (the default if unset) and
+                # "builtin/latest".
+          },
+        ],
       },
     ],
     "parent": "A String", # Optional. Target project and location to make a call.
@@ -260,35 +263,14 @@
     { # Response to a batch image annotation request.
     "responses": [ # Individual responses to image annotation requests within the batch.
       { # Response to an image annotation request.
-        "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
+        "labelAnnotations": [ # If present, label detection has completed successfully.
           { # Set of detected entity features.
-            "score": 3.14, # Overall score of the result. Range [0, 1].
-            "locations": [ # The location information for the detected entity. Multiple
-                # `LocationInfo` elements can be present because one location may
-                # indicate the location of the scene in the image, and another location
-                # may indicate the location of the place where the image was taken.
-                # Location information is usually present for landmarks.
-              { # Detected entity location information.
-                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
-                    # of doubles representing degrees latitude and degrees longitude. Unless
-                    # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
-                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-                },
-              },
-            ],
-            "mid": "A String", # Opaque entity ID. Some IDs may be available in
-                # [Google Knowledge Graph Search
-                # API](https://developers.google.com/knowledge-graph/).
+            "description": "A String", # Entity textual description, expressed in its `locale` language.
             "confidence": 3.14, # **Deprecated. Use `score` instead.**
                 # The accuracy of the entity detection in an image.
                 # For example, for an image in which the "Eiffel Tower" entity is detected,
                 # this field represents the confidence that there is a tower in the query
                 # image. Range [0, 1].
-            "locale": "A String", # The language code for the locale in which the entity textual
-                # `description` is expressed.
             "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                 # for `LABEL_DETECTION` features.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
@@ -307,12 +289,32 @@
                 },
               ],
             },
-            "description": "A String", # Entity textual description, expressed in its `locale` language.
+            "mid": "A String", # Opaque entity ID. Some IDs may be available in
+                # [Google Knowledge Graph Search
+                # API](https://developers.google.com/knowledge-graph/).
+            "locale": "A String", # The language code for the locale in which the entity textual
+                # `description` is expressed.
             "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                 # image. For example, the relevancy of "tower" is likely higher to an image
                 # containing the detected "Eiffel Tower" than to an image containing a
                 # detected distant towering building, even though the confidence that
                 # there is a tower in each image may be the same. Range [0, 1].
+            "locations": [ # The location information for the detected entity. Multiple
+                # `LocationInfo` elements can be present because one location may
+                # indicate the location of the scene in the image, and another location
+                # may indicate the location of the place where the image was taken.
+                # Location information is usually present for landmarks.
+              { # Detected entity location information.
+                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
+                    # of doubles representing degrees latitude and degrees longitude. Unless
+                    # specified otherwise, this must conform to the
+                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                    # standard</a>. Values must be within normalized ranges.
+                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+                },
+              },
+            ],
             "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                 # fields, such as a score or string that qualifies the entity.
               { # A `Property` consists of a user-supplied name/value pair.
@@ -321,176 +323,88 @@
                 "name": "A String", # Name of the property.
               },
             ],
+            "score": 3.14, # Overall score of the result. Range [0, 1].
           },
         ],
-        "faceAnnotations": [ # If present, face detection has completed successfully.
-          { # A face annotation object contains the results of face detection.
-            "sorrowLikelihood": "A String", # Sorrow likelihood.
-            "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
-                # pointing relative to the image's horizontal plane. Range [-180,180].
-            "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
-                # `boundingPoly`, and encloses only the skin part of the face. Typically, it
-                # is used to eliminate the face from any image analysis that detects the
-                # "amount of skin" visible in an image. It is not based on the
-                # landmarker results, only on the initial face detection, hence
-                # the <code>fd</code> (face detection) prefix.
-              "normalizedVertices": [ # The bounding polygon normalized vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the normalized vertex coordinates are relative to the original image
-                    # and range from 0 to 1.
-                  "y": 3.14, # Y coordinate.
-                  "x": 3.14, # X coordinate.
-                },
-              ],
-              "vertices": [ # The bounding polygon vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the vertex coordinates are in the same scale as the original image.
-                  "x": 42, # X coordinate.
-                  "y": 42, # Y coordinate.
-                },
-              ],
+        "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
+            # methods over safe-search verticals (for example, adult, spoof, medical,
+            # violence).
+          "spoof": "A String", # Spoof likelihood. The likelihood that an modification
+              # was made to the image's canonical version to make it appear
+              # funny or offensive.
+          "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
+              # include (but is not limited to) skimpy or sheer clothing, strategically
+              # covered nudity, lewd or provocative poses, or close-ups of sensitive
+              # body areas.
+          "adult": "A String", # Represents the adult content likelihood for the image. Adult content may
+              # contain elements such as nudity, pornographic images or cartoons, or
+              # sexual activities.
+          "violence": "A String", # Likelihood that this image contains violent content.
+          "medical": "A String", # Likelihood that this is a medical image.
+        },
+        "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
+          "partialMatchingImages": [ # Partial matching images from the Internet.
+              # Those images are similar enough to share some key-point features. For
+              # example an original image will likely have partial matching for its crops.
+            { # Metadata for online images.
+              "url": "A String", # The result image URL.
+              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
             },
-            "landmarks": [ # Detected face landmarks.
-              { # A face-specific landmark (for example, a face feature).
-                "type": "A String", # Face landmark type.
-                "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
-                    # A valid Position must have both x and y coordinates.
-                    # The position coordinates are in the same scale as the original image.
-                  "y": 3.14, # Y coordinate.
-                  "x": 3.14, # X coordinate.
-                  "z": 3.14, # Z coordinate (or depth).
-                },
-              },
-            ],
-            "surpriseLikelihood": "A String", # Surprise likelihood.
-            "angerLikelihood": "A String", # Anger likelihood.
-            "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
-            "joyLikelihood": "A String", # Joy likelihood.
-            "underExposedLikelihood": "A String", # Under-exposed likelihood.
-            "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
-                # pointing relative to the vertical plane perpendicular to the image. Range
-                # [-180,180].
-            "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
-            "blurredLikelihood": "A String", # Blurred likelihood.
-            "headwearLikelihood": "A String", # Headwear likelihood.
-            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
-                # are in the original image's scale.
-                # The bounding box is computed to "frame" the face in accordance with human
-                # expectations. It is based on the landmarker results.
-                # Note that one or more x and/or y coordinates may not be generated in the
-                # `BoundingPoly` (the polygon will be unbounded) if only a partial face
-                # appears in the image to be annotated.
-              "normalizedVertices": [ # The bounding polygon normalized vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the normalized vertex coordinates are relative to the original image
-                    # and range from 0 to 1.
-                  "y": 3.14, # Y coordinate.
-                  "x": 3.14, # X coordinate.
-                },
-              ],
-              "vertices": [ # The bounding polygon vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the vertex coordinates are in the same scale as the original image.
-                  "x": 42, # X coordinate.
-                  "y": 42, # Y coordinate.
-                },
-              ],
+          ],
+          "bestGuessLabels": [ # The service's best guess as to the topic of the request image.
+              # Inferred from similar images on the open web.
+            { # Label to provide extra metadata for the web detection.
+              "label": "A String", # Label for extra metadata.
+              "languageCode": "A String", # The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+                  # For more information, see
+                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
             },
-            "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
-                # of the face relative to the image vertical about the axis perpendicular to
-                # the face. Range [-180,180].
-          },
-        ],
-        "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
-          "cropHints": [ # Crop hint results.
-            { # Single crop hint that is used to generate a new crop when serving an image.
-              "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
-              "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
-                  # image.
-              "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
-                  # box are in the original image's scale.
-                "normalizedVertices": [ # The bounding polygon normalized vertices.
-                  { # A vertex represents a 2D point in the image.
-                      # NOTE: the normalized vertex coordinates are relative to the original image
-                      # and range from 0 to 1.
-                    "y": 3.14, # Y coordinate.
-                    "x": 3.14, # X coordinate.
-                  },
-                ],
-                "vertices": [ # The bounding polygon vertices.
-                  { # A vertex represents a 2D point in the image.
-                      # NOTE: the vertex coordinates are in the same scale as the original image.
-                    "x": 42, # X coordinate.
-                    "y": 42, # Y coordinate.
-                  },
-                ],
-              },
+          ],
+          "visuallySimilarImages": [ # The visually similar image results.
+            { # Metadata for online images.
+              "url": "A String", # The result image URL.
+              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+            },
+          ],
+          "webEntities": [ # Deduced entities from similar images on the Internet.
+            { # Entity deduced from similar images on the Internet.
+              "entityId": "A String", # Opaque entity ID.
+              "score": 3.14, # Overall relevancy score for the entity.
+                  # Not normalized and not comparable across different image queries.
+              "description": "A String", # Canonical description of the entity, in English.
+            },
+          ],
+          "fullMatchingImages": [ # Fully matching images from the Internet.
+              # Can include resized copies of the query image.
+            { # Metadata for online images.
+              "url": "A String", # The result image URL.
+              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+            },
+          ],
+          "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
+            { # Metadata for web pages.
+              "fullMatchingImages": [ # Fully matching images on the page.
+                  # Can include resized copies of the query image.
+                { # Metadata for online images.
+                  "url": "A String", # The result image URL.
+                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                },
+              ],
+              "pageTitle": "A String", # Title for the web page, may contain HTML markups.
+              "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
+              "url": "A String", # The result web page URL.
+              "partialMatchingImages": [ # Partial matching images on the page.
+                  # Those images are similar enough to share some key-point features. For
+                  # example an original image will likely have partial matching for its
+                  # crops.
+                { # Metadata for online images.
+                  "url": "A String", # The result image URL.
+                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                },
+              ],
             },
           ],
         },
-        "labelAnnotations": [ # If present, label detection has completed successfully.
-          { # Set of detected entity features.
-            "score": 3.14, # Overall score of the result. Range [0, 1].
-            "locations": [ # The location information for the detected entity. Multiple
-                # `LocationInfo` elements can be present because one location may
-                # indicate the location of the scene in the image, and another location
-                # may indicate the location of the place where the image was taken.
-                # Location information is usually present for landmarks.
-              { # Detected entity location information.
-                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
-                    # of doubles representing degrees latitude and degrees longitude. Unless
-                    # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
-                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-                },
-              },
-            ],
-            "mid": "A String", # Opaque entity ID. Some IDs may be available in
-                # [Google Knowledge Graph Search
-                # API](https://developers.google.com/knowledge-graph/).
-            "confidence": 3.14, # **Deprecated. Use `score` instead.**
-                # The accuracy of the entity detection in an image.
-                # For example, for an image in which the "Eiffel Tower" entity is detected,
-                # this field represents the confidence that there is a tower in the query
-                # image. Range [0, 1].
-            "locale": "A String", # The language code for the locale in which the entity textual
-                # `description` is expressed.
-            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                # for `LABEL_DETECTION` features.
-              "normalizedVertices": [ # The bounding polygon normalized vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the normalized vertex coordinates are relative to the original image
-                    # and range from 0 to 1.
-                  "y": 3.14, # Y coordinate.
-                  "x": 3.14, # X coordinate.
-                },
-              ],
-              "vertices": [ # The bounding polygon vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the vertex coordinates are in the same scale as the original image.
-                  "x": 42, # X coordinate.
-                  "y": 42, # Y coordinate.
-                },
-              ],
-            },
-            "description": "A String", # Entity textual description, expressed in its `locale` language.
-            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
-                # image. For example, the relevancy of "tower" is likely higher to an image
-                # containing the detected "Eiffel Tower" than to an image containing a
-                # detected distant towering building, even though the confidence that
-                # there is a tower in each image may be the same. Range [0, 1].
-            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                # fields, such a score or string that qualifies the entity.
-              { # A `Property` consists of a user-supplied name/value pair.
-                "value": "A String", # Value of the property.
-                "uint64Value": "A String", # Value of numeric properties.
-                "name": "A String", # Name of the property.
-              },
-            ],
-          },
-        ],
         "productSearchResults": { # Results for a product search request. # If present, product search has completed successfully.
           "productGroupedResults": [ # List of results grouped by products detected in the query image. Each entry
               # corresponds to one bounding polygon in the query image, and contains the
@@ -501,10 +415,10 @@
               "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
                 { # Prediction for what the object in the bounding box is.
                   "score": 3.14, # Score of the result. Range [0, 1].
+                  "mid": "A String", # Object ID that should align with EntityAnnotation mid.
                   "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
                       # information, see
                       # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                  "mid": "A String", # Object ID that should align with EntityAnnotation mid.
                   "name": "A String", # Object name, expressed in its `language_code` language.
                 },
               ],
@@ -530,20 +444,14 @@
                   "image": "A String", # The resource name of the image from the product that is the closest match
                       # to the query.
                   "product": { # A Product contains ReferenceImages. # The Product.
+                    "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
+                        # 4096 characters long.
                     "name": "A String", # The resource name of the product.
                         #
                         # Format is:
                         # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                         #
                         # This field is ignored when creating a product.
-                    "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
-                        # 4096 characters long.
-                    "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
-                        # characters long.
-                    "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
-                        # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
-                        # "homegoods", "apparel", and "toys" are still supported, but these should
-                        # not be used for new products.
                     "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
                         # constraints can be specified based on the product_labels.
                         #
@@ -558,12 +466,18 @@
                         # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                         # will refuse to work for that ProductSet.
                       { # A product label represented as a key-value pair.
-                        "value": "A String", # The value of the label attached to the product. Cannot be empty and
-                            # cannot exceed 128 bytes.
                         "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
                             # exceed 128 bytes.
+                        "value": "A String", # The value of the label attached to the product. Cannot be empty and
+                            # cannot exceed 128 bytes.
                       },
                     ],
+                    "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
+                        # characters long.
+                    "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
+                        # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
+                        # "homegoods", "apparel", and "toys" are still supported, but these should
+                        # not be used for new products.
                   },
                   "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                       # 1 (full confidence).
@@ -571,25 +485,22 @@
               ],
             },
           ],
+          "indexTime": "A String", # Timestamp of the index which provided these results. Products added to the
+              # product set and products removed from the product set after this time are
+              # not reflected in the current results.
           "results": [ # List of results, one for each product match.
             { # Information about a product.
               "image": "A String", # The resource name of the image from the product that is the closest match
                   # to the query.
               "product": { # A Product contains ReferenceImages. # The Product.
+                "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
+                    # 4096 characters long.
                 "name": "A String", # The resource name of the product.
                     #
                     # Format is:
                     # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                     #
                     # This field is ignored when creating a product.
-                "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
-                    # 4096 characters long.
-                "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
-                    # characters long.
-                "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
-                    # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
-                    # "homegoods", "apparel", and "toys" are still supported, but these should
-                    # not be used for new products.
                 "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
                     # constraints can be specified based on the product_labels.
                     #
@@ -604,68 +515,23 @@
                     # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
                     # will refuse to work for that ProductSet.
                   { # A product label represented as a key-value pair.
-                    "value": "A String", # The value of the label attached to the product. Cannot be empty and
-                        # cannot exceed 128 bytes.
                     "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
                         # exceed 128 bytes.
+                    "value": "A String", # The value of the label attached to the product. Cannot be empty and
+                        # cannot exceed 128 bytes.
                   },
                 ],
+                "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
+                    # characters long.
+                "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
+                    # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
+                    # "homegoods", "apparel", and "toys" are still supported, but these should
+                    # not be used for new products.
               },
               "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                   # 1 (full confidence).
             },
           ],
-          "indexTime": "A String", # Timestamp of the index which provided these results. Products added to the
-              # product set and products removed from the product set after this time are
-              # not reflected in the current results.
-        },
-        "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
-            # This will be sorted descending by confidence score.
-          { # Set of detected objects with bounding boxes.
-            "score": 3.14, # Score of the result. Range [0, 1].
-            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                # information, see
-                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-            "mid": "A String", # Object ID that should align with EntityAnnotation mid.
-            "name": "A String", # Object name, expressed in its `language_code` language.
-            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
-              "normalizedVertices": [ # The bounding polygon normalized vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the normalized vertex coordinates are relative to the original image
-                    # and range from 0 to 1.
-                  "y": 3.14, # Y coordinate.
-                  "x": 3.14, # X coordinate.
-                },
-              ],
-              "vertices": [ # The bounding polygon vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the vertex coordinates are in the same scale as the original image.
-                  "x": 42, # X coordinate.
-                  "y": 42, # Y coordinate.
-                },
-              ],
-            },
-          },
-        ],
-        "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
-            # Note that filled-in image annotations are guaranteed to be
-            # correct, even when `error` is set.
-            # different programming environments, including REST APIs and RPC APIs. It is
-            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-            # three pieces of data: error code, error message, and error details.
-            #
-            # You can find out more about this error model and how to work with it in the
-            # [API Design Guide](https://cloud.google.com/apis/design/errors).
-          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-          "message": "A String", # A developer-facing error message, which should be in English. Any
-              # user-facing error message should be localized and sent in the
-              # google.rpc.Status.details field, or localized by the client.
-          "details": [ # A list of messages that carry the error details.  There is a common set of
-              # message types for APIs to use.
-            {
-              "a_key": "", # Properties of the object. Contains field @type with type URL.
-            },
-          ],
         },
         "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
             # completed successfully.
@@ -677,14 +543,170 @@
             # properties. Properties describe detected languages, breaks, etc. Please refer
             # to the TextAnnotation.TextProperty message definition below for more
             # detail.
+          "text": "A String", # UTF-8 text detected on the pages.
           "pages": [ # List of pages detected by OCR.
             { # Detected page from OCR.
+              "width": 42, # Page width. For PDFs the unit is points. For images (including
+                  # TIFFs) the unit is pixels.
               "blocks": [ # List of blocks of text, images etc on this page.
                 { # Logical element on the page.
+                  "paragraphs": [ # List of paragraphs in this block (if this blocks is of type text).
+                    { # Structural unit of text representing a number of words in certain order.
+                      "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
+                          # The vertices are in the order of top-left, top-right, bottom-right,
+                          # bottom-left. When a rotation of the bounding box is detected the rotation
+                          # is represented as around the top-left corner as defined when the text is
+                          # read in the 'natural' orientation.
+                          # For example:
+                          #   * when the text is horizontal it might look like:
+                          #      0----1
+                          #      |    |
+                          #      3----2
+                          #   * when it's rotated 180 degrees around the top-left corner it becomes:
+                          #      2----3
+                          #      |    |
+                          #      1----0
+                          #   and the vertex order will still be (0, 1, 2, 3).
+                        "normalizedVertices": [ # The bounding polygon normalized vertices.
+                          { # A vertex represents a 2D point in the image.
+                              # NOTE: the normalized vertex coordinates are relative to the original image
+                              # and range from 0 to 1.
+                            "y": 3.14, # Y coordinate.
+                            "x": 3.14, # X coordinate.
+                          },
+                        ],
+                        "vertices": [ # The bounding polygon vertices.
+                          { # A vertex represents a 2D point in the image.
+                              # NOTE: the vertex coordinates are in the same scale as the original image.
+                            "x": 42, # X coordinate.
+                            "y": 42, # Y coordinate.
+                          },
+                        ],
+                      },
+                      "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
+                        "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                          "isPrefix": True or False, # True if break prepends the element.
+                          "type": "A String", # Detected break type.
+                        },
+                        "detectedLanguages": [ # A list of detected languages together with confidence.
+                          { # Detected language for a structural component.
+                            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                                # information, see
+                                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                            "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                          },
+                        ],
+                      },
+                      "words": [ # List of all words in this paragraph.
+                        { # A word representation.
+                          "property": { # Additional information detected on the structural component. # Additional information detected for the word.
+                            "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                              "isPrefix": True or False, # True if break prepends the element.
+                              "type": "A String", # Detected break type.
+                            },
+                            "detectedLanguages": [ # A list of detected languages together with confidence.
+                              { # Detected language for a structural component.
+                                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                                    # information, see
+                                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                                "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                              },
+                            ],
+                          },
+                          "confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
+                          "symbols": [ # List of symbols in the word.
+                              # The order of the symbols follows the natural reading order.
+                            { # A single symbol representation.
+                              "confidence": 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
+                              "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
+                                  # The vertices are in the order of top-left, top-right, bottom-right,
+                                  # bottom-left. When a rotation of the bounding box is detected the rotation
+                                  # is represented as around the top-left corner as defined when the text is
+                                  # read in the 'natural' orientation.
+                                  # For example:
+                                  #   * when the text is horizontal it might look like:
+                                  #      0----1
+                                  #      |    |
+                                  #      3----2
+                                  #   * when it's rotated 180 degrees around the top-left corner it becomes:
+                                  #      2----3
+                                  #      |    |
+                                  #      1----0
+                                  #   and the vertex order will still be (0, 1, 2, 3).
+                                "normalizedVertices": [ # The bounding polygon normalized vertices.
+                                  { # A vertex represents a 2D point in the image.
+                                      # NOTE: the normalized vertex coordinates are relative to the original image
+                                      # and range from 0 to 1.
+                                    "y": 3.14, # Y coordinate.
+                                    "x": 3.14, # X coordinate.
+                                  },
+                                ],
+                                "vertices": [ # The bounding polygon vertices.
+                                  { # A vertex represents a 2D point in the image.
+                                      # NOTE: the vertex coordinates are in the same scale as the original image.
+                                    "x": 42, # X coordinate.
+                                    "y": 42, # Y coordinate.
+                                  },
+                                ],
+                              },
+                              "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
+                                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                                  "isPrefix": True or False, # True if break prepends the element.
+                                  "type": "A String", # Detected break type.
+                                },
+                                "detectedLanguages": [ # A list of detected languages together with confidence.
+                                  { # Detected language for a structural component.
+                                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                                        # information, see
+                                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                                  },
+                                ],
+                              },
+                              "text": "A String", # The actual UTF-8 representation of the symbol.
+                            },
+                          ],
+                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
+                              # The vertices are in the order of top-left, top-right, bottom-right,
+                              # bottom-left. When a rotation of the bounding box is detected the rotation
+                              # is represented as around the top-left corner as defined when the text is
+                              # read in the 'natural' orientation.
+                              # For example:
+                              #   * when the text is horizontal it might look like:
+                              #      0----1
+                              #      |    |
+                              #      3----2
+                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
+                              #      2----3
+                              #      |    |
+                              #      1----0
+                              #   and the vertex order will still be (0, 1, 2, 3).
+                            "normalizedVertices": [ # The bounding polygon normalized vertices.
+                              { # A vertex represents a 2D point in the image.
+                                  # NOTE: the normalized vertex coordinates are relative to the original image
+                                  # and range from 0 to 1.
+                                "y": 3.14, # Y coordinate.
+                                "x": 3.14, # X coordinate.
+                              },
+                            ],
+                            "vertices": [ # The bounding polygon vertices.
+                              { # A vertex represents a 2D point in the image.
+                                  # NOTE: the vertex coordinates are in the same scale as the original image.
+                                "x": 42, # X coordinate.
+                                "y": 42, # Y coordinate.
+                              },
+                            ],
+                          },
+                        },
+                      ],
+                      "confidence": 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
+                    },
+                  ],
+                  "blockType": "A String", # Detected block type (text, image etc) for this block.
                   "property": { # Additional information detected on the structural component. # Additional information detected for the block.
                     "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                      "type": "A String", # Detected break type.
                       "isPrefix": True or False, # True if break prepends the element.
+                      "type": "A String", # Detected break type.
                     },
                     "detectedLanguages": [ # A list of detected languages together with confidence.
                       { # Detected language for a structural component.
@@ -695,7 +717,7 @@
                       },
                     ],
                   },
-                  "blockType": "A String", # Detected block type (text, image etc) for this block.
+                  "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
                   "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the block.
                       # The vertices are in the order of top-left, top-right, bottom-right,
                       # bottom-left. When a rotation of the bounding box is detected the rotation
@@ -732,165 +754,15 @@
                       },
                     ],
                   },
-                  "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
-                  "paragraphs": [ # List of paragraphs in this block (if this blocks is of type text).
-                    { # Structural unit of text representing a number of words in certain order.
-                      "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
-                        "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                          "type": "A String", # Detected break type.
-                          "isPrefix": True or False, # True if break prepends the element.
-                        },
-                        "detectedLanguages": [ # A list of detected languages together with confidence.
-                          { # Detected language for a structural component.
-                            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                                # information, see
-                                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                            "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                          },
-                        ],
-                      },
-                      "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
-                          # The vertices are in the order of top-left, top-right, bottom-right,
-                          # bottom-left. When a rotation of the bounding box is detected the rotation
-                          # is represented as around the top-left corner as defined when the text is
-                          # read in the 'natural' orientation.
-                          # For example:
-                          #   * when the text is horizontal it might look like:
-                          #      0----1
-                          #      |    |
-                          #      3----2
-                          #   * when it's rotated 180 degrees around the top-left corner it becomes:
-                          #      2----3
-                          #      |    |
-                          #      1----0
-                          #   and the vertex order will still be (0, 1, 2, 3).
-                        "normalizedVertices": [ # The bounding polygon normalized vertices.
-                          { # A vertex represents a 2D point in the image.
-                              # NOTE: the normalized vertex coordinates are relative to the original image
-                              # and range from 0 to 1.
-                            "y": 3.14, # Y coordinate.
-                            "x": 3.14, # X coordinate.
-                          },
-                        ],
-                        "vertices": [ # The bounding polygon vertices.
-                          { # A vertex represents a 2D point in the image.
-                              # NOTE: the vertex coordinates are in the same scale as the original image.
-                            "x": 42, # X coordinate.
-                            "y": 42, # Y coordinate.
-                          },
-                        ],
-                      },
-                      "confidence": 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
-                      "words": [ # List of all words in this paragraph.
-                        { # A word representation.
-                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
-                              # The vertices are in the order of top-left, top-right, bottom-right,
-                              # bottom-left. When a rotation of the bounding box is detected the rotation
-                              # is represented as around the top-left corner as defined when the text is
-                              # read in the 'natural' orientation.
-                              # For example:
-                              #   * when the text is horizontal it might look like:
-                              #      0----1
-                              #      |    |
-                              #      3----2
-                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
-                              #      2----3
-                              #      |    |
-                              #      1----0
-                              #   and the vertex order will still be (0, 1, 2, 3).
-                            "normalizedVertices": [ # The bounding polygon normalized vertices.
-                              { # A vertex represents a 2D point in the image.
-                                  # NOTE: the normalized vertex coordinates are relative to the original image
-                                  # and range from 0 to 1.
-                                "y": 3.14, # Y coordinate.
-                                "x": 3.14, # X coordinate.
-                              },
-                            ],
-                            "vertices": [ # The bounding polygon vertices.
-                              { # A vertex represents a 2D point in the image.
-                                  # NOTE: the vertex coordinates are in the same scale as the original image.
-                                "x": 42, # X coordinate.
-                                "y": 42, # Y coordinate.
-                              },
-                            ],
-                          },
-                          "confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
-                          "symbols": [ # List of symbols in the word.
-                              # The order of the symbols follows the natural reading order.
-                            { # A single symbol representation.
-                              "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
-                                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                                  "type": "A String", # Detected break type.
-                                  "isPrefix": True or False, # True if break prepends the element.
-                                },
-                                "detectedLanguages": [ # A list of detected languages together with confidence.
-                                  { # Detected language for a structural component.
-                                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                                        # information, see
-                                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                                  },
-                                ],
-                              },
-                              "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
-                                  # The vertices are in the order of top-left, top-right, bottom-right,
-                                  # bottom-left. When a rotation of the bounding box is detected the rotation
-                                  # is represented as around the top-left corner as defined when the text is
-                                  # read in the 'natural' orientation.
-                                  # For example:
-                                  #   * when the text is horizontal it might look like:
-                                  #      0----1
-                                  #      |    |
-                                  #      3----2
-                                  #   * when it's rotated 180 degrees around the top-left corner it becomes:
-                                  #      2----3
-                                  #      |    |
-                                  #      1----0
-                                  #   and the vertex order will still be (0, 1, 2, 3).
-                                "normalizedVertices": [ # The bounding polygon normalized vertices.
-                                  { # A vertex represents a 2D point in the image.
-                                      # NOTE: the normalized vertex coordinates are relative to the original image
-                                      # and range from 0 to 1.
-                                    "y": 3.14, # Y coordinate.
-                                    "x": 3.14, # X coordinate.
-                                  },
-                                ],
-                                "vertices": [ # The bounding polygon vertices.
-                                  { # A vertex represents a 2D point in the image.
-                                      # NOTE: the vertex coordinates are in the same scale as the original image.
-                                    "x": 42, # X coordinate.
-                                    "y": 42, # Y coordinate.
-                                  },
-                                ],
-                              },
-                              "confidence": 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
-                              "text": "A String", # The actual UTF-8 representation of the symbol.
-                            },
-                          ],
-                          "property": { # Additional information detected on the structural component. # Additional information detected for the word.
-                            "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                              "type": "A String", # Detected break type.
-                              "isPrefix": True or False, # True if break prepends the element.
-                            },
-                            "detectedLanguages": [ # A list of detected languages together with confidence.
-                              { # Detected language for a structural component.
-                                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                                    # information, see
-                                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                                "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                              },
-                            ],
-                          },
-                        },
-                      ],
-                    },
-                  ],
                 },
               ],
+              "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
+              "height": 42, # Page height. For PDFs the unit is points. For images (including
+                  # TIFFs) the unit is pixels.
               "property": { # Additional information detected on the structural component. # Additional information detected on the page.
                 "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                  "type": "A String", # Detected break type.
                   "isPrefix": True or False, # True if break prepends the element.
+                  "type": "A String", # Detected break type.
                 },
                 "detectedLanguages": [ # A list of detected languages together with confidence.
                   { # Detected language for a structural component.
@@ -901,46 +773,18 @@
                   },
                 ],
               },
-              "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
-              "height": 42, # Page height. For PDFs the unit is points. For images (including
-                  # TIFFs) the unit is pixels.
-              "width": 42, # Page width. For PDFs the unit is points. For images (including
-                  # TIFFs) the unit is pixels.
             },
           ],
-          "text": "A String", # UTF-8 text detected on the pages.
         },
-        "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
-          { # Set of detected entity features.
-            "score": 3.14, # Overall score of the result. Range [0, 1].
-            "locations": [ # The location information for the detected entity. Multiple
-                # `LocationInfo` elements can be present because one location may
-                # indicate the location of the scene in the image, and another location
-                # may indicate the location of the place where the image was taken.
-                # Location information is usually present for landmarks.
-              { # Detected entity location information.
-                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
-                    # of doubles representing degrees latitude and degrees longitude. Unless
-                    # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
-                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-                },
-              },
-            ],
-            "mid": "A String", # Opaque entity ID. Some IDs may be available in
-                # [Google Knowledge Graph Search
-                # API](https://developers.google.com/knowledge-graph/).
-            "confidence": 3.14, # **Deprecated. Use `score` instead.**
-                # The accuracy of the entity detection in an image.
-                # For example, for an image in which the "Eiffel Tower" entity is detected,
-                # this field represents the confidence that there is a tower in the query
-                # image. Range [0, 1].
-            "locale": "A String", # The language code for the locale in which the entity textual
-                # `description` is expressed.
-            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                # for `LABEL_DETECTION` features.
+        "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
+            # Results are sorted in descending order by confidence score.
+          { # Set of detected objects with bounding boxes.
+            "name": "A String", # Object name, expressed in its `language_code` language.
+            "score": 3.14, # Score of the result. Range [0, 1].
+            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                # information, see
+                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
                 { # A vertex represents a 2D point in the image.
                     # NOTE: the normalized vertex coordinates are relative to the original image
@@ -957,20 +801,7 @@
                 },
               ],
             },
-            "description": "A String", # Entity textual description, expressed in its `locale` language.
-            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
-                # image. For example, the relevancy of "tower" is likely higher to an image
-                # containing the detected "Eiffel Tower" than to an image containing a
-                # detected distant towering building, even though the confidence that
-                # there is a tower in each image may be the same. Range [0, 1].
-            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                # fields, such a score or string that qualifies the entity.
-              { # A `Property` consists of a user-supplied name/value pair.
-                "value": "A String", # Value of the property.
-                "uint64Value": "A String", # Value of numeric properties.
-                "name": "A String", # Name of the property.
-              },
-            ],
+            "mid": "A String", # Object ID that should align with EntityAnnotation mid.
           },
         ],
         "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
@@ -979,8 +810,6 @@
               { # Color information consists of RGB channels, score, and the fraction of
                   # the image that the color occupies in the image.
                 "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
-                "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
-                    # Value in range [0, 1].
                 "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
                     # for simplicity of conversion to/from color representations in various
                     # languages over compactness; for example, the fields of this representation
@@ -994,6 +823,10 @@
                     # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
                     # space.
                     #
+                    # Note: when color equality needs to be decided, implementations, unless
+                    # documented otherwise, will treat two colors as equal if all their red,
+                    # green, blue and alpha values each differ by at most 1e-5.
+                    #
                     # Example (Java):
                     #
                     #      import com.google.type.Color;
@@ -1100,9 +933,9 @@
                     #     };
                     #
                     #     // ...
+                  "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                   "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
                   "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
-                  "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
                   "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
                       # the final pixel color is defined by the equation:
                       #
@@ -1115,39 +948,98 @@
                       # If omitted, this color object is to be rendered as a solid color
                       # (as if the alpha value had been explicitly given with a value of 1.0).
                 },
+                "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
+                    # Value in range [0, 1].
               },
             ],
           },
         },
-        "logoAnnotations": [ # If present, logo detection has completed successfully.
-          { # Set of detected entity features.
-            "score": 3.14, # Overall score of the result. Range [0, 1].
-            "locations": [ # The location information for the detected entity. Multiple
-                # `LocationInfo` elements can be present because one location may
-                # indicate the location of the scene in the image, and another location
-                # may indicate the location of the place where the image was taken.
-                # Location information is usually present for landmarks.
-              { # Detected entity location information.
-                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
-                    # of doubles representing degrees latitude and degrees longitude. Unless
-                    # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
-                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+        "faceAnnotations": [ # If present, face detection has completed successfully.
+          { # A face annotation object contains the results of face detection.
+            "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
+                # `boundingPoly`, and encloses only the skin part of the face. Typically, it
+                # is used to eliminate the face from any image analysis that detects the
+                # "amount of skin" visible in an image. It is not based on the
+                # landmarker results, only on the initial face detection, hence
+                # the <code>fd</code> (face detection) prefix.
+              "normalizedVertices": [ # The bounding polygon normalized vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the normalized vertex coordinates are relative to the original image
+                    # and range from 0 to 1.
+                  "y": 3.14, # Y coordinate.
+                  "x": 3.14, # X coordinate.
                 },
+              ],
+              "vertices": [ # The bounding polygon vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the vertex coordinates are in the same scale as the original image.
+                  "x": 42, # X coordinate.
+                  "y": 42, # Y coordinate.
+                },
+              ],
+            },
+            "sorrowLikelihood": "A String", # Sorrow likelihood.
+            "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+                # of the face relative to the image vertical about the axis perpendicular to
+                # the face. Range [-180,180].
+            "angerLikelihood": "A String", # Anger likelihood.
+            "surpriseLikelihood": "A String", # Surprise likelihood.
+            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
+                # are in the original image's scale.
+                # The bounding box is computed to "frame" the face in accordance with human
+                # expectations. It is based on the landmarker results.
+                # Note that one or more x and/or y coordinates may not be generated in the
+                # `BoundingPoly` (the polygon will be unbounded) if only a partial face
+                # appears in the image to be annotated.
+              "normalizedVertices": [ # The bounding polygon normalized vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the normalized vertex coordinates are relative to the original image
+                    # and range from 0 to 1.
+                  "y": 3.14, # Y coordinate.
+                  "x": 3.14, # X coordinate.
+                },
+              ],
+              "vertices": [ # The bounding polygon vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the vertex coordinates are in the same scale as the original image.
+                  "x": 42, # X coordinate.
+                  "y": 42, # Y coordinate.
+                },
+              ],
+            },
+            "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
+            "headwearLikelihood": "A String", # Headwear likelihood.
+            "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
+                # pointing relative to the vertical plane perpendicular to the image. Range
+                # [-180,180].
+            "landmarks": [ # Detected face landmarks.
+              { # A face-specific landmark (for example, a face feature).
+                "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
+                    # A valid Position must have both x and y coordinates.
+                    # The position coordinates are in the same scale as the original image.
+                  "z": 3.14, # Z coordinate (or depth).
+                  "y": 3.14, # Y coordinate.
+                  "x": 3.14, # X coordinate.
+                },
+                "type": "A String", # Face landmark type.
               },
             ],
-            "mid": "A String", # Opaque entity ID. Some IDs may be available in
-                # [Google Knowledge Graph Search
-                # API](https://developers.google.com/knowledge-graph/).
+            "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
+            "joyLikelihood": "A String", # Joy likelihood.
+            "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
+                # pointing relative to the image's horizontal plane. Range [-180,180].
+            "underExposedLikelihood": "A String", # Under-exposed likelihood.
+            "blurredLikelihood": "A String", # Blurred likelihood.
+          },
+        ],
+        "logoAnnotations": [ # If present, logo detection has completed successfully.
+          { # Set of detected entity features.
+            "description": "A String", # Entity textual description, expressed in its `locale` language.
             "confidence": 3.14, # **Deprecated. Use `score` instead.**
                 # The accuracy of the entity detection in an image.
                 # For example, for an image in which the "Eiffel Tower" entity is detected,
                 # this field represents the confidence that there is a tower in the query
                 # image. Range [0, 1].
-            "locale": "A String", # The language code for the locale in which the entity textual
-                # `description` is expressed.
             "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
                 # for `LABEL_DETECTION` features.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
@@ -1166,12 +1058,32 @@
                 },
               ],
             },
-            "description": "A String", # Entity textual description, expressed in its `locale` language.
+            "mid": "A String", # Opaque entity ID. Some IDs may be available in
+                # [Google Knowledge Graph Search
+                # API](https://developers.google.com/knowledge-graph/).
+            "locale": "A String", # The language code for the locale in which the entity textual
+                # `description` is expressed.
             "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                 # image. For example, the relevancy of "tower" is likely higher to an image
                 # containing the detected "Eiffel Tower" than to an image containing a
                 # detected distant towering building, even though the confidence that
                 # there is a tower in each image may be the same. Range [0, 1].
+            "locations": [ # The location information for the detected entity. Multiple
+                # `LocationInfo` elements can be present because one location may
+                # indicate the location of the scene in the image, and another location
+                # may indicate the location of the place where the image was taken.
+                # Location information is usually present for landmarks.
+              { # Detected entity location information.
+                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
+                    # of doubles representing degrees latitude and degrees longitude. Unless
+                    # specified otherwise, this must conform to the
+                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                    # standard</a>. Values must be within normalized ranges.
+                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+                },
+              },
+            ],
             "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
                 # fields, such a score or string that qualifies the entity.
               { # A `Property` consists of a user-supplied name/value pair.
@@ -1180,94 +1092,189 @@
                 "name": "A String", # Name of the property.
               },
             ],
+            "score": 3.14, # Overall score of the result. Range [0, 1].
           },
         ],
         "context": { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
             # comes from.
             # information about the source of that image.
-          "uri": "A String", # The URI of the file used to produce the image.
           "pageNumber": 42, # If the file was a PDF or TIFF, this field gives the page number within
               # the file used to produce the image.
+          "uri": "A String", # The URI of the file used to produce the image.
         },
-        "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
-          "visuallySimilarImages": [ # The visually similar image results.
-            { # Metadata for online images.
-              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-              "url": "A String", # The result image URL.
+        "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
+          "cropHints": [ # Crop hint results.
+            { # Single crop hint that is used to generate a new crop when serving an image.
+              "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
+                  # image.
+              "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
+                  # box are in the original image's scale.
+                "normalizedVertices": [ # The bounding polygon normalized vertices.
+                  { # A vertex represents a 2D point in the image.
+                      # NOTE: the normalized vertex coordinates are relative to the original image
+                      # and range from 0 to 1.
+                    "y": 3.14, # Y coordinate.
+                    "x": 3.14, # X coordinate.
+                  },
+                ],
+                "vertices": [ # The bounding polygon vertices.
+                  { # A vertex represents a 2D point in the image.
+                      # NOTE: the vertex coordinates are in the same scale as the original image.
+                    "x": 42, # X coordinate.
+                    "y": 42, # Y coordinate.
+                  },
+                ],
+              },
+              "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
             },
           ],
-          "bestGuessLabels": [ # The service's best guess as to the topic of the request image.
-              # Inferred from similar images on the open web.
-            { # Label to provide extra metadata for the web detection.
-              "label": "A String", # Label for extra metadata.
-              "languageCode": "A String", # The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
-                  # For more information, see
-                  # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+        },
+        "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
+            # Note that filled-in image annotations are guaranteed to be
+            # correct, even when `error` is set.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
             },
           ],
-          "fullMatchingImages": [ # Fully matching images from the Internet.
-              # Can include resized copies of the query image.
-            { # Metadata for online images.
-              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-              "url": "A String", # The result image URL.
-            },
-          ],
-          "webEntities": [ # Deduced entities from similar images on the Internet.
-            { # Entity deduced from similar images on the Internet.
-              "entityId": "A String", # Opaque entity ID.
-              "description": "A String", # Canonical description of the entity, in English.
-              "score": 3.14, # Overall relevancy score for the entity.
-                  # Not normalized and not comparable across different image queries.
-            },
-          ],
-          "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
-            { # Metadata for web pages.
-              "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
-              "partialMatchingImages": [ # Partial matching images on the page.
-                  # Those images are similar enough to share some key-point features. For
-                  # example an original image will likely have partial matching for its
-                  # crops.
-                { # Metadata for online images.
-                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                  "url": "A String", # The result image URL.
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
+        "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
+          { # Set of detected entity features.
+            "description": "A String", # Entity textual description, expressed in its `locale` language.
+            "confidence": 3.14, # **Deprecated. Use `score` instead.**
+                # The accuracy of the entity detection in an image.
+                # For example, for an image in which the "Eiffel Tower" entity is detected,
+                # this field represents the confidence that there is a tower in the query
+                # image. Range [0, 1].
+            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                # for `LABEL_DETECTION` features.
+              "normalizedVertices": [ # The bounding polygon normalized vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the normalized vertex coordinates are relative to the original image
+                    # and range from 0 to 1.
+                  "y": 3.14, # Y coordinate.
+                  "x": 3.14, # X coordinate.
                 },
               ],
-              "url": "A String", # The result web page URL.
-              "pageTitle": "A String", # Title for the web page, may contain HTML markups.
-              "fullMatchingImages": [ # Fully matching images on the page.
-                  # Can include resized copies of the query image.
-                { # Metadata for online images.
-                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                  "url": "A String", # The result image URL.
+              "vertices": [ # The bounding polygon vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the vertex coordinates are in the same scale as the original image.
+                  "x": 42, # X coordinate.
+                  "y": 42, # Y coordinate.
                 },
               ],
             },
-          ],
-          "partialMatchingImages": [ # Partial matching images from the Internet.
-              # Those images are similar enough to share some key-point features. For
-              # example an original image will likely have partial matching for its crops.
-            { # Metadata for online images.
-              "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-              "url": "A String", # The result image URL.
+            "mid": "A String", # Opaque entity ID. Some IDs may be available in
+                # [Google Knowledge Graph Search
+                # API](https://developers.google.com/knowledge-graph/).
+            "locale": "A String", # The language code for the locale in which the entity textual
+                # `description` is expressed.
+            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+                # image. For example, the relevancy of "tower" is likely higher to an image
+                # containing the detected "Eiffel Tower" than to an image containing a
+                # detected distant towering building, even though the confidence that
+                # there is a tower in each image may be the same. Range [0, 1].
+            "locations": [ # The location information for the detected entity. Multiple
+                # `LocationInfo` elements can be present because one location may
+                # indicate the location of the scene in the image, and another location
+                # may indicate the location of the place where the image was taken.
+                # Location information is usually present for landmarks.
+              { # Detected entity location information.
+                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
+                    # of doubles representing degrees latitude and degrees longitude. Unless
+                    # specified otherwise, this must conform to the
+                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                    # standard</a>. Values must be within normalized ranges.
+                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+                },
+              },
+            ],
+            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                # fields, such as a score or string that qualifies the entity.
+              { # A `Property` consists of a user-supplied name/value pair.
+                "value": "A String", # Value of the property.
+                "uint64Value": "A String", # Value of numeric properties.
+                "name": "A String", # Name of the property.
+              },
+            ],
+            "score": 3.14, # Overall score of the result. Range [0, 1].
+          },
+        ],
+        "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
+          { # Set of detected entity features.
+            "description": "A String", # Entity textual description, expressed in its `locale` language.
+            "confidence": 3.14, # **Deprecated. Use `score` instead.**
+                # The accuracy of the entity detection in an image.
+                # For example, for an image in which the "Eiffel Tower" entity is detected,
+                # this field represents the confidence that there is a tower in the query
+                # image. Range [0, 1].
+            "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                # for `LABEL_DETECTION` features.
+              "normalizedVertices": [ # The bounding polygon normalized vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the normalized vertex coordinates are relative to the original image
+                    # and range from 0 to 1.
+                  "y": 3.14, # Y coordinate.
+                  "x": 3.14, # X coordinate.
+                },
+              ],
+              "vertices": [ # The bounding polygon vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the vertex coordinates are in the same scale as the original image.
+                  "x": 42, # X coordinate.
+                  "y": 42, # Y coordinate.
+                },
+              ],
             },
-          ],
-        },
-        "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
-            # methods over safe-search verticals (for example, adult, spoof, medical,
-            # violence).
-          "adult": "A String", # Represents the adult content likelihood for the image. Adult content may
-              # contain elements such as nudity, pornographic images or cartoons, or
-              # sexual activities.
-          "spoof": "A String", # Spoof likelihood. The likelihood that an modification
-              # was made to the image's canonical version to make it appear
-              # funny or offensive.
-          "medical": "A String", # Likelihood that this is a medical image.
-          "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
-              # include (but is not limited to) skimpy or sheer clothing, strategically
-              # covered nudity, lewd or provocative poses, or close-ups of sensitive
-              # body areas.
-          "violence": "A String", # Likelihood that this image contains violent content.
-        },
+            "mid": "A String", # Opaque entity ID. Some IDs may be available in
+                # [Google Knowledge Graph Search
+                # API](https://developers.google.com/knowledge-graph/).
+            "locale": "A String", # The language code for the locale in which the entity textual
+                # `description` is expressed.
+            "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+                # image. For example, the relevancy of "tower" is likely higher to an image
+                # containing the detected "Eiffel Tower" than to an image containing a
+                # detected distant towering building, even though the confidence that
+                # there is a tower in each image may be the same. Range [0, 1].
+            "locations": [ # The location information for the detected entity. Multiple
+                # `LocationInfo` elements can be present because one location may
+                # indicate the location of the scene in the image, and another location
+                # may indicate the location of the place where the image was taken.
+                # Location information is usually present for landmarks.
+              { # Detected entity location information.
+                "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
+                    # of doubles representing degrees latitude and degrees longitude. Unless
+                    # specified otherwise, this must conform to the
+                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                    # standard</a>. Values must be within normalized ranges.
+                  "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+                  "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+                },
+              },
+            ],
+            "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                # fields, such as a score or string that qualifies the entity.
+              { # A `Property` consists of a user-supplied name/value pair.
+                "value": "A String", # Value of the property.
+                "uint64Value": "A String", # Value of numeric properties.
+                "name": "A String", # Name of the property.
+              },
+            ],
+            "score": 3.14, # Overall score of the result. Range [0, 1].
+          },
+        ],
       },
     ],
   }</pre>
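
The response structure above is easiest to digest from a short consumer. The following Python sketch is not part of the generated reference; it assumes `response` is the dict returned by the `annotate` call documented in this file and that the caller already knows the pixel dimensions of the submitted image (needed because `normalizedVertices` are expressed in the 0-1 range relative to the original image). Only the field names shown in the schema above are relied on; everything else is illustrative.

def summarize_annotate_response(response, image_width, image_height):
    # Walk each per-image result in the batch.
    for result in response.get('responses', []):
        # Per the schema, `error` may be set even though the filled-in
        # annotations remain valid, so report it and keep going.
        err = result.get('error')
        if err:
            print('partial error %s: %s' % (err.get('code'), err.get('message')))

        # Localized objects are sorted by descending score; scale the
        # normalized vertices back to pixel coordinates.
        for obj in result.get('localizedObjectAnnotations', []):
            verts = obj.get('boundingPoly', {}).get('normalizedVertices', [])
            pixels = [(v.get('x', 0.0) * image_width, v.get('y', 0.0) * image_height)
                      for v in verts]
            print('%s (score %.2f): %s' % (obj.get('name'), obj.get('score', 0.0), pixels))

        # Face results carry likelihood enums plus a detection confidence.
        for face in result.get('faceAnnotations', []):
            print('face: joy=%s anger=%s confidence=%.2f' % (
                face.get('joyLikelihood'), face.get('angerLikelihood'),
                face.get('detectionConfidence', 0.0)))
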
@@ -1302,139 +1309,6 @@
     The object takes the form of:
 
 { # Request for async image annotation for a list of images.
-    &quot;requests&quot;: [ # Required. Individual image annotation requests for this batch.
-      { # Request for performing Google Cloud Vision API tasks over a user-provided
-          # image, with user-requested features, and with context information.
-        &quot;image&quot;: { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
-          &quot;content&quot;: &quot;A String&quot;, # Image content, represented as a stream of bytes.
-              # Note: As with all `bytes` fields, protobuffers use a pure binary
-              # representation, whereas JSON representations use base64.
-          &quot;source&quot;: { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
-              # URL. If both `content` and `source` are provided for an image, `content`
-              # takes precedence and is used to perform the image annotation request.
-            &quot;gcsImageUri&quot;: &quot;A String&quot;, # **Use `image_uri` instead.**
-                #
-                # The Google Cloud Storage  URI of the form
-                # `gs://bucket_name/object_name`. Object versioning is not supported. See
-                # [Google Cloud Storage Request
-                # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
-            &quot;imageUri&quot;: &quot;A String&quot;, # The URI of the source image. Can be either:
-                #
-                # 1. A Google Cloud Storage URI of the form
-                #    `gs://bucket_name/object_name`. Object versioning is not supported. See
-                #    [Google Cloud Storage Request
-                #    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
-                #    info.
-                #
-                # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
-                #    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
-                #    completed. Your request may fail if the specified host denies the
-                #    request (e.g. due to request throttling or DOS prevention), or if Google
-                #    throttles requests to the site for abuse prevention. You should not
-                #    depend on externally-hosted images for production applications.
-                #
-                # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
-                # precedence.
-          },
-        },
-        &quot;features&quot;: [ # Requested features.
-          { # The type of Google Cloud Vision API detection to perform, and the maximum
-              # number of results to return for that type. Multiple `Feature` objects can
-              # be specified in the `features` list.
-            &quot;type&quot;: &quot;A String&quot;, # The feature type.
-            &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
-                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
-            &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
-                # Supported values: &quot;builtin/stable&quot; (the default if unset) and
-                # &quot;builtin/latest&quot;.
-          },
-        ],
-        &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
-          &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
-              # yields the best results since it enables automatic language detection. For
-              # languages based on the Latin alphabet, setting `language_hints` is not
-              # needed. In rare cases, when the language of the text in the image is known,
-              # setting a hint will help get better results (although it will be a
-              # significant hindrance if the hint is wrong). Text detection returns an
-              # error if one or more of the specified languages is not one of the
-              # [supported languages](https://cloud.google.com/vision/docs/languages).
-            &quot;A String&quot;,
-          ],
-          &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
-            &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
-          },
-          &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
-            &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
-                # of doubles representing degrees latitude and degrees longitude. Unless
-                # specified otherwise, this must conform to the
-                # &lt;a href=&quot;http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf&quot;&gt;WGS84
-                # standard&lt;/a&gt;. Values must be within normalized ranges.
-              &quot;latitude&quot;: 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-            },
-            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
-                # of doubles representing degrees latitude and degrees longitude. Unless
-                # specified otherwise, this must conform to the
-                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                # standard</a>. Values must be within normalized ranges.
-              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-            },
-          },
-          "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
-            "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
-                # of the image. For example, if the desired aspect ratio is 4/3, the
-                # corresponding float value should be 1.33333.  If not specified, the
-                # best possible crop is returned. The number of provided aspect ratios is
-                # limited to a maximum of 16; any aspect ratios provided after the 16th are
-                # ignored.
-              3.14,
-            ],
-          },
-          "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
-            "filter": "A String", # The filtering expression. This can be used to restrict search results based
-                # on Product labels. We currently support an AND of OR of key-value
-                # expressions, where each expression within an OR must have the same key. An
-                # '=' should be used to connect the key and value.
-                #
-                # For example, "(color = red OR color = blue) AND brand = Google" is
-                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
-                # "color: red" is not acceptable because it uses a ':' instead of an '='.
-            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
-                #
-                # Format is:
-                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
-            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
-                # If it is not specified, system discretion will be applied.
-              "normalizedVertices": [ # The bounding polygon normalized vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the normalized vertex coordinates are relative to the original image
-                    # and range from 0 to 1.
-                  "y": 3.14, # Y coordinate.
-                  "x": 3.14, # X coordinate.
-                },
-              ],
-              "vertices": [ # The bounding polygon vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the vertex coordinates are in the same scale as the original image.
-                  "x": 42, # X coordinate.
-                  "y": 42, # Y coordinate.
-                },
-              ],
-            },
-            "productCategories": [ # The list of product categories to search in. Currently, we only consider
-                # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
-                # "packagedgoods-v1", or "general-v1" should be specified. The legacy
-                # categories "homegoods", "apparel", and "toys" are still supported but will
-                # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
-                # or "toys-v2" for better product search accuracy. It is recommended to
-                # migrate existing products to these categories as well.
-              "A String",
-            ],
-          },
-        },
-      },
-    ],
     "parent": "A String", # Optional. Target project and location to make a call.
         # 
         # Format: `projects/{project-id}/locations/{location-id}`.
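The `productSearchParams` block in the hunk above is only moving; its schema does not change. For orientation, here is a minimal sketch of that sub-structure as a Python dict (all concrete values, including the product set path, are hypothetical placeholders):

    product_search_params = {
        # Search one product set; PROJECT_ID, LOC_ID and PRODUCT_SET_ID are placeholders.
        "productSet": "projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID",
        # Only the first category is considered; prefer the "-v2" categories for new products.
        "productCategories": ["apparel-v2"],
        # AND of ORs over product labels; each OR must use a single key, joined with '='.
        "filter": "(color = red OR color = blue) AND brand = Google",
        # Optional area of interest; normalized vertices range from 0 to 1.
        "boundingPoly": {
            "normalizedVertices": [
                {"x": 0.1, "y": 0.1},
                {"x": 0.9, "y": 0.1},
                {"x": 0.9, "y": 0.9},
                {"x": 0.1, "y": 0.9},
            ]
        },
    }

This dict goes under `imageContext` in each entry of `requests` (see the full request sketch after the next hunk).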
@@ -1484,6 +1358,142 @@
             # and overflows into multiple sharded files.
       },
     },
+    "requests": [ # Required. Individual image annotation requests for this batch.
+      { # Request for performing Google Cloud Vision API tasks over a user-provided
+          # image, with user-requested features, and with context information.
+        "image": { # Client image to perform Google Cloud Vision API tasks over. # The image to be processed.
+          "source": { # External image source (Google Cloud Storage or web URL image location). # Google Cloud Storage image location, or publicly-accessible image
+              # URL. If both `content` and `source` are provided for an image, `content`
+              # takes precedence and is used to perform the image annotation request.
+            "gcsImageUri": "A String", # **Use `image_uri` instead.**
+                #
+                # The Google Cloud Storage  URI of the form
+                # `gs://bucket_name/object_name`. Object versioning is not supported. See
+                # [Google Cloud Storage Request
+                # URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
+            "imageUri": "A String", # The URI of the source image. Can be either:
+                #
+                # 1. A Google Cloud Storage URI of the form
+                #    `gs://bucket_name/object_name`. Object versioning is not supported. See
+                #    [Google Cloud Storage Request
+                #    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
+                #    info.
+                #
+                # 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
+                #    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
+                #    completed. Your request may fail if the specified host denies the
+                #    request (e.g. due to request throttling or DOS prevention), or if Google
+                #    throttles requests to the site for abuse prevention. You should not
+                #    depend on externally-hosted images for production applications.
+                #
+                # When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
+                # precedence.
+          },
+          "content": "A String", # Image content, represented as a stream of bytes.
+              # Note: As with all `bytes` fields, protobuffers use a pure binary
+              # representation, whereas JSON representations use base64.
+              #
+              # Currently, this field only works for BatchAnnotateImages requests. It does
+              # not work for AsyncBatchAnnotateImages requests.
+        },
+        "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
+          "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
+            "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
+                # of doubles representing degrees latitude and degrees longitude. Unless
+                # specified otherwise, this must conform to the
+                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                # standard</a>. Values must be within normalized ranges.
+              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+            },
+            "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
+                # of doubles representing degrees latitude and degrees longitude. Unless
+                # specified otherwise, this must conform to the
+                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                # standard</a>. Values must be within normalized ranges.
+              "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+              "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+            },
+          },
+          "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
+            "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
+          },
+          "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
+              # yields the best results since it enables automatic language detection. For
+              # languages based on the Latin alphabet, setting `language_hints` is not
+              # needed. In rare cases, when the language of the text in the image is known,
+              # setting a hint will help get better results (although it will be a
+              # significant hindrance if the hint is wrong). Text detection returns an
+              # error if one or more of the specified languages is not one of the
+              # [supported languages](https://cloud.google.com/vision/docs/languages).
+            "A String",
+          ],
+          "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
+            "productCategories": [ # The list of product categories to search in. Currently, we only consider
+                # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
+                # "packagedgoods-v1", or "general-v1" should be specified. The legacy
+                # categories "homegoods", "apparel", and "toys" are still supported but will
+                # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
+                # or "toys-v2" for better product search accuracy. It is recommended to
+                # migrate existing products to these categories as well.
+              "A String",
+            ],
+            "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
+                # If it is not specified, system discretion will be applied.
+              "normalizedVertices": [ # The bounding polygon normalized vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the normalized vertex coordinates are relative to the original image
+                    # and range from 0 to 1.
+                  "y": 3.14, # Y coordinate.
+                  "x": 3.14, # X coordinate.
+                },
+              ],
+              "vertices": [ # The bounding polygon vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the vertex coordinates are in the same scale as the original image.
+                  "x": 42, # X coordinate.
+                  "y": 42, # Y coordinate.
+                },
+              ],
+            },
+            "filter": "A String", # The filtering expression. This can be used to restrict search results based
+                # on Product labels. We currently support an AND of OR of key-value
+                # expressions, where each expression within an OR must have the same key. An
+                # '=' should be used to connect the key and value.
+                #
+                # For example, "(color = red OR color = blue) AND brand = Google" is
+                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
+                # "color: red" is not acceptable because it uses a ':' instead of an '='.
+            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
+                #
+                # Format is:
+                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+          },
+          "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
+            "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
+                # of the image. For example, if the desired aspect ratio is 4/3, the
+                # corresponding float value should be 1.33333.  If not specified, the
+                # best possible crop is returned. The number of provided aspect ratios is
+                # limited to a maximum of 16; any aspect ratios provided after the 16th are
+                # ignored.
+              3.14,
+            ],
+          },
+        },
+        "features": [ # Requested features.
+          { # The type of Google Cloud Vision API detection to perform, and the maximum
+              # number of results to return for that type. Multiple `Feature` objects can
+              # be specified in the `features` list.
+            "maxResults": 42, # Maximum number of results of this type. Does not apply to
+                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+            "type": "A String", # The feature type.
+            "model": "A String", # Model to use for the feature.
+                # Supported values: "builtin/stable" (the default if unset) and
+                # "builtin/latest".
+          },
+        ],
+      },
+    ],
   }
 
   x__xgafv: string, V1 error format.
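The re-ordered body above still describes the same async batch request: a list of per-image `requests`, an `outputConfig` naming a Cloud Storage destination, and an optional `parent`. A rough sketch of issuing it with this client library, assuming the hunk belongs to an `asyncBatchAnnotate` method (as the Operation response below suggests), Application Default Credentials, and placeholder project/bucket names; inline `content` is not honored for async requests, so the image is referenced by URI:

    from googleapiclient.discovery import build

    # Vision v1 client; uses Application Default Credentials.
    service = build("vision", "v1")

    parent = "projects/my-project/locations/eu"  # placeholder project/location
    body = {
        "requests": [
            {
                # Async requests must reference images by URI; inline `content` is ignored.
                "image": {"source": {"imageUri": "gs://my-bucket/photo.jpg"}},
                "features": [
                    {"type": "LABEL_DETECTION", "maxResults": 10},
                    # maxResults does not apply to TEXT_DETECTION.
                    {"type": "TEXT_DETECTION", "model": "builtin/stable"},
                ],
            }
        ],
        # Destination for the sharded JSON result files.
        "outputConfig": {
            "gcsDestination": {"uri": "gs://my-bucket/vision-output/"},
            "batchSize": 2,
        },
    }

    # Adjust the resource path (e.g. projects().locations().images()) to whichever
    # collection this page documents.
    operation = (
        service.projects()
        .images()
        .asyncBatchAnnotate(parent=parent, body=body)
        .execute()
    )
    print(operation["name"])  # long-running Operation name, polled later

The call returns the long-running Operation whose fields are documented in the next two hunks.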
@@ -1499,16 +1509,6 @@
     "done": True or False, # If the value is `false`, it means the operation is still in progress.
         # If `true`, the operation is completed, and either `error` or `response` is
         # available.
-    "response": { # The normal response of the operation in case of success.  If the original
-        # method returns no data on success, such as `Delete`, the response is
-        # `google.protobuf.Empty`.  If the original method is standard
-        # `Get`/`Create`/`Update`, the response should be the resource.  For other
-        # methods, the response should have the type `XxxResponse`, where `Xxx`
-        # is the original method name.  For example, if the original method name
-        # is `TakeSnapshot()`, the inferred response type is
-        # `TakeSnapshotResponse`.
-      "a_key": "", # Properties of the object. Contains field @type with type URL.
-    },
     "name": "A String", # The server-assigned name, which is only unique within the same service that
         # originally returns it. If you use the default HTTP mapping, the
         # `name` should be a resource name ending with `operations/{unique_id}`.
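Because the immediate response is only this Operation stub, the `name` above is what a caller polls until `done` flips to true and either `error` or `response` is populated. A minimal polling sketch under the same assumptions as the request example; depending on the format of the returned name, the `projects().locations().operations()` collection may be the right one instead of the top-level `operations()`:

    import time

    # `service` and `operation` come from the request sketch above.
    name = operation["name"]
    while True:
        op = service.operations().get(name=name).execute()
        if op.get("done"):
            break
        time.sleep(5)  # crude fixed interval; real code should back off and time out

    if "error" in op:
        # `error` is a google.rpc.Status with code, message and details.
        raise RuntimeError(op["error"].get("message", "asyncBatchAnnotate failed"))

    # On success, `response` echoes the output configuration; the annotation JSON
    # shards are written to the gcsDestination named in the request's outputConfig.
    print(op.get("response"))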
@@ -1520,15 +1520,25 @@
         # You can find out more about this error model and how to work with it in the
         # [API Design Guide](https://cloud.google.com/apis/design/errors).
       "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-      "message": "A String", # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
       "details": [ # A list of messages that carry the error details.  There is a common set of
           # message types for APIs to use.
         {
           "a_key": "", # Properties of the object. Contains field @type with type URL.
         },
       ],
+      "message": "A String", # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+    },
+    "response": { # The normal response of the operation in case of success.  If the original
+        # method returns no data on success, such as `Delete`, the response is
+        # `google.protobuf.Empty`.  If the original method is standard
+        # `Get`/`Create`/`Update`, the response should be the resource.  For other
+        # methods, the response should have the type `XxxResponse`, where `Xxx`
+        # is the original method name.  For example, if the original method name
+        # is `TakeSnapshot()`, the inferred response type is
+        # `TakeSnapshotResponse`.
+      "a_key": "", # Properties of the object. Contains field @type with type URL.
     },
     "metadata": { # Service-specific metadata associated with the operation.  It typically
         # contains progress information and common metadata such as create time.