docs: update generated docs (#981)

diff --git a/docs/dyn/vision_v1p2beta1.projects.files.html b/docs/dyn/vision_v1p2beta1.projects.files.html
index 84101d5..7217879 100644
--- a/docs/dyn/vision_v1p2beta1.projects.files.html
+++ b/docs/dyn/vision_v1p2beta1.projects.files.html
@@ -123,6 +123,18 @@
     "requests": [ # Required. The list of file annotation requests. Right now we support only one
         # AnnotateFileRequest in BatchAnnotateFilesRequest.
      { # A request to annotate a single file, e.g. a PDF, TIFF or GIF file.
+        "features": [ # Required. Requested features.
+          { # The type of Google Cloud Vision API detection to perform, and the maximum
+              # number of results to return for that type. Multiple `Feature` objects can
+              # be specified in the `features` list.
+            "type": "A String", # The feature type.
+            "maxResults": 42, # Maximum number of results of this type. Does not apply to
+                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+            "model": "A String", # Model to use for the feature.
+                # Supported values: "builtin/stable" (the default if unset) and
+                # "builtin/latest".
+          },
+        ],
         "pages": [ # Pages of the file to perform image annotation.
             #
            # Pages start from 1; we assume the first page of the file is page 1.
@@ -139,32 +151,6 @@
             # for the first 5 pages of the file.
           42,
         ],
-        "inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
-          "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
-            "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
-                # Google Cloud Storage object. Wildcards are not currently supported.
-          },
-          "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
-              # "image/gif" are supported. Wildcards are not supported.
-          "content": "A String", # File content, represented as a stream of bytes.
-              # Note: As with all `bytes` fields, protobuffers use a pure binary
-              # representation, whereas JSON representations use base64.
-              #
-              # Currently, this field only works for BatchAnnotateFiles requests. It does
-              # not work for AsyncBatchAnnotateFiles requests.
-        },
-        "features": [ # Required. Requested features.
-          { # The type of Google Cloud Vision API detection to perform, and the maximum
-              # number of results to return for that type. Multiple `Feature` objects can
-              # be specified in the `features` list.
-            "type": "A String", # The feature type.
-            "maxResults": 42, # Maximum number of results of this type. Does not apply to
-                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
-            "model": "A String", # Model to use for the feature.
-                # Supported values: "builtin/stable" (the default if unset) and
-                # "builtin/latest".
-          },
-        ],
         "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
           "cropHintsParams": { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
             "aspectRatios": [ # Aspect ratios in floats, representing the ratio of the width to the height
@@ -186,20 +172,15 @@
                 # migrate existing products to these categories as well.
               "A String",
             ],
-            "filter": "A String", # The filtering expression. This can be used to restrict search results based
-                # on Product labels. We currently support an AND of OR of key-value
-                # expressions, where each expression within an OR must have the same key. An
-                # '=' should be used to connect the key and value.
-                #
-                # For example, "(color = red OR color = blue) AND brand = Google" is
-                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
-                # "color: red" is not acceptable because it uses a ':' instead of an '='.
-            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
-                #
-                # Format is:
-                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
             "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                 # If it is not specified, system discretion will be applied.
+              "vertices": [ # The bounding polygon vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the vertex coordinates are in the same scale as the original image.
+                  "x": 42, # X coordinate.
+                  "y": 42, # Y coordinate.
+                },
+              ],
               "normalizedVertices": [ # The bounding polygon normalized vertices.
                 { # A vertex represents a 2D point in the image.
                     # NOTE: the normalized vertex coordinates are relative to the original image
@@ -208,14 +189,19 @@
                   "y": 3.14, # Y coordinate.
                 },
               ],
-              "vertices": [ # The bounding polygon vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the vertex coordinates are in the same scale as the original image.
-                  "y": 42, # Y coordinate.
-                  "x": 42, # X coordinate.
-                },
-              ],
             },
+            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
+                #
+                # Format is:
+                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+            "filter": "A String", # The filtering expression. This can be used to restrict search results based
+                # on Product labels. We currently support an AND of ORs of key-value
+                # expressions, where each expression within an OR must have the same key. An
+                # '=' should be used to connect the key and value.
+                #
+                # For example, "(color = red OR color = blue) AND brand = Google" is
+                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
+                # "color: red" is not acceptable because it uses a ':' instead of an '='.
           },
           "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
               # yields the best results since it enables automatic language detection. For
@@ -227,9 +213,6 @@
               # [supported languages](https://cloud.google.com/vision/docs/languages).
             "A String",
           ],
-          "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
-            "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
-          },
           "latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
             "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                 # of doubles representing degrees latitude and degrees longitude. Unless
@@ -248,6 +231,23 @@
               "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
             },
           },
+          "webDetectionParams": { # Parameters for web detection request. # Parameters for web detection.
+            "includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
+          },
+        },
+        "inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
+          "content": "A String", # File content, represented as a stream of bytes.
+              # Note: As with all `bytes` fields, protocol buffers use a pure binary
+              # representation, whereas JSON representations use base64.
+              #
+              # Currently, this field only works for BatchAnnotateFiles requests. It does
+              # not work for AsyncBatchAnnotateFiles requests.
+          "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
+              # "image/gif" are supported. Wildcards are not supported.
+          "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
+            "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
+                # Google Cloud Storage object. Wildcards are not currently supported.
+          },
         },
       },
     ],
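
For orientation, here is a minimal sketch of building the request body documented above with the Python client. It assumes application-default credentials are configured; the project ID, bucket, and object name are placeholders, and the call shown is the projects.files annotate method that this page documents (per the request and response bodies in the diff).

    from googleapiclient.discovery import build

    # Build a client for the v1p2beta1 surface documented on this page.
    service = build('vision', 'v1p2beta1')

    body = {
        'requests': [
            {
                # Where to read the file from; the GCS URI is a placeholder.
                'inputConfig': {
                    'gcsSource': {'uri': 'gs://my-bucket/sample.pdf'},
                    'mimeType': 'application/pdf',
                },
                # Which detections to run; 'builtin/stable' is the default model.
                'features': [
                    {'type': 'DOCUMENT_TEXT_DETECTION', 'model': 'builtin/stable'},
                ],
                # Pages start from 1; annotate only the first two pages here.
                'pages': [1, 2],
            },
        ],
    }

    response = service.projects().files().annotate(
        parent='projects/my-project', body=body).execute()

Inline bytes can be supplied via inputConfig.content instead of gcsSource, but, per the field comment above, that only works for BatchAnnotateFiles requests, not AsyncBatchAnnotateFiles.
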
@@ -266,6 +266,20 @@
         # AnnotateFileRequest in BatchAnnotateFilesRequest.
       { # Response to a single file annotation request. A file may contain one or more
           # images, which individually have their own responses.
+        "inputConfig": { # The desired input location and metadata. # Information about the file for which this response is generated.
+          "content": "A String", # File content, represented as a stream of bytes.
+              # Note: As with all `bytes` fields, protocol buffers use a pure binary
+              # representation, whereas JSON representations use base64.
+              #
+              # Currently, this field only works for BatchAnnotateFiles requests. It does
+              # not work for AsyncBatchAnnotateFiles requests.
+          "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
+              # "image/gif" are supported. Wildcards are not supported.
+          "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
+            "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
+                # Google Cloud Storage object. Wildcards are not currently supported.
+          },
+        },
         "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the failed request. The
             # `responses` field will not be set in this case.
             # different programming environments, including REST APIs and RPC APIs. It is
@@ -275,53 +289,45 @@
             # You can find out more about this error model and how to work with it in the
             # [API Design Guide](https://cloud.google.com/apis/design/errors).
           "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-          "message": "A String", # A developer-facing error message, which should be in English. Any
-              # user-facing error message should be localized and sent in the
-              # google.rpc.Status.details field, or localized by the client.
           "details": [ # A list of messages that carry the error details.  There is a common set of
               # message types for APIs to use.
             {
               "a_key": "", # Properties of the object. Contains field @type with type URL.
             },
           ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
         },
+        "totalPages": 42, # This field gives the total number of pages in the file.
         "responses": [ # Individual responses to images found within the file. This field will be
             # empty if the `error` field is set.
           { # Response to an image annotation request.
-            "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
-                # This will be sorted descending by confidence score.
-              { # Set of detected objects with bounding boxes.
-                "name": "A String", # Object name, expressed in its `language_code` language.
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "x": 3.14, # X coordinate.
-                      "y": 3.14, # Y coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-                "score": 3.14, # Score of the result. Range [0, 1].
-                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                    # information, see
-                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                "mid": "A String", # Object ID that should align with EntityAnnotation mid.
-              },
-            ],
             "productSearchResults": { # Results for a product search request. # If present, product search has completed successfully.
+              "indexTime": "A String", # Timestamp of the index which provided these results. Products added to the
+                  # product set and products removed from the product set after this time are
+                  # not reflected in the current results.
               "results": [ # List of results, one for each product match.
                 { # Information about a product.
+                  "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
+                      # 1 (full confidence).
                   "image": "A String", # The resource name of the image from the product that is the closest match
                       # to the query.
                   "product": { # A Product contains ReferenceImages. # The Product.
+                    "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
+                        # 4096 characters long.
+                    "name": "A String", # The resource name of the product.
+                        #
+                        # Format is:
+                        # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+                        #
+                        # This field is ignored when creating a product.
+                    "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
+                        # characters long.
+                    "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
+                        # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
+                        # "homegoods", "apparel", and "toys" are still supported, but these should
+                        # not be used for new products.
                     "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
                         # constraints can be specified based on the product_labels.
                         #
@@ -336,51 +342,29 @@
                # in one ProductSet cannot exceed 1M; otherwise the product search pipeline
                         # will refuse to work for that ProductSet.
                       { # A product label represented as a key-value pair.
-                        "value": "A String", # The value of the label attached to the product. Cannot be empty and
-                            # cannot exceed 128 bytes.
                         "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
                             # exceed 128 bytes.
+                        "value": "A String", # The value of the label attached to the product. Cannot be empty and
+                            # cannot exceed 128 bytes.
                       },
                     ],
-                    "name": "A String", # The resource name of the product.
-                        #
-                        # Format is:
-                        # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
-                        #
-                        # This field is ignored when creating a product.
-                    "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
-                        # 4096 characters long.
-                    "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
-                        # characters long.
-                    "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
-                        # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
-                        # "homegoods", "apparel", and "toys" are still supported, but these should
-                        # not be used for new products.
                   },
-                  "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
-                      # 1 (full confidence).
                 },
               ],
-              "indexTime": "A String", # Timestamp of the index which provided these results. Products added to the
-                  # product set and products removed from the product set after this time are
-                  # not reflected in the current results.
               "productGroupedResults": [ # List of results grouped by products detected in the query image. Each entry
                   # corresponds to one bounding polygon in the query image, and contains the
                   # matching products specific to that region. There may be duplicate product
                   # matches in the union of all the per-product results.
                 { # Information about the products similar to a single product in a query
                     # image.
-                  "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
-                    { # Prediction for what the object in the bounding box is.
-                      "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                          # information, see
-                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                      "mid": "A String", # Object ID that should align with EntityAnnotation mid.
-                      "name": "A String", # Object name, expressed in its `language_code` language.
-                      "score": 3.14, # Score of the result. Range [0, 1].
-                    },
-                  ],
                   "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
+                    "vertices": [ # The bounding polygon vertices.
+                      { # A vertex represents a 2D point in the image.
+                          # NOTE: the vertex coordinates are in the same scale as the original image.
+                        "x": 42, # X coordinate.
+                        "y": 42, # Y coordinate.
+                      },
+                    ],
                     "normalizedVertices": [ # The bounding polygon normalized vertices.
                       { # A vertex represents a 2D point in the image.
                           # NOTE: the normalized vertex coordinates are relative to the original image
@@ -389,19 +373,38 @@
                         "y": 3.14, # Y coordinate.
                       },
                     ],
-                    "vertices": [ # The bounding polygon vertices.
-                      { # A vertex represents a 2D point in the image.
-                          # NOTE: the vertex coordinates are in the same scale as the original image.
-                        "y": 42, # Y coordinate.
-                        "x": 42, # X coordinate.
-                      },
-                    ],
                   },
+                  "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
+                    { # Prediction for what the object in the bounding box is.
+                      "score": 3.14, # Score of the result. Range [0, 1].
+                      "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                          # information, see
+                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                      "mid": "A String", # Object ID that should align with EntityAnnotation mid.
+                      "name": "A String", # Object name, expressed in its `language_code` language.
+                    },
+                  ],
                   "results": [ # List of results, one for each product match.
                     { # Information about a product.
+                      "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
+                          # 1 (full confidence).
                       "image": "A String", # The resource name of the image from the product that is the closest match
                           # to the query.
                       "product": { # A Product contains ReferenceImages. # The Product.
+                        "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
+                            # 4096 characters long.
+                        "name": "A String", # The resource name of the product.
+                            #
+                            # Format is:
+                            # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+                            #
+                            # This field is ignored when creating a product.
+                        "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
+                            # characters long.
+                        "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
+                            # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
+                            # "homegoods", "apparel", and "toys" are still supported, but these should
+                            # not be used for new products.
                         "productLabels": [ # Key-value pairs that can be attached to a product. At query time,
                             # constraints can be specified based on the product_labels.
                             #
@@ -416,334 +419,27 @@
                    # in one ProductSet cannot exceed 1M; otherwise the product search pipeline
                             # will refuse to work for that ProductSet.
                           { # A product label represented as a key-value pair.
-                            "value": "A String", # The value of the label attached to the product. Cannot be empty and
-                                # cannot exceed 128 bytes.
                             "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
                                 # exceed 128 bytes.
+                            "value": "A String", # The value of the label attached to the product. Cannot be empty and
+                                # cannot exceed 128 bytes.
                           },
                         ],
-                        "name": "A String", # The resource name of the product.
-                            #
-                            # Format is:
-                            # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
-                            #
-                            # This field is ignored when creating a product.
-                        "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
-                            # 4096 characters long.
-                        "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
-                            # characters long.
-                        "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
-                            # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
-                            # "homegoods", "apparel", and "toys" are still supported, but these should
-                            # not be used for new products.
                       },
-                      "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
-                          # 1 (full confidence).
                     },
                   ],
                 },
               ],
             },
-            "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
-                # Note that filled-in image annotations are guaranteed to be
-                # correct, even when `error` is set.
-                # different programming environments, including REST APIs and RPC APIs. It is
-                # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-                # three pieces of data: error code, error message, and error details.
-                #
-                # You can find out more about this error model and how to work with it in the
-                # [API Design Guide](https://cloud.google.com/apis/design/errors).
-              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-              "message": "A String", # A developer-facing error message, which should be in English. Any
-                  # user-facing error message should be localized and sent in the
-                  # google.rpc.Status.details field, or localized by the client.
-              "details": [ # A list of messages that carry the error details.  There is a common set of
-                  # message types for APIs to use.
-                {
-                  "a_key": "", # Properties of the object. Contains field @type with type URL.
-                },
-              ],
-            },
-            "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
-                # completed successfully.
-                # This annotation provides the structural hierarchy for the OCR detected
-                # text.
-                # The hierarchy of an OCR extracted text structure is like this:
-                #     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
-                # Each structural component, starting from Page, may further have their own
-                # properties. Properties describe detected languages, breaks etc.. Please refer
-                # to the TextAnnotation.TextProperty message definition below for more
-                # detail.
-              "text": "A String", # UTF-8 text detected on the pages.
-              "pages": [ # List of pages detected by OCR.
-                { # Detected page from OCR.
-                  "width": 42, # Page width. For PDFs the unit is points. For images (including
-                      # TIFFs) the unit is pixels.
-                  "blocks": [ # List of blocks of text, images etc on this page.
-                    { # Logical element on the page.
-                      "property": { # Additional information detected on the structural component. # Additional information detected for the block.
-                        "detectedLanguages": [ # A list of detected languages together with confidence.
-                          { # Detected language for a structural component.
-                            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                                # information, see
-                                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                            "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                          },
-                        ],
-                        "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                          "type": "A String", # Detected break type.
-                          "isPrefix": True or False, # True if break prepends the element.
-                        },
-                      },
-                      "blockType": "A String", # Detected block type (text, image etc) for this block.
-                      "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the block.
-                          # The vertices are in the order of top-left, top-right, bottom-right,
-                          # bottom-left. When a rotation of the bounding box is detected the rotation
-                          # is represented as around the top-left corner as defined when the text is
-                          # read in the 'natural' orientation.
-                          # For example:
-                          #
-                          # * when the text is horizontal it might look like:
-                          #
-                          #         0----1
-                          #         |    |
-                          #         3----2
-                          #
-                          # * when it's rotated 180 degrees around the top-left corner it becomes:
-                          #
-                          #         2----3
-                          #         |    |
-                          #         1----0
-                          #
-                          #   and the vertex order will still be (0, 1, 2, 3).
-                        "normalizedVertices": [ # The bounding polygon normalized vertices.
-                          { # A vertex represents a 2D point in the image.
-                              # NOTE: the normalized vertex coordinates are relative to the original image
-                              # and range from 0 to 1.
-                            "x": 3.14, # X coordinate.
-                            "y": 3.14, # Y coordinate.
-                          },
-                        ],
-                        "vertices": [ # The bounding polygon vertices.
-                          { # A vertex represents a 2D point in the image.
-                              # NOTE: the vertex coordinates are in the same scale as the original image.
-                            "y": 42, # Y coordinate.
-                            "x": 42, # X coordinate.
-                          },
-                        ],
-                      },
-                      "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
-                      "paragraphs": [ # List of paragraphs in this block (if this blocks is of type text).
-                        { # Structural unit of text representing a number of words in certain order.
-                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
-                              # The vertices are in the order of top-left, top-right, bottom-right,
-                              # bottom-left. When a rotation of the bounding box is detected the rotation
-                              # is represented as around the top-left corner as defined when the text is
-                              # read in the 'natural' orientation.
-                              # For example:
-                              #   * when the text is horizontal it might look like:
-                              #      0----1
-                              #      |    |
-                              #      3----2
-                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
-                              #      2----3
-                              #      |    |
-                              #      1----0
-                              #   and the vertex order will still be (0, 1, 2, 3).
-                            "normalizedVertices": [ # The bounding polygon normalized vertices.
-                              { # A vertex represents a 2D point in the image.
-                                  # NOTE: the normalized vertex coordinates are relative to the original image
-                                  # and range from 0 to 1.
-                                "x": 3.14, # X coordinate.
-                                "y": 3.14, # Y coordinate.
-                              },
-                            ],
-                            "vertices": [ # The bounding polygon vertices.
-                              { # A vertex represents a 2D point in the image.
-                                  # NOTE: the vertex coordinates are in the same scale as the original image.
-                                "y": 42, # Y coordinate.
-                                "x": 42, # X coordinate.
-                              },
-                            ],
-                          },
-                          "confidence": 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
-                          "words": [ # List of all words in this paragraph.
-                            { # A word representation.
-                              "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
-                                  # The vertices are in the order of top-left, top-right, bottom-right,
-                                  # bottom-left. When a rotation of the bounding box is detected the rotation
-                                  # is represented as around the top-left corner as defined when the text is
-                                  # read in the 'natural' orientation.
-                                  # For example:
-                                  #   * when the text is horizontal it might look like:
-                                  #      0----1
-                                  #      |    |
-                                  #      3----2
-                                  #   * when it's rotated 180 degrees around the top-left corner it becomes:
-                                  #      2----3
-                                  #      |    |
-                                  #      1----0
-                                  #   and the vertex order will still be (0, 1, 2, 3).
-                                "normalizedVertices": [ # The bounding polygon normalized vertices.
-                                  { # A vertex represents a 2D point in the image.
-                                      # NOTE: the normalized vertex coordinates are relative to the original image
-                                      # and range from 0 to 1.
-                                    "x": 3.14, # X coordinate.
-                                    "y": 3.14, # Y coordinate.
-                                  },
-                                ],
-                                "vertices": [ # The bounding polygon vertices.
-                                  { # A vertex represents a 2D point in the image.
-                                      # NOTE: the vertex coordinates are in the same scale as the original image.
-                                    "y": 42, # Y coordinate.
-                                    "x": 42, # X coordinate.
-                                  },
-                                ],
-                              },
-                              "confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
-                              "symbols": [ # List of symbols in the word.
-                                  # The order of the symbols follows the natural reading order.
-                                { # A single symbol representation.
-                                  "text": "A String", # The actual UTF-8 representation of the symbol.
-                                  "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
-                                    "detectedLanguages": [ # A list of detected languages together with confidence.
-                                      { # Detected language for a structural component.
-                                        "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                                            # information, see
-                                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                                        "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                                      },
-                                    ],
-                                    "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                                      "type": "A String", # Detected break type.
-                                      "isPrefix": True or False, # True if break prepends the element.
-                                    },
-                                  },
-                                  "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
-                                      # The vertices are in the order of top-left, top-right, bottom-right,
-                                      # bottom-left. When a rotation of the bounding box is detected the rotation
-                                      # is represented as around the top-left corner as defined when the text is
-                                      # read in the 'natural' orientation.
-                                      # For example:
-                                      #   * when the text is horizontal it might look like:
-                                      #      0----1
-                                      #      |    |
-                                      #      3----2
-                                      #   * when it's rotated 180 degrees around the top-left corner it becomes:
-                                      #      2----3
-                                      #      |    |
-                                      #      1----0
-                                      #   and the vertex order will still be (0, 1, 2, 3).
-                                    "normalizedVertices": [ # The bounding polygon normalized vertices.
-                                      { # A vertex represents a 2D point in the image.
-                                          # NOTE: the normalized vertex coordinates are relative to the original image
-                                          # and range from 0 to 1.
-                                        "x": 3.14, # X coordinate.
-                                        "y": 3.14, # Y coordinate.
-                                      },
-                                    ],
-                                    "vertices": [ # The bounding polygon vertices.
-                                      { # A vertex represents a 2D point in the image.
-                                          # NOTE: the vertex coordinates are in the same scale as the original image.
-                                        "y": 42, # Y coordinate.
-                                        "x": 42, # X coordinate.
-                                      },
-                                    ],
-                                  },
-                                  "confidence": 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
-                                },
-                              ],
-                              "property": { # Additional information detected on the structural component. # Additional information detected for the word.
-                                "detectedLanguages": [ # A list of detected languages together with confidence.
-                                  { # Detected language for a structural component.
-                                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                                        # information, see
-                                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                                  },
-                                ],
-                                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                                  "type": "A String", # Detected break type.
-                                  "isPrefix": True or False, # True if break prepends the element.
-                                },
-                              },
-                            },
-                          ],
-                          "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
-                            "detectedLanguages": [ # A list of detected languages together with confidence.
-                              { # Detected language for a structural component.
-                                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                                    # information, see
-                                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                                "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                              },
-                            ],
-                            "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                              "type": "A String", # Detected break type.
-                              "isPrefix": True or False, # True if break prepends the element.
-                            },
-                          },
-                        },
-                      ],
-                    },
-                  ],
-                  "property": { # Additional information detected on the structural component. # Additional information detected on the page.
-                    "detectedLanguages": [ # A list of detected languages together with confidence.
-                      { # Detected language for a structural component.
-                        "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                            # information, see
-                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                        "confidence": 3.14, # Confidence of detected language. Range [0, 1].
-                      },
-                    ],
-                    "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
-                      "type": "A String", # Detected break type.
-                      "isPrefix": True or False, # True if break prepends the element.
-                    },
-                  },
-                  "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
-                  "height": 42, # Page height. For PDFs the unit is points. For images (including
-                      # TIFFs) the unit is pixels.
-                },
-              ],
-            },
             "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
               { # Set of detected entity features.
-                "locale": "A String", # The language code for the locale in which the entity textual
-                    # `description` is expressed.
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                    # for `LABEL_DETECTION` features.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "x": 3.14, # X coordinate.
-                      "y": 3.14, # Y coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
                 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                     # image. For example, the relevancy of "tower" is likely higher to an image
                     # containing the detected "Eiffel Tower" than to an image containing a
                     # detected distant towering building, even though the confidence that
                     # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "value": "A String", # Value of the property.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                  },
-                ],
-                "score": 3.14, # Overall score of the result. Range [0, 1].
+                "locale": "A String", # The language code for the locale in which the entity textual
+                    # `description` is expressed.
                 "locations": [ # The location information for the detected entity. Multiple
                     # `LocationInfo` elements can be present because one location may
                     # indicate the location of the scene in the image, and another location
@@ -763,13 +459,120 @@
                 "mid": "A String", # Opaque entity ID. Some IDs may be available in
                     # [Google Knowledge Graph Search
                     # API](https://developers.google.com/knowledge-graph/).
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
                 "confidence": 3.14, # **Deprecated. Use `score` instead.**
                     # The accuracy of the entity detection in an image.
                     # For example, for an image in which the "Eiffel Tower" entity is detected,
                     # this field represents the confidence that there is a tower in the query
                     # image. Range [0, 1].
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                    # for `LABEL_DETECTION` features.
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "x": 42, # X coordinate.
+                      "y": 42, # Y coordinate.
+                    },
+                  ],
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "x": 3.14, # X coordinate.
+                      "y": 3.14, # Y coordinate.
+                    },
+                  ],
+                },
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such as a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "value": "A String", # Value of the property.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
+                "score": 3.14, # Overall score of the result. Range [0, 1].
               },
             ],
+            "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
+                # methods over safe-search verticals (for example, adult, spoof, medical,
+                # violence).
+              "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
+                  # include (but is not limited to) skimpy or sheer clothing, strategically
+                  # covered nudity, lewd or provocative poses, or close-ups of sensitive
+                  # body areas.
+              "medical": "A String", # Likelihood that this is a medical image.
+              "adult": "A String", # Represents the adult content likelihood for the image. Adult content may
+                  # contain elements such as nudity, pornographic images or cartoons, or
+                  # sexual activities.
+              "violence": "A String", # Likelihood that this image contains violent content.
+              "spoof": "A String", # Spoof likelihood. The likelihood that an modification
+                  # was made to the image's canonical version to make it appear
+                  # funny or offensive.
+            },
+            "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
+              "fullMatchingImages": [ # Fully matching images from the Internet.
+                  # Can include resized copies of the query image.
+                { # Metadata for online images.
+                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                  "url": "A String", # The result image URL.
+                },
+              ],
+              "bestGuessLabels": [ # The service's best guess as to the topic of the request image.
+                  # Inferred from similar images on the open web.
+                { # Label to provide extra metadata for the web detection.
+                  "languageCode": "A String", # The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+                      # For more information, see
+                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                  "label": "A String", # Label for extra metadata.
+                },
+              ],
+              "visuallySimilarImages": [ # The visually similar image results.
+                { # Metadata for online images.
+                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                  "url": "A String", # The result image URL.
+                },
+              ],
+              "partialMatchingImages": [ # Partial matching images from the Internet.
+                  # Those images are similar enough to share some key-point features. For
+                  # example, an original image will likely have partial matching for its crops.
+                { # Metadata for online images.
+                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                  "url": "A String", # The result image URL.
+                },
+              ],
+              "webEntities": [ # Deduced entities from similar images on the Internet.
+                { # Entity deduced from similar images on the Internet.
+                  "entityId": "A String", # Opaque entity ID.
+                  "score": 3.14, # Overall relevancy score for the entity.
+                      # Not normalized and not comparable across different image queries.
+                  "description": "A String", # Canonical description of the entity, in English.
+                },
+              ],
+              "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
+                { # Metadata for web pages.
+                  "partialMatchingImages": [ # Partial matching images on the page.
+                      # Those images are similar enough to share some key-point features. For
+                      # example, an original image will likely have partial matching for its
+                      # crops.
+                    { # Metadata for online images.
+                      "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                      "url": "A String", # The result image URL.
+                    },
+                  ],
+                  "url": "A String", # The result web page URL.
+                  "fullMatchingImages": [ # Fully matching images on the page.
+                      # Can include resized copies of the query image.
+                    { # Metadata for online images.
+                      "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                      "url": "A String", # The result image URL.
+                    },
+                  ],
+                  "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
+                  "pageTitle": "A String", # Title for the web page, may contain HTML markups.
+                },
+              ],
+            },
             "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
               "dominantColors": { # Set of dominant colors and their corresponding scores. # If present, dominant colors completed successfully.
                 "colors": [ # RGB color values with their score and pixel fraction.
@@ -790,6 +593,10 @@
                         # DCI-P3, BT.2020, etc.). By default, applications SHOULD assume the sRGB color
                         # space.
                         #
+                        # Note: when color equality needs to be decided, implementations, unless
+                        # documented otherwise, will treat two colors as equal if all their red,
+                        # green, blue, and alpha values each differ by at most 1e-5.
+                        #
                         # Example (Java):
                         #
                         #      import com.google.type.Color;
@@ -896,8 +703,7 @@
                         #     };
                         #
                         #     // ...
-                      "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
-                      "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
+                      "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
                       "alpha": 3.14, # The fraction of this color that should be applied to the pixel. That is,
                           # the final pixel color is defined by the equation:
                           #
@@ -909,19 +715,315 @@
                           # possible to distinguish between a default value and the value being unset.
                           # If omitted, this color object is to be rendered as a solid color
                           # (as if the alpha value had been explicitly given with a value of 1.0).
-                      "red": 3.14, # The amount of red in the color as a value in the interval [0, 1].
+                      "blue": 3.14, # The amount of blue in the color as a value in the interval [0, 1].
+                      "green": 3.14, # The amount of green in the color as a value in the interval [0, 1].
                     },
                     "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
                   },
                 ],
               },
             },
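+            # Example (Python, illustrative sketch): converting the normalized
+            # color channels above to 8-bit values, and an approximate-equality
+            # check using the 1e-5 tolerance noted in the color description.
+            # `color_a` and `color_b` are assumed parsed `color` dicts.
+            #
+            #     def to_rgb8(c):
+            #         return tuple(int(round(c.get(k, 0.0) * 255))
+            #                      for k in ("red", "green", "blue"))
+            #
+            #     def colors_equal(color_a, color_b, tol=1e-5):
+            #         # An omitted alpha renders as a solid color, i.e. 1.0.
+            #         def chan(c, k):
+            #             return c.get(k, 1.0 if k == "alpha" else 0.0)
+            #         return all(abs(chan(color_a, k) - chan(color_b, k)) <= tol
+            #                    for k in ("red", "green", "blue", "alpha"))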
-            "logoAnnotations": [ # If present, logo detection has completed successfully.
-              { # Set of detected entity features.
-                "locale": "A String", # The language code for the locale in which the entity textual
-                    # `description` is expressed.
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                    # for `LABEL_DETECTION` features.
+            "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
+              "cropHints": [ # Crop hint results.
+                { # Single crop hint that is used to generate a new crop when serving an image.
+                  "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
+                      # box are in the original image's scale.
+                    "vertices": [ # The bounding polygon vertices.
+                      { # A vertex represents a 2D point in the image.
+                          # NOTE: the vertex coordinates are in the same scale as the original image.
+                        "x": 42, # X coordinate.
+                        "y": 42, # Y coordinate.
+                      },
+                    ],
+                    "normalizedVertices": [ # The bounding polygon normalized vertices.
+                      { # A vertex represents a 2D point in the image.
+                          # NOTE: the normalized vertex coordinates are relative to the original image
+                          # and range from 0 to 1.
+                        "x": 3.14, # X coordinate.
+                        "y": 3.14, # Y coordinate.
+                      },
+                    ],
+                  },
+                  "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
+                      # image.
+                  "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
+                },
+              ],
+            },
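+            # Example (Python, illustrative sketch): choosing the most confident
+            # crop hint and reading its pixel-scale rectangle. `response` is an
+            # assumed name for one parsed response dict.
+            #
+            #     hints = response.get("cropHintsAnnotation", {}).get("cropHints", [])
+            #     if hints:
+            #         best = max(hints, key=lambda h: h.get("confidence", 0.0))
+            #         xs = [v.get("x", 0) for v in best["boundingPoly"]["vertices"]]
+            #         ys = [v.get("y", 0) for v in best["boundingPoly"]["vertices"]]
+            #         left, top, right, bottom = min(xs), min(ys), max(xs), max(ys)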
+            "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
+                # completed successfully.
+                # This annotation provides the structural hierarchy for the OCR detected
+                # text.
+                # The hierarchy of an OCR extracted text structure is as follows:
+                #     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+                # Each structural component, starting from Page, may further have its own
+                # properties. Properties describe detected languages, breaks, etc. Please refer
+                # to the TextAnnotation.TextProperty message definition below for more
+                # detail.
+              "text": "A String", # UTF-8 text detected on the pages.
+              "pages": [ # List of pages detected by OCR.
+                { # Detected page from OCR.
+                  "blocks": [ # List of blocks of text, images, etc. on this page.
+                    { # Logical element on the page.
+                      "blockType": "A String", # Detected block type (text, image, etc.) for this block.
+                      "paragraphs": [ # List of paragraphs in this block (if this block is of type text).
+                        { # Structural unit of text representing a number of words in certain order.
+                          "confidence": 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
+                          "property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
+                            "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                              "type": "A String", # Detected break type.
+                              "isPrefix": True or False, # True if the break comes before the element.
+                            },
+                            "detectedLanguages": [ # A list of detected languages together with confidence.
+                              { # Detected language for a structural component.
+                                "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                                    # information, see
+                                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                              },
+                            ],
+                          },
+                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
+                              # The vertices are in the order of top-left, top-right, bottom-right,
+                              # bottom-left. When a rotation of the bounding box is detected the rotation
+                              # is represented as around the top-left corner as defined when the text is
+                              # read in the 'natural' orientation.
+                              # For example:
+                              #   * when the text is horizontal it might look like:
+                              #      0----1
+                              #      |    |
+                              #      3----2
+                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
+                              #      2----3
+                              #      |    |
+                              #      1----0
+                              #   and the vertex order will still be (0, 1, 2, 3).
+                            "vertices": [ # The bounding polygon vertices.
+                              { # A vertex represents a 2D point in the image.
+                                  # NOTE: the vertex coordinates are in the same scale as the original image.
+                                "x": 42, # X coordinate.
+                                "y": 42, # Y coordinate.
+                              },
+                            ],
+                            "normalizedVertices": [ # The bounding polygon normalized vertices.
+                              { # A vertex represents a 2D point in the image.
+                                  # NOTE: the normalized vertex coordinates are relative to the original image
+                                  # and range from 0 to 1.
+                                "x": 3.14, # X coordinate.
+                                "y": 3.14, # Y coordinate.
+                              },
+                            ],
+                          },
+                          "words": [ # List of all words in this paragraph.
+                            { # A word representation.
+                              "confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
+                              "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
+                                  # The vertices are in the order of top-left, top-right, bottom-right,
+                                  # bottom-left. When a rotation of the bounding box is detected the rotation
+                                  # is represented as around the top-left corner as defined when the text is
+                                  # read in the 'natural' orientation.
+                                  # For example:
+                                  #   * when the text is horizontal it might look like:
+                                  #      0----1
+                                  #      |    |
+                                  #      3----2
+                                  #   * when it's rotated 180 degrees around the top-left corner it becomes:
+                                  #      2----3
+                                  #      |    |
+                                  #      1----0
+                                  #   and the vertex order will still be (0, 1, 2, 3).
+                                "vertices": [ # The bounding polygon vertices.
+                                  { # A vertex represents a 2D point in the image.
+                                      # NOTE: the vertex coordinates are in the same scale as the original image.
+                                    "x": 42, # X coordinate.
+                                    "y": 42, # Y coordinate.
+                                  },
+                                ],
+                                "normalizedVertices": [ # The bounding polygon normalized vertices.
+                                  { # A vertex represents a 2D point in the image.
+                                      # NOTE: the normalized vertex coordinates are relative to the original image
+                                      # and range from 0 to 1.
+                                    "x": 3.14, # X coordinate.
+                                    "y": 3.14, # Y coordinate.
+                                  },
+                                ],
+                              },
+                              "property": { # Additional information detected on the structural component. # Additional information detected for the word.
+                                "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                                  "type": "A String", # Detected break type.
+                                  "isPrefix": True or False, # True if the break comes before the element.
+                                },
+                                "detectedLanguages": [ # A list of detected languages together with confidence.
+                                  { # Detected language for a structural component.
+                                    "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                                    "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                                        # information, see
+                                        # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                                  },
+                                ],
+                              },
+                              "symbols": [ # List of symbols in the word.
+                                  # The order of the symbols follows the natural reading order.
+                                { # A single symbol representation.
+                                  "confidence": 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
+                                  "property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
+                                    "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                                      "type": "A String", # Detected break type.
+                                      "isPrefix": True or False, # True if the break comes before the element.
+                                    },
+                                    "detectedLanguages": [ # A list of detected languages together with confidence.
+                                      { # Detected language for a structural component.
+                                        "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                                        "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                                            # information, see
+                                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                                      },
+                                    ],
+                                  },
+                                  "text": "A String", # The actual UTF-8 representation of the symbol.
+                                  "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the symbol.
+                                      # The vertices are in the order of top-left, top-right, bottom-right,
+                                      # bottom-left. When a rotation of the bounding box is detected the rotation
+                                      # is represented as around the top-left corner as defined when the text is
+                                      # read in the 'natural' orientation.
+                                      # For example:
+                                      #   * when the text is horizontal it might look like:
+                                      #      0----1
+                                      #      |    |
+                                      #      3----2
+                                      #   * when it's rotated 180 degrees around the top-left corner it becomes:
+                                      #      2----3
+                                      #      |    |
+                                      #      1----0
+                                      #   and the vertex order will still be (0, 1, 2, 3).
+                                    "vertices": [ # The bounding polygon vertices.
+                                      { # A vertex represents a 2D point in the image.
+                                          # NOTE: the vertex coordinates are in the same scale as the original image.
+                                        "x": 42, # X coordinate.
+                                        "y": 42, # Y coordinate.
+                                      },
+                                    ],
+                                    "normalizedVertices": [ # The bounding polygon normalized vertices.
+                                      { # A vertex represents a 2D point in the image.
+                                          # NOTE: the normalized vertex coordinates are relative to the original image
+                                          # and range from 0 to 1.
+                                        "x": 3.14, # X coordinate.
+                                        "y": 3.14, # Y coordinate.
+                                      },
+                                    ],
+                                  },
+                                },
+                              ],
+                            },
+                          ],
+                        },
+                      ],
+                      "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the block.
+                          # The vertices are in the order of top-left, top-right, bottom-right,
+                          # bottom-left. When a rotation of the bounding box is detected the rotation
+                          # is represented as around the top-left corner as defined when the text is
+                          # read in the 'natural' orientation.
+                          # For example:
+                          #
+                          # * when the text is horizontal it might look like:
+                          #
+                          #         0----1
+                          #         |    |
+                          #         3----2
+                          #
+                          # * when it's rotated 180 degrees around the top-left corner it becomes:
+                          #
+                          #         2----3
+                          #         |    |
+                          #         1----0
+                          #
+                          #   and the vertex order will still be (0, 1, 2, 3).
+                        "vertices": [ # The bounding polygon vertices.
+                          { # A vertex represents a 2D point in the image.
+                              # NOTE: the vertex coordinates are in the same scale as the original image.
+                            "x": 42, # X coordinate.
+                            "y": 42, # Y coordinate.
+                          },
+                        ],
+                        "normalizedVertices": [ # The bounding polygon normalized vertices.
+                          { # A vertex represents a 2D point in the image.
+                              # NOTE: the normalized vertex coordinates are relative to the original image
+                              # and range from 0 to 1.
+                            "x": 3.14, # X coordinate.
+                            "y": 3.14, # Y coordinate.
+                          },
+                        ],
+                      },
+                      "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
+                      "property": { # Additional information detected on the structural component. # Additional information detected for the block.
+                        "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                          "type": "A String", # Detected break type.
+                          "isPrefix": True or False, # True if the break comes before the element.
+                        },
+                        "detectedLanguages": [ # A list of detected languages together with confidence.
+                          { # Detected language for a structural component.
+                            "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                            "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                                # information, see
+                                # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                          },
+                        ],
+                      },
+                    },
+                  ],
+                  "property": { # Additional information detected on the structural component. # Additional information detected on the page.
+                    "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
+                      "type": "A String", # Detected break type.
+                      "isPrefix": True or False, # True if the break comes before the element.
+                    },
+                    "detectedLanguages": [ # A list of detected languages together with confidence.
+                      { # Detected language for a structural component.
+                        "confidence": 3.14, # Confidence of detected language. Range [0, 1].
+                        "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                            # information, see
+                            # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                      },
+                    ],
+                  },
+                  "width": 42, # Page width. For PDFs the unit is points. For images (including
+                      # TIFFs) the unit is pixels.
+                  "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
+                  "height": 42, # Page height. For PDFs the unit is points. For images (including
+                      # TIFFs) the unit is pixels.
+                },
+              ],
+            },
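+            # Example (Python, illustrative sketch): walking the OCR hierarchy
+            # above (Page -> Block -> Paragraph -> Word -> Symbol) to reassemble
+            # words. `response` is an assumed name for one parsed response dict.
+            #
+            #     annotation = response.get("fullTextAnnotation", {})
+            #     for page in annotation.get("pages", []):
+            #         for block in page.get("blocks", []):
+            #             for paragraph in block.get("paragraphs", []):
+            #                 for word in paragraph.get("words", []):
+            #                     text = "".join(s["text"] for s in word["symbols"])
+            #                     print(text, word.get("confidence"))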
+            "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
+                # Note that filled-in image annotations are guaranteed to be
+                # correct, even when `error` is set.
+                # different programming environments, including REST APIs and RPC APIs. It is
+                # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+                # three pieces of data: error code, error message, and error details.
+                #
+                # You can find out more about this error model and how to work with it in the
+                # [API Design Guide](https://cloud.google.com/apis/design/errors).
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "details": [ # A list of messages that carry the error details.  There is a common set of
+                  # message types for APIs to use.
+                {
+                  "a_key": "", # Properties of the object. Contains field @type with type URL.
+                },
+              ],
+              "message": "A String", # A developer-facing error message, which should be in English. Any
+                  # user-facing error message should be localized and sent in the
+                  # google.rpc.Status.details field, or localized by the client.
+            },
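+            # Example (Python, illustrative sketch): checking `error` before
+            # consuming a response. Per the note above, annotations that are
+            # filled in remain valid even when `error` is set.
+            #
+            #     err = response.get("error")
+            #     if err:
+            #         print("code %s: %s" % (err.get("code"), err.get("message")))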
+            "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
+                # Results are sorted in descending order by confidence score.
+              { # Set of detected objects with bounding boxes.
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "x": 42, # X coordinate.
+                      "y": 42, # Y coordinate.
+                    },
+                  ],
                   "normalizedVertices": [ # The bounding polygon normalized vertices.
                     { # A vertex represents a 2D point in the image.
                         # NOTE: the normalized vertex coordinates are relative to the original image
@@ -930,29 +1032,24 @@
                       "y": 3.14, # Y coordinate.
                     },
                   ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
                 },
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
+                "name": "A String", # Object name, expressed in its `language_code` language.
+                "mid": "A String", # Object ID that should align with EntityAnnotation mid.
+                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                    # information, see
+                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                "score": 3.14, # Score of the result. Range [0, 1].
+              },
+            ],
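+            # Example (Python, illustrative sketch): scaling the normalized
+            # vertices above back to pixels. `image_width` and `image_height`
+            # are assumed to be known from the input image; they are not part
+            # of this message.
+            #
+            #     for obj in response.get("localizedObjectAnnotations", []):
+            #         poly = obj["boundingPoly"].get("normalizedVertices", [])
+            #         pixels = [(v.get("x", 0.0) * image_width,
+            #                    v.get("y", 0.0) * image_height) for v in poly]
+            #         print(obj.get("name"), obj.get("score"), pixels)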
+            "labelAnnotations": [ # If present, label detection has completed successfully.
+              { # Set of detected entity features.
                 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                     # image. For example, the relevancy of "tower" is likely higher to an image
                     # containing the detected "Eiffel Tower" than to an image containing a
                     # detected distant towering building, even though the confidence that
                     # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "value": "A String", # Value of the property.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                  },
-                ],
-                "score": 3.14, # Overall score of the result. Range [0, 1].
+                "locale": "A String", # The language code for the locale in which the entity textual
+                    # `description` is expressed.
                 "locations": [ # The location information for the detected entity. Multiple
                     # `LocationInfo` elements can be present because one location may
                     # indicate the location of the scene in the image, and another location
@@ -972,11 +1069,102 @@
                 "mid": "A String", # Opaque entity ID. Some IDs may be available in
                     # [Google Knowledge Graph Search
                     # API](https://developers.google.com/knowledge-graph/).
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
                 "confidence": 3.14, # **Deprecated. Use `score` instead.**
                     # The accuracy of the entity detection in an image.
                     # For example, for an image in which the "Eiffel Tower" entity is detected,
                     # this field represents the confidence that there is a tower in the query
                     # image. Range [0, 1].
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                    # for `LABEL_DETECTION` features.
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "x": 42, # X coordinate.
+                      "y": 42, # Y coordinate.
+                    },
+                  ],
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "x": 3.14, # X coordinate.
+                      "y": 3.14, # Y coordinate.
+                    },
+                  ],
+                },
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such as a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "value": "A String", # Value of the property.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
+                "score": 3.14, # Overall score of the result. Range [0, 1].
+              },
+            ],
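+            # Example (Python, illustrative sketch): keeping only labels above
+            # a score threshold; the 0.75 cutoff is arbitrary and shown purely
+            # for illustration.
+            #
+            #     labels = response.get("labelAnnotations", [])
+            #     for label in (l for l in labels if l.get("score", 0.0) >= 0.75):
+            #         print(label.get("description"), label.get("topicality"))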
+            "logoAnnotations": [ # If present, logo detection has completed successfully.
+              { # Set of detected entity features.
+                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+                    # image. For example, the relevancy of "tower" is likely higher to an image
+                    # containing the detected "Eiffel Tower" than to an image containing a
+                    # detected distant towering building, even though the confidence that
+                    # there is a tower in each image may be the same. Range [0, 1].
+                "locale": "A String", # The language code for the locale in which the entity textual
+                    # `description` is expressed.
+                "locations": [ # The location information for the detected entity. Multiple
+                    # `LocationInfo` elements can be present because one location may
+                    # indicate the location of the scene in the image, and another location
+                    # may indicate the location of the place where the image was taken.
+                    # Location information is usually present for landmarks.
+                  { # Detected entity location information.
+                    "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
+                        # of doubles representing degrees latitude and degrees longitude. Unless
+                        # specified otherwise, this must conform to the
+                        # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                        # standard</a>. Values must be within normalized ranges.
+                      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+                      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+                    },
+                  },
+                ],
+                "mid": "A String", # Opaque entity ID. Some IDs may be available in
+                    # [Google Knowledge Graph Search
+                    # API](https://developers.google.com/knowledge-graph/).
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
+                "confidence": 3.14, # **Deprecated. Use `score` instead.**
+                    # The accuracy of the entity detection in an image.
+                    # For example, for an image in which the "Eiffel Tower" entity is detected,
+                    # this field represents the confidence that there is a tower in the query
+                    # image. Range [0, 1].
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                    # for `LABEL_DETECTION` features.
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "x": 42, # X coordinate.
+                      "y": 42, # Y coordinate.
+                    },
+                  ],
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "x": 3.14, # X coordinate.
+                      "y": 3.14, # Y coordinate.
+                    },
+                  ],
+                },
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such as a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "value": "A String", # Value of the property.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
+                "score": 3.14, # Overall score of the result. Range [0, 1].
               },
             ],
             "context": { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
@@ -986,91 +1174,50 @@
               "pageNumber": 42, # If the file was a PDF or TIFF, this field gives the page number within
                   # the file used to produce the image.
             },
-            "webDetection": { # Relevant information for the image from the Internet. # If present, web detection has completed successfully.
-              "visuallySimilarImages": [ # The visually similar image results.
-                { # Metadata for online images.
-                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                  "url": "A String", # The result image URL.
-                },
-              ],
-              "bestGuessLabels": [ # The service's best guess as to the topic of the request image.
-                  # Inferred from similar images on the open web.
-                { # Label to provide extra metadata for the web detection.
-                  "label": "A String", # Label for extra metadata.
-                  "languageCode": "A String", # The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
-                      # For more information, see
-                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                },
-              ],
-              "fullMatchingImages": [ # Fully matching images from the Internet.
-                  # Can include resized copies of the query image.
-                { # Metadata for online images.
-                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                  "url": "A String", # The result image URL.
-                },
-              ],
-              "webEntities": [ # Deduced entities from similar images on the Internet.
-                { # Entity deduced from similar images on the Internet.
-                  "entityId": "A String", # Opaque entity ID.
-                  "description": "A String", # Canonical description of the entity, in English.
-                  "score": 3.14, # Overall relevancy score for the entity.
-                      # Not normalized and not comparable across different image queries.
-                },
-              ],
-              "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
-                { # Metadata for web pages.
-                  "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
-                  "partialMatchingImages": [ # Partial matching images on the page.
-                      # Those images are similar enough to share some key-point features. For
-                      # example an original image will likely have partial matching for its
-                      # crops.
-                    { # Metadata for online images.
-                      "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                      "url": "A String", # The result image URL.
+            "faceAnnotations": [ # If present, face detection has completed successfully.
+              { # A face annotation object contains the results of face detection.
+                "surpriseLikelihood": "A String", # Surprise likelihood.
+                "headwearLikelihood": "A String", # Headwear likelihood.
+                "angerLikelihood": "A String", # Anger likelihood.
+                "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
+                "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
+                "blurredLikelihood": "A String", # Blurred likelihood.
+                "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
+                    # pointing relative to the image's horizontal plane. Range [-180,180].
+                "sorrowLikelihood": "A String", # Sorrow likelihood.
+                "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
+                    # pointing relative to the vertical plane perpendicular to the image. Range
+                    # [-180,180].
+                "landmarks": [ # Detected face landmarks.
+                  { # A face-specific landmark (for example, a face feature).
+                    "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
+                        # A valid Position must have both x and y coordinates.
+                        # The position coordinates are in the same scale as the original image.
+                      "z": 3.14, # Z coordinate (or depth).
+                      "y": 3.14, # Y coordinate.
+                      "x": 3.14, # X coordinate.
+                    },
+                    "type": "A String", # Face landmark type.
+                  },
+                ],
+                "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+                    # of the face relative to the image vertical about the axis perpendicular to
+                    # the face. Range [-180,180].
+                "underExposedLikelihood": "A String", # Under-exposed likelihood.
+                "joyLikelihood": "A String", # Joy likelihood.
+                "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
+                    # `boundingPoly`, and encloses only the skin part of the face. Typically, it
+                    # is used to eliminate the face from any image analysis that detects the
+                    # "amount of skin" visible in an image. It is not based on the
+                    # landmarker results, only on the initial face detection, hence
+                    # the <code>fd</code> (face detection) prefix.
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "x": 42, # X coordinate.
+                      "y": 42, # Y coordinate.
                     },
                   ],
-                  "url": "A String", # The result web page URL.
-                  "pageTitle": "A String", # Title for the web page, may contain HTML markups.
-                  "fullMatchingImages": [ # Fully matching images on the page.
-                      # Can include resized copies of the query image.
-                    { # Metadata for online images.
-                      "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                      "url": "A String", # The result image URL.
-                    },
-                  ],
-                },
-              ],
-              "partialMatchingImages": [ # Partial matching images from the Internet.
-                  # Those images are similar enough to share some key-point features. For
-                  # example an original image will likely have partial matching for its crops.
-                { # Metadata for online images.
-                  "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                  "url": "A String", # The result image URL.
-                },
-              ],
-            },
-            "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
-                # methods over safe-search verticals (for example, adult, spoof, medical,
-                # violence).
-              "adult": "A String", # Represents the adult content likelihood for the image. Adult content may
-                  # contain elements such as nudity, pornographic images or cartoons, or
-                  # sexual activities.
-              "spoof": "A String", # Spoof likelihood. The likelihood that an modification
-                  # was made to the image's canonical version to make it appear
-                  # funny or offensive.
-              "medical": "A String", # Likelihood that this is a medical image.
-              "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
-                  # include (but is not limited to) skimpy or sheer clothing, strategically
-                  # covered nudity, lewd or provocative poses, or close-ups of sensitive
-                  # body areas.
-              "violence": "A String", # Likelihood that this image contains violent content.
-            },
-            "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
-              { # Set of detected entity features.
-                "locale": "A String", # The language code for the locale in which the entity textual
-                    # `description` is expressed.
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                    # for `LABEL_DETECTION` features.
                   "normalizedVertices": [ # The bounding polygon normalized vertices.
                     { # A vertex represents a 2D point in the image.
                         # NOTE: the normalized vertex coordinates are relative to the original image
@@ -1079,64 +1226,7 @@
                       "y": 3.14, # Y coordinate.
                     },
                   ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
                 },
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
-                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
-                    # image. For example, the relevancy of "tower" is likely higher to an image
-                    # containing the detected "Eiffel Tower" than to an image containing a
-                    # detected distant towering building, even though the confidence that
-                    # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "value": "A String", # Value of the property.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                  },
-                ],
-                "score": 3.14, # Overall score of the result. Range [0, 1].
-                "locations": [ # The location information for the detected entity. Multiple
-                    # `LocationInfo` elements can be present because one location may
-                    # indicate the location of the scene in the image, and another location
-                    # may indicate the location of the place where the image was taken.
-                    # Location information is usually present for landmarks.
-                  { # Detected entity location information.
-                    "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
-                        # of doubles representing degrees latitude and degrees longitude. Unless
-                        # specified otherwise, this must conform to the
-                        # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                        # standard</a>. Values must be within normalized ranges.
-                      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-                      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-                    },
-                  },
-                ],
-                "mid": "A String", # Opaque entity ID. Some IDs may be available in
-                    # [Google Knowledge Graph Search
-                    # API](https://developers.google.com/knowledge-graph/).
-                "confidence": 3.14, # **Deprecated. Use `score` instead.**
-                    # The accuracy of the entity detection in an image.
-                    # For example, for an image in which the "Eiffel Tower" entity is detected,
-                    # this field represents the confidence that there is a tower in the query
-                    # image. Range [0, 1].
-              },
-            ],
-            "faceAnnotations": [ # If present, face detection has completed successfully.
-              { # A face annotation object contains the results of face detection.
-                "underExposedLikelihood": "A String", # Under-exposed likelihood.
-                "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
-                    # pointing relative to the vertical plane perpendicular to the image. Range
-                    # [-180,180].
-                "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
-                "blurredLikelihood": "A String", # Blurred likelihood.
-                "headwearLikelihood": "A String", # Headwear likelihood.
                 "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
                     # are in the original image's scale.
                     # The bounding box is computed to "frame" the face in accordance with human
@@ -1144,6 +1234,13 @@
                     # Note that one or more x and/or y coordinates may not be generated in the
                     # `BoundingPoly` (the polygon will be unbounded) if only a partial face
                     # appears in the image to be annotated.
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "x": 42, # X coordinate.
+                      "y": 42, # Y coordinate.
+                    },
+                  ],
                   "normalizedVertices": [ # The bounding polygon normalized vertices.
                     { # A vertex represents a 2D point in the image.
                         # NOTE: the normalized vertex coordinates are relative to the original image
@@ -1152,124 +1249,18 @@
                       "y": 3.14, # Y coordinate.
                     },
                   ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
                 },
-                "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
-                    # of the face relative to the image vertical about the axis perpendicular to
-                    # the face. Range [-180,180].
-                "sorrowLikelihood": "A String", # Sorrow likelihood.
-                "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
-                    # pointing relative to the image's horizontal plane. Range [-180,180].
-                "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
-                    # `boundingPoly`, and encloses only the skin part of the face. Typically, it
-                    # is used to eliminate the face from any image analysis that detects the
-                    # "amount of skin" visible in an image. It is not based on the
-                    # landmarker results, only on the initial face detection, hence
-                    # the <code>fd</code> (face detection) prefix.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "x": 3.14, # X coordinate.
-                      "y": 3.14, # Y coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-                "angerLikelihood": "A String", # Anger likelihood.
-                "landmarks": [ # Detected face landmarks.
-                  { # A face-specific landmark (for example, a face feature).
-                    "type": "A String", # Face landmark type.
-                    "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
-                        # A valid Position must have both x and y coordinates.
-                        # The position coordinates are in the same scale as the original image.
-                      "x": 3.14, # X coordinate.
-                      "z": 3.14, # Z coordinate (or depth).
-                      "y": 3.14, # Y coordinate.
-                    },
-                  },
-                ],
-                "surpriseLikelihood": "A String", # Surprise likelihood.
-                "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
-                "joyLikelihood": "A String", # Joy likelihood.
               },
             ],
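+            # Example (Python, illustrative sketch): reading the face likelihood
+            # fields above. The string values are assumed to be the API's usual
+            # Likelihood enum names (e.g. "LIKELY", "VERY_LIKELY").
+            #
+            #     LIKELY = ("LIKELY", "VERY_LIKELY")
+            #     for face in response.get("faceAnnotations", []):
+            #         if face.get("joyLikelihood") in LIKELY:
+            #             print("joy detected, confidence:",
+            #                   face.get("detectionConfidence"))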
-            "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
-              "cropHints": [ # Crop hint results.
-                { # Single crop hint that is used to generate a new crop when serving an image.
-                  "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
-                  "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
-                      # image.
-                  "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
-                      # box are in the original image's scale.
-                    "normalizedVertices": [ # The bounding polygon normalized vertices.
-                      { # A vertex represents a 2D point in the image.
-                          # NOTE: the normalized vertex coordinates are relative to the original image
-                          # and range from 0 to 1.
-                        "x": 3.14, # X coordinate.
-                        "y": 3.14, # Y coordinate.
-                      },
-                    ],
-                    "vertices": [ # The bounding polygon vertices.
-                      { # A vertex represents a 2D point in the image.
-                          # NOTE: the vertex coordinates are in the same scale as the original image.
-                        "y": 42, # Y coordinate.
-                        "x": 42, # X coordinate.
-                      },
-                    ],
-                  },
-                },
-              ],
-            },
-            "labelAnnotations": [ # If present, label detection has completed successfully.
+            "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
               { # Set of detected entity features.
-                "locale": "A String", # The language code for the locale in which the entity textual
-                    # `description` is expressed.
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                    # for `LABEL_DETECTION` features.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "x": 3.14, # X coordinate.
-                      "y": 3.14, # Y coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
                 "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
                     # image. For example, the relevancy of "tower" is likely higher to an image
                     # containing the detected "Eiffel Tower" than to an image containing a
                     # detected distant towering building, even though the confidence that
                     # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "value": "A String", # Value of the property.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                  },
-                ],
-                "score": 3.14, # Overall score of the result. Range [0, 1].
+                "locale": "A String", # The language code for the locale in which the entity textual
+                    # `description` is expressed.
                 "locations": [ # The location information for the detected entity. Multiple
                     # `LocationInfo` elements can be present because one location may
                     # indicate the location of the scene in the image, and another location
@@ -1289,30 +1280,43 @@
                 "mid": "A String", # Opaque entity ID. Some IDs may be available in
                     # [Google Knowledge Graph Search
                     # API](https://developers.google.com/knowledge-graph/).
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
                 "confidence": 3.14, # **Deprecated. Use `score` instead.**
                     # The accuracy of the entity detection in an image.
                     # For example, for an image in which the "Eiffel Tower" entity is detected,
                     # this field represents the confidence that there is a tower in the query
                     # image. Range [0, 1].
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                    # for `LABEL_DETECTION` features.
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "x": 42, # X coordinate.
+                      "y": 42, # Y coordinate.
+                    },
+                  ],
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "x": 3.14, # X coordinate.
+                      "y": 3.14, # Y coordinate.
+                    },
+                  ],
+                },
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such as a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "value": "A String", # Value of the property.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
+                "score": 3.14, # Overall score of the result. Range [0, 1].
               },
             ],
           },
         ],
-        "inputConfig": { # The desired input location and metadata. # Information about the file for which this response is generated.
-          "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
-            "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
-                # Google Cloud Storage object. Wildcards are not currently supported.
-          },
-          "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
-              # "image/gif" are supported. Wildcards are not supported.
-          "content": "A String", # File content, represented as a stream of bytes.
-              # Note: As with all `bytes` fields, protobuffers use a pure binary
-              # representation, whereas JSON representations use base64.
-              #
-              # Currently, this field only works for BatchAnnotateFiles requests. It does
-              # not work for AsyncBatchAnnotateFiles requests.
-        },
-        "totalPages": 42, # This field gives the total number of pages in the file.
       },
     ],
   }</pre>
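
For orientation, the following minimal sketch shows how the request and response shapes documented above fit together when calling the synchronous files.annotate method through the generated Python client. This is a hedged example, not part of the generated reference: the project ID, bucket, and object names are placeholders, and it assumes the v1p2beta1 discovery surface exposes projects().files().annotate() with a `parent` parameter and that Application Default Credentials are configured.

<pre>
from googleapiclient.discovery import build

# Build the v1p2beta1 Vision service from the discovery document.
service = build('vision', 'v1p2beta1')

body = {
    'requests': [{
        'inputConfig': {
            'gcsSource': {'uri': 'gs://example-bucket/sample.pdf'},  # placeholder URI
            'mimeType': 'application/pdf',
        },
        'features': [
            # maxResults does not apply to TEXT_DETECTION,
            # DOCUMENT_TEXT_DETECTION, or CROP_HINTS (see above).
            {'type': 'LABEL_DETECTION', 'maxResults': 10, 'model': 'builtin/stable'},
            {'type': 'LANDMARK_DETECTION', 'maxResults': 5},
        ],
        'pages': [1, 2],  # page numbers are 1-based
    }],
}

response = service.projects().files().annotate(
    parent='projects/example-project', body=body).execute()

# One AnnotateFileResponse per request; one per-page response inside each.
for file_resp in response.get('responses', []):
    for page_resp in file_resp.get('responses', []):
        for entity in page_resp.get('landmarkAnnotations', []):
            # Prefer `score`; `confidence` is deprecated.
            print(entity.get('description'), entity.get('score'))
            poly = entity.get('boundingPoly', {})
            # `vertices` are in image-pixel scale;
            # `normalizedVertices` range over [0, 1].
            for v in poly.get('normalizedVertices', []):
                print(v.get('x'), v.get('y'))
</pre>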
@@ -1347,32 +1351,6 @@
       # call.
     &quot;requests&quot;: [ # Required. Individual async file annotation requests for this batch.
       { # An offline file annotation request.
-        &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
-          &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
-            &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
-                # Google Cloud Storage object. Wildcards are not currently supported.
-          },
-          &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
-              # &quot;image/gif&quot; are supported. Wildcards are not supported.
-          &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
-              # Note: As with all `bytes` fields, protobuffers use a pure binary
-              # representation, whereas JSON representations use base64.
-              #
-              # Currently, this field only works for BatchAnnotateFiles requests. It does
-              # not work for AsyncBatchAnnotateFiles requests.
-        },
-        &quot;features&quot;: [ # Required. Requested features.
-          { # The type of Google Cloud Vision API detection to perform, and the maximum
-              # number of results to return for that type. Multiple `Feature` objects can
-              # be specified in the `features` list.
-            &quot;type&quot;: &quot;A String&quot;, # The feature type.
-            &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
-                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
-            &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
-                # Supported values: &quot;builtin/stable&quot; (the default if unset) and
-                # &quot;builtin/latest&quot;.
-          },
-        ],
         &quot;imageContext&quot;: { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
           &quot;cropHintsParams&quot;: { # Parameters for crop hints annotation request. # Parameters for crop hints annotation request.
             &quot;aspectRatios&quot;: [ # Aspect ratios in floats, representing the ratio of the width to the height
@@ -1394,20 +1372,15 @@
                 # migrate existing products to these categories as well.
               &quot;A String&quot;,
             ],
-            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
-                # on Product labels. We currently support an AND of OR of key-value
-                # expressions, where each expression within an OR must have the same key. An
-                # &#x27;=&#x27; should be used to connect the key and value.
-                #
-                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
-                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
-                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
-            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
-                #
-                # Format is:
-                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
             &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                # If it is not specified, the system chooses the region at its own discretion.
+              &quot;vertices&quot;: [ # The bounding polygon vertices.
+                { # A vertex represents a 2D point in the image.
+                    # NOTE: the vertex coordinates are in the same scale as the original image.
+                  &quot;x&quot;: 42, # X coordinate.
+                  &quot;y&quot;: 42, # Y coordinate.
+                },
+              ],
               &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
                 { # A vertex represents a 2D point in the image.
                     # NOTE: the normalized vertex coordinates are relative to the original image
@@ -1416,14 +1389,19 @@
                   &quot;y&quot;: 3.14, # Y coordinate.
                 },
               ],
-              &quot;vertices&quot;: [ # The bounding polygon vertices.
-                { # A vertex represents a 2D point in the image.
-                    # NOTE: the vertex coordinates are in the same scale as the original image.
-                  &quot;y&quot;: 42, # Y coordinate.
-                  &quot;x&quot;: 42, # X coordinate.
-                },
-              ],
             },
+            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
+                #
+                # Format is:
+                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
+            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
+                # on Product labels. We currently support an AND of ORs of key-value
+                # expressions, where each expression within an OR must have the same key. An
+                # &#x27;=&#x27; should be used to connect the key and value.
+                #
+                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
+                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
+                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
           },
           &quot;languageHints&quot;: [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
               # yields the best results since it enables automatic language detection. For
@@ -1435,9 +1413,6 @@
               # [supported languages](https://cloud.google.com/vision/docs/languages).
             &quot;A String&quot;,
           ],
-          &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
-            &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
-          },
           &quot;latLongRect&quot;: { # Rectangle determined by min and max `LatLng` pairs. # Not used.
             &quot;maxLatLng&quot;: { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                 # of doubles representing degrees latitude and degrees longitude. Unless
@@ -1456,8 +1431,48 @@
               &quot;longitude&quot;: 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
             },
           },
+          &quot;webDetectionParams&quot;: { # Parameters for web detection request. # Parameters for web detection.
+            &quot;includeGeoResults&quot;: True or False, # Whether to include results derived from the geo information in the image.
+          },
         },
+        &quot;inputConfig&quot;: { # The desired input location and metadata. # Required. Information about the input file.
+          &quot;content&quot;: &quot;A String&quot;, # File content, represented as a stream of bytes.
+              # Note: As with all `bytes` fields, protocol buffers use a pure binary
+              # representation, whereas JSON representations use base64.
+              #
+              # Currently, this field only works for BatchAnnotateFiles requests. It does
+              # not work for AsyncBatchAnnotateFiles requests.
+          &quot;mimeType&quot;: &quot;A String&quot;, # The type of the file. Currently only &quot;application/pdf&quot;, &quot;image/tiff&quot; and
+              # &quot;image/gif&quot; are supported. Wildcards are not supported.
+          &quot;gcsSource&quot;: { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
+            &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI for the input file. This must only be a
+                # Google Cloud Storage object. Wildcards are not currently supported.
+          },
+        },
+        &quot;features&quot;: [ # Required. Requested features.
+          { # The type of Google Cloud Vision API detection to perform, and the maximum
+              # number of results to return for that type. Multiple `Feature` objects can
+              # be specified in the `features` list.
+            &quot;type&quot;: &quot;A String&quot;, # The feature type.
+            &quot;maxResults&quot;: 42, # Maximum number of results of this type. Does not apply to
+                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+            &quot;model&quot;: &quot;A String&quot;, # Model to use for the feature.
+                # Supported values: &quot;builtin/stable&quot; (the default if unset) and
+                # &quot;builtin/latest&quot;.
+          },
+        ],
         &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
+          &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
+              # Google Cloud Storage.
+              # The valid range is [1, 100]. If not specified, the default value is 20.
+              #
+              # For example, for one pdf file with 100 pages, 100 response protos will
+              # be generated. If `batch_size` = 20, then 5 JSON files each
+              # containing 20 response protos will be written under the prefix
+              # `gcs_destination`.`uri`.
+              #
+              # Currently, batch_size only applies to GcsDestination, with potential future
+              # support for other output configurations.
           &quot;gcsDestination&quot;: { # The Google Cloud Storage location where the output will be written to. # The Google Cloud Storage location to write the output(s) to.
             &quot;uri&quot;: &quot;A String&quot;, # Google Cloud Storage URI prefix where the results will be stored. Results
                # will be in JSON format, each preceded by its corresponding input URI prefix.
@@ -1482,17 +1497,6 @@
                 # Multiple outputs can happen if, for example, the output JSON is too large
                 # and overflows into multiple sharded files.
           },
-          &quot;batchSize&quot;: 42, # The max number of response protos to put into each output JSON file on
-              # Google Cloud Storage.
-              # The valid range is [1, 100]. If not specified, the default value is 20.
-              #
-              # For example, for one pdf file with 100 pages, 100 response protos will
-              # be generated. If `batch_size` = 20, then 5 json files each
-              # containing 20 response protos will be written under the prefix
-              # `gcs_destination`.`uri`.
-              #
-              # Currently, batch_size only applies to GcsDestination, with potential future
-              # support for other output configurations.
         },
       },
     ],
@@ -1520,33 +1524,6 @@
 
     { # This resource represents a long-running operation that is the result of a
       # network API call.
-    &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
-        # different programming environments, including REST APIs and RPC APIs. It is
-        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-        # three pieces of data: error code, error message, and error details.
-        #
-        # You can find out more about this error model and how to work with it in the
-        # [API Design Guide](https://cloud.google.com/apis/design/errors).
-      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
-      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
-      &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
-          # message types for APIs to use.
-        {
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
-        },
-      ],
-    },
-    &quot;metadata&quot;: { # Service-specific metadata associated with the operation.  It typically
-        # contains progress information and common metadata such as create time.
-        # Some services might not provide such metadata.  Any method that returns a
-        # long-running operation should document the metadata type, if any.
-      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
-    },
-    &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
-        # If `true`, the operation is completed, and either `error` or `response` is
-        # available.
     &quot;response&quot;: { # The normal response of the operation in case of success.  If the original
         # method returns no data on success, such as `Delete`, the response is
         # `google.protobuf.Empty`.  If the original method is standard
@@ -1557,9 +1534,36 @@
         # `TakeSnapshotResponse`.
       &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
     },
+    &quot;metadata&quot;: { # Service-specific metadata associated with the operation.  It typically
+        # contains progress information and common metadata such as create time.
+        # Some services might not provide such metadata.  Any method that returns a
+        # long-running operation should document the metadata type, if any.
+      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+    },
     &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
         # originally returns it. If you use the default HTTP mapping, the
         # `name` should be a resource name ending with `operations/{unique_id}`.
+    &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
+        # different programming environments, including REST APIs and RPC APIs. It is
+        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+        # three pieces of data: error code, error message, and error details.
+        #
+        # You can find out more about this error model and how to work with it in the
+        # [API Design Guide](https://cloud.google.com/apis/design/errors).
+      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
+      &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
+          # message types for APIs to use.
+        {
+          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+        },
+      ],
+      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+    },
+    &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
+        # If `true`, the operation is completed, and either `error` or `response` is
+        # available.
   }</pre>
 </div>
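
To close the loop on the offline flow: asyncBatchAnnotate returns the long-running Operation documented above, which the caller polls until `done` is true and then inspects `error` or `response`. The sketch below makes that concrete under stated assumptions: it reuses the discovery client from the earlier example, the bucket URIs and poll interval are placeholders, and the operations().get() polling call is an assumption rather than part of this reference (some surfaces require polling through the v1 API instead).

<pre>
import time

from googleapiclient.discovery import build

service = build('vision', 'v1p2beta1')

operation = service.projects().files().asyncBatchAnnotate(
    parent='projects/example-project',
    body={
        'requests': [{
            'inputConfig': {
                # Async requests read from Cloud Storage; inline `content`
                # bytes are documented above as BatchAnnotateFiles-only.
                'gcsSource': {'uri': 'gs://example-bucket/sample.pdf'},
                'mimeType': 'application/pdf',
            },
            'features': [{'type': 'DOCUMENT_TEXT_DETECTION'}],
            'outputConfig': {
                'gcsDestination': {'uri': 'gs://example-bucket/output/'},
                # With batchSize = 20, a 100-page PDF yields 5 output
                # JSON files under the destination prefix.
                'batchSize': 20,
            },
        }],
    }).execute()

# Poll by the server-assigned name (ends with operations/{unique_id}).
while not operation.get('done', False):
    time.sleep(10)  # placeholder interval
    operation = service.operations().get(name=operation['name']).execute()

if 'error' in operation:
    # `error` is a google.rpc.Status: code, message, details.
    raise RuntimeError(operation['error'].get('message'))

print(operation.get('response'))
</pre>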