docs: update docs (#916)

* fix: re-run script

* test: fix noxfile
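
For reference, a minimal sketch of how the regenerated `vision_v1p1beta1` surface documented in the diff below might be called from the client library. The request body mirrors the `BatchAnnotateFilesRequest` shape shown in the updated HTML; the bucket URI, page selection, feature choice, and credential setup are illustrative assumptions and not part of this change.

```python
# Illustrative sketch only, not part of this change. Assumes application
# default credentials (or an API key) are already configured; the GCS URI,
# pages, and feature type below are placeholders.
from googleapiclient.discovery import build

# Build the Vision v1p1beta1 client from the public discovery document.
service = build("vision", "v1p1beta1")

parent = "projects/project-A/locations/eu"  # optional location routing, per the docs below
body = {
    "parent": parent,
    "requests": [
        {
            "inputConfig": {
                "gcsSource": {"uri": "gs://example-bucket/example.pdf"},  # placeholder object
                "mimeType": "application/pdf",
            },
            "features": [{"type": "DOCUMENT_TEXT_DETECTION"}],
            "pages": [1, 2, -1],  # first two pages plus the last page
        }
    ],
}

# projects.locations.files.annotate, as documented in the regenerated HTML.
response = (
    service.projects()
    .locations()
    .files()
    .annotate(parent=parent, body=body)
    .execute()
)

# Each entry corresponds to one AnnotateFileRequest; per-image results live
# in its "responses" list, or "error" is set if the file failed.
for file_response in response.get("responses", []):
    print(len(file_response.get("responses", [])), "image responses")
```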
diff --git a/docs/dyn/vision_v1p1beta1.projects.locations.files.html b/docs/dyn/vision_v1p1beta1.projects.locations.files.html
index f5fd1c0..a083987 100644
--- a/docs/dyn/vision_v1p1beta1.projects.locations.files.html
+++ b/docs/dyn/vision_v1p1beta1.projects.locations.files.html
@@ -108,63 +108,9 @@
     The object takes the form of:
 
 { # A list of requests to annotate files using the BatchAnnotateFiles API.
-    "parent": "A String", # Optional. Target project and location to make a call.
-        # 
-        # Format: `projects/{project-id}/locations/{location-id}`.
-        # 
-        # If no parent is specified, a region will be chosen automatically.
-        # 
-        # Supported location-ids:
-        #     `us`: USA country only,
-        #     `asia`: East asia areas, like Japan, Taiwan,
-        #     `eu`: The European Union.
-        # 
-        # Example: `projects/project-A/locations/eu`.
     "requests": [ # Required. The list of file annotation requests. Right now we support only one
         # AnnotateFileRequest in BatchAnnotateFilesRequest.
       { # A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
-        "pages": [ # Pages of the file to perform image annotation.
-            #
-            # Pages starts from 1, we assume the first page of the file is page 1.
-            # At most 5 pages are supported per request. Pages can be negative.
-            #
-            # Page 1 means the first page.
-            # Page 2 means the second page.
-            # Page -1 means the last page.
-            # Page -2 means the second to the last page.
-            #
-            # If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
-            #
-            # If this field is empty, by default the service performs image annotation
-            # for the first 5 pages of the file.
-          42,
-        ],
-        "inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
-          "content": "A String", # File content, represented as a stream of bytes.
-              # Note: As with all `bytes` fields, protobuffers use a pure binary
-              # representation, whereas JSON representations use base64.
-              #
-              # Currently, this field only works for BatchAnnotateFiles requests. It does
-              # not work for AsyncBatchAnnotateFiles requests.
-          "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
-            "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
-                # Google Cloud Storage object. Wildcards are not currently supported.
-          },
-          "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
-              # "image/gif" are supported. Wildcards are not supported.
-        },
-        "features": [ # Required. Requested features.
-          { # The type of Google Cloud Vision API detection to perform, and the maximum
-              # number of results to return for that type. Multiple `Feature` objects can
-              # be specified in the `features` list.
-            "type": "A String", # The feature type.
-            "maxResults": 42, # Maximum number of results of this type. Does not apply to
-                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
-            "model": "A String", # Model to use for the feature.
-                # Supported values: "builtin/stable" (the default if unset) and
-                # "builtin/latest".
-          },
-        ],
         "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
           "languageHints": [ # List of languages to use for TEXT_DETECTION. In most cases, an empty value
               # yields the best results since it enables automatic language detection. For
@@ -208,18 +154,6 @@
             ],
           },
           "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
-            "filter": "A String", # The filtering expression. This can be used to restrict search results based
-                # on Product labels. We currently support an AND of OR of key-value
-                # expressions, where each expression within an OR must have the same key. An
-                # '=' should be used to connect the key and value.
-                #
-                # For example, "(color = red OR color = blue) AND brand = Google" is
-                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
-                # "color: red" is not acceptable because it uses a ':' instead of an '='.
-            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
-                #
-                # Format is:
-                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
             "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                 # If it is not specified, system discretion will be applied.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
@@ -247,10 +181,76 @@
                 # migrate existing products to these categories as well.
               "A String",
             ],
+            "filter": "A String", # The filtering expression. This can be used to restrict search results based
+                # on Product labels. We currently support an AND of OR of key-value
+                # expressions, where each expression within an OR must have the same key. An
+                # '=' should be used to connect the key and value.
+                #
+                # For example, "(color = red OR color = blue) AND brand = Google" is
+                # acceptable, but "(color = red OR brand = Google)" is not acceptable.
+                # "color: red" is not acceptable because it uses a ':' instead of an '='.
+            "productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
+                #
+                # Format is:
+                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
           },
         },
+        "pages": [ # Pages of the file to perform image annotation.
+            #
+            # Pages starts from 1, we assume the first page of the file is page 1.
+            # At most 5 pages are supported per request. Pages can be negative.
+            #
+            # Page 1 means the first page.
+            # Page 2 means the second page.
+            # Page -1 means the last page.
+            # Page -2 means the second to the last page.
+            #
+            # If the file is GIF instead of PDF or TIFF, page refers to GIF frames.
+            #
+            # If this field is empty, by default the service performs image annotation
+            # for the first 5 pages of the file.
+          42,
+        ],
+        "inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
+          "content": "A String", # File content, represented as a stream of bytes.
+              # Note: As with all `bytes` fields, protobuffers use a pure binary
+              # representation, whereas JSON representations use base64.
+              #
+              # Currently, this field only works for BatchAnnotateFiles requests. It does
+              # not work for AsyncBatchAnnotateFiles requests.
+          "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
+            "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
+                # Google Cloud Storage object. Wildcards are not currently supported.
+          },
+          "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
+              # "image/gif" are supported. Wildcards are not supported.
+        },
+        "features": [ # Required. Requested features.
+          { # The type of Google Cloud Vision API detection to perform, and the maximum
+              # number of results to return for that type. Multiple `Feature` objects can
+              # be specified in the `features` list.
+            "type": "A String", # The feature type.
+            "maxResults": 42, # Maximum number of results of this type. Does not apply to
+                # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+            "model": "A String", # Model to use for the feature.
+                # Supported values: "builtin/stable" (the default if unset) and
+                # "builtin/latest".
+          },
+        ],
       },
     ],
+    "parent": "A String", # Optional. Target project and location to make a call.
+        # 
+        # Format: `projects/{project-id}/locations/{location-id}`.
+        # 
+        # If no parent is specified, a region will be chosen automatically.
+        # 
+        # Supported location-ids:
+        #     `us`: USA country only,
+        #     `asia`: East asia areas, like Japan, Taiwan,
+        #     `eu`: The European Union.
+        # 
+        # Example: `projects/project-A/locations/eu`.
   }
 
   x__xgafv: string, V1 error format.
@@ -266,71 +266,14 @@
         # AnnotateFileRequest in BatchAnnotateFilesRequest.
       { # Response to a single file annotation request. A file may contain one or more
           # images, which individually have their own responses.
-        "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the failed request. The
-            # `responses` field will not be set in this case.
-            # different programming environments, including REST APIs and RPC APIs. It is
-            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-            # three pieces of data: error code, error message, and error details.
-            #
-            # You can find out more about this error model and how to work with it in the
-            # [API Design Guide](https://cloud.google.com/apis/design/errors).
-          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-          "message": "A String", # A developer-facing error message, which should be in English. Any
-              # user-facing error message should be localized and sent in the
-              # google.rpc.Status.details field, or localized by the client.
-          "details": [ # A list of messages that carry the error details.  There is a common set of
-              # message types for APIs to use.
-            {
-              "a_key": "", # Properties of the object. Contains field @type with type URL.
-            },
-          ],
-        },
         "responses": [ # Individual responses to images found within the file. This field will be
             # empty if the `error` field is set.
           { # Response to an image annotation request.
-            "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
-              { # Set of detected entity features.
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
-                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
-                    # image. For example, the relevancy of "tower" is likely higher to an image
-                    # containing the detected "Eiffel Tower" than to an image containing a
-                    # detected distant towering building, even though the confidence that
-                    # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                    "value": "A String", # Value of the property.
-                  },
-                ],
-                "score": 3.14, # Overall score of the result. Range [0, 1].
-                "locations": [ # The location information for the detected entity. Multiple
-                    # `LocationInfo` elements can be present because one location may
-                    # indicate the location of the scene in the image, and another location
-                    # may indicate the location of the place where the image was taken.
-                    # Location information is usually present for landmarks.
-                  { # Detected entity location information.
-                    "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
-                        # of doubles representing degrees latitude and degrees longitude. Unless
-                        # specified otherwise, this must conform to the
-                        # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                        # standard</a>. Values must be within normalized ranges.
-                      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-                      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-                    },
-                  },
-                ],
-                "mid": "A String", # Opaque entity ID. Some IDs may be available in
-                    # [Google Knowledge Graph Search
-                    # API](https://developers.google.com/knowledge-graph/).
-                "confidence": 3.14, # **Deprecated. Use `score` instead.**
-                    # The accuracy of the entity detection in an image.
-                    # For example, for an image in which the "Eiffel Tower" entity is detected,
-                    # this field represents the confidence that there is a tower in the query
-                    # image. Range [0, 1].
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                    # for `LABEL_DETECTION` features.
+            "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
+                # This will be sorted descending by confidence score.
+              { # Set of detected objects with bounding boxes.
+                "name": "A String", # Object name, expressed in its `language_code` language.
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
                   "normalizedVertices": [ # The bounding polygon normalized vertices.
                     { # A vertex represents a 2D point in the image.
                         # NOTE: the normalized vertex coordinates are relative to the original image
@@ -347,176 +290,11 @@
                     },
                   ],
                 },
-                "locale": "A String", # The language code for the locale in which the entity textual
-                    # `description` is expressed.
-              },
-            ],
-            "faceAnnotations": [ # If present, face detection has completed successfully.
-              { # A face annotation object contains the results of face detection.
-                "angerLikelihood": "A String", # Anger likelihood.
-                "landmarks": [ # Detected face landmarks.
-                  { # A face-specific landmark (for example, a face feature).
-                    "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
-                        # A valid Position must have both x and y coordinates.
-                        # The position coordinates are in the same scale as the original image.
-                      "x": 3.14, # X coordinate.
-                      "z": 3.14, # Z coordinate (or depth).
-                      "y": 3.14, # Y coordinate.
-                    },
-                    "type": "A String", # Face landmark type.
-                  },
-                ],
-                "surpriseLikelihood": "A String", # Surprise likelihood.
-                "joyLikelihood": "A String", # Joy likelihood.
-                "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
-                "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
-                "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
-                    # pointing relative to the vertical plane perpendicular to the image. Range
-                    # [-180,180].
-                "underExposedLikelihood": "A String", # Under-exposed likelihood.
-                "blurredLikelihood": "A String", # Blurred likelihood.
-                "headwearLikelihood": "A String", # Headwear likelihood.
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
-                    # are in the original image's scale.
-                    # The bounding box is computed to "frame" the face in accordance with human
-                    # expectations. It is based on the landmarker results.
-                    # Note that one or more x and/or y coordinates may not be generated in the
-                    # `BoundingPoly` (the polygon will be unbounded) if only a partial face
-                    # appears in the image to be annotated.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "y": 3.14, # Y coordinate.
-                      "x": 3.14, # X coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-                "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
-                    # of the face relative to the image vertical about the axis perpendicular to
-                    # the face. Range [-180,180].
-                "sorrowLikelihood": "A String", # Sorrow likelihood.
-                "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
-                    # pointing relative to the image's horizontal plane. Range [-180,180].
-                "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
-                    # `boundingPoly`, and encloses only the skin part of the face. Typically, it
-                    # is used to eliminate the face from any image analysis that detects the
-                    # "amount of skin" visible in an image. It is not based on the
-                    # landmarker results, only on the initial face detection, hence
-                    # the <code>fd</code> (face detection) prefix.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "y": 3.14, # Y coordinate.
-                      "x": 3.14, # X coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-              },
-            ],
-            "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
-              "cropHints": [ # Crop hint results.
-                { # Single crop hint that is used to generate a new crop when serving an image.
-                  "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
-                  "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
-                      # image.
-                  "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
-                      # box are in the original image's scale.
-                    "normalizedVertices": [ # The bounding polygon normalized vertices.
-                      { # A vertex represents a 2D point in the image.
-                          # NOTE: the normalized vertex coordinates are relative to the original image
-                          # and range from 0 to 1.
-                        "y": 3.14, # Y coordinate.
-                        "x": 3.14, # X coordinate.
-                      },
-                    ],
-                    "vertices": [ # The bounding polygon vertices.
-                      { # A vertex represents a 2D point in the image.
-                          # NOTE: the vertex coordinates are in the same scale as the original image.
-                        "y": 42, # Y coordinate.
-                        "x": 42, # X coordinate.
-                      },
-                    ],
-                  },
-                },
-              ],
-            },
-            "labelAnnotations": [ # If present, label detection has completed successfully.
-              { # Set of detected entity features.
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
-                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
-                    # image. For example, the relevancy of "tower" is likely higher to an image
-                    # containing the detected "Eiffel Tower" than to an image containing a
-                    # detected distant towering building, even though the confidence that
-                    # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                    "value": "A String", # Value of the property.
-                  },
-                ],
-                "score": 3.14, # Overall score of the result. Range [0, 1].
-                "locations": [ # The location information for the detected entity. Multiple
-                    # `LocationInfo` elements can be present because one location may
-                    # indicate the location of the scene in the image, and another location
-                    # may indicate the location of the place where the image was taken.
-                    # Location information is usually present for landmarks.
-                  { # Detected entity location information.
-                    "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
-                        # of doubles representing degrees latitude and degrees longitude. Unless
-                        # specified otherwise, this must conform to the
-                        # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                        # standard</a>. Values must be within normalized ranges.
-                      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
-                      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
-                    },
-                  },
-                ],
-                "mid": "A String", # Opaque entity ID. Some IDs may be available in
-                    # [Google Knowledge Graph Search
-                    # API](https://developers.google.com/knowledge-graph/).
-                "confidence": 3.14, # **Deprecated. Use `score` instead.**
-                    # The accuracy of the entity detection in an image.
-                    # For example, for an image in which the "Eiffel Tower" entity is detected,
-                    # this field represents the confidence that there is a tower in the query
-                    # image. Range [0, 1].
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
-                    # for `LABEL_DETECTION` features.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "y": 3.14, # Y coordinate.
-                      "x": 3.14, # X coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-                "locale": "A String", # The language code for the locale in which the entity textual
-                    # `description` is expressed.
+                "score": 3.14, # Score of the result. Range [0, 1].
+                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                    # information, see
+                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                "mid": "A String", # Object ID that should align with EntityAnnotation mid.
               },
             ],
             "productSearchResults": { # Results for a product search request. # If present, product search has completed successfully.
@@ -526,16 +304,6 @@
                   # matches in the union of all the per-product results.
                 { # Information about the products similar to a single product in a query
                     # image.
-                  "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
-                    { # Prediction for what the object in the bounding box is.
-                      "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                          # information, see
-                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                      "mid": "A String", # Object ID that should align with EntityAnnotation mid.
-                      "name": "A String", # Object name, expressed in its `language_code` language.
-                      "score": 3.14, # Score of the result. Range [0, 1].
-                    },
-                  ],
                   "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the product detected in the query image.
                     "normalizedVertices": [ # The bounding polygon normalized vertices.
                       { # A vertex represents a 2D point in the image.
@@ -558,6 +326,12 @@
                       "image": "A String", # The resource name of the image from the product that is the closest match
                           # to the query.
                       "product": { # A Product contains ReferenceImages. # The Product.
+                        "name": "A String", # The resource name of the product.
+                            #
+                            # Format is:
+                            # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+                            #
+                            # This field is ignored when creating a product.
                         "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
                             # 4096 characters long.
                         "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
@@ -586,17 +360,21 @@
                                 # cannot exceed 128 bytes.
                           },
                         ],
-                        "name": "A String", # The resource name of the product.
-                            #
-                            # Format is:
-                            # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
-                            #
-                            # This field is ignored when creating a product.
                       },
                       "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                           # 1 (full confidence).
                     },
                   ],
+                  "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
+                    { # Prediction for what the object in the bounding box is.
+                      "score": 3.14, # Score of the result. Range [0, 1].
+                      "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                          # information, see
+                          # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                      "mid": "A String", # Object ID that should align with EntityAnnotation mid.
+                      "name": "A String", # Object name, expressed in its `language_code` language.
+                    },
+                  ],
                 },
               ],
               "results": [ # List of results, one for each product match.
@@ -604,6 +382,12 @@
                   "image": "A String", # The resource name of the image from the product that is the closest match
                       # to the query.
                   "product": { # A Product contains ReferenceImages. # The Product.
+                    "name": "A String", # The resource name of the product.
+                        #
+                        # Format is:
+                        # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+                        #
+                        # This field is ignored when creating a product.
                     "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
                         # 4096 characters long.
                     "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
@@ -632,12 +416,6 @@
                             # cannot exceed 128 bytes.
                       },
                     ],
-                    "name": "A String", # The resource name of the product.
-                        #
-                        # Format is:
-                        # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
-                        #
-                        # This field is ignored when creating a product.
                   },
                   "score": 3.14, # A confidence level on the match, ranging from 0 (no confidence) to
                       # 1 (full confidence).
@@ -647,34 +425,6 @@
                   # product set and products removed from the product set after this time are
                   # not reflected in the current results.
             },
-            "localizedObjectAnnotations": [ # If present, localized object detection has completed successfully.
-                # This will be sorted descending by confidence score.
-              { # Set of detected objects with bounding boxes.
-                "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
-                    # information, see
-                    # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
-                "mid": "A String", # Object ID that should align with EntityAnnotation mid.
-                "name": "A String", # Object name, expressed in its `language_code` language.
-                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
-                  "normalizedVertices": [ # The bounding polygon normalized vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the normalized vertex coordinates are relative to the original image
-                        # and range from 0 to 1.
-                      "y": 3.14, # Y coordinate.
-                      "x": 3.14, # X coordinate.
-                    },
-                  ],
-                  "vertices": [ # The bounding polygon vertices.
-                    { # A vertex represents a 2D point in the image.
-                        # NOTE: the vertex coordinates are in the same scale as the original image.
-                      "y": 42, # Y coordinate.
-                      "x": 42, # X coordinate.
-                    },
-                  ],
-                },
-                "score": 3.14, # Score of the result. Range [0, 1].
-              },
-            ],
             "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the operation.
                 # Note that filled-in image annotations are guaranteed to be
                 # correct, even when `error` is set.
@@ -684,16 +434,16 @@
                 #
                 # You can find out more about this error model and how to work with it in the
                 # [API Design Guide](https://cloud.google.com/apis/design/errors).
-              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-              "message": "A String", # A developer-facing error message, which should be in English. Any
-                  # user-facing error message should be localized and sent in the
-                  # google.rpc.Status.details field, or localized by the client.
               "details": [ # A list of messages that carry the error details.  There is a common set of
                   # message types for APIs to use.
                 {
                   "a_key": "", # Properties of the object. Contains field @type with type URL.
                 },
               ],
+              "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+              "message": "A String", # A developer-facing error message, which should be in English. Any
+                  # user-facing error message should be localized and sent in the
+                  # google.rpc.Status.details field, or localized by the client.
             },
             "fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
                 # completed successfully.
@@ -707,11 +457,6 @@
                 # detail.
               "pages": [ # List of pages detected by OCR.
                 { # Detected page from OCR.
-                  "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
-                  "height": 42, # Page height. For PDFs the unit is points. For images (including
-                      # TIFFs) the unit is pixels.
-                  "width": 42, # Page width. For PDFs the unit is points. For images (including
-                      # TIFFs) the unit is pixels.
                   "blocks": [ # List of blocks of text, images etc on this page.
                     { # Logical element on the page.
                       "property": { # Additional information detected on the structural component. # Additional information detected for the block.
@@ -934,26 +679,17 @@
                       "isPrefix": True or False, # True if break prepends the element.
                     },
                   },
+                  "confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
+                  "height": 42, # Page height. For PDFs the unit is points. For images (including
+                      # TIFFs) the unit is pixels.
+                  "width": 42, # Page width. For PDFs the unit is points. For images (including
+                      # TIFFs) the unit is pixels.
                 },
               ],
               "text": "A String", # UTF-8 text detected on the pages.
             },
             "textAnnotations": [ # If present, text (OCR) detection has completed successfully.
               { # Set of detected entity features.
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
-                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
-                    # image. For example, the relevancy of "tower" is likely higher to an image
-                    # containing the detected "Eiffel Tower" than to an image containing a
-                    # detected distant towering building, even though the confidence that
-                    # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                    "value": "A String", # Value of the property.
-                  },
-                ],
                 "score": 3.14, # Overall score of the result. Range [0, 1].
                 "locations": [ # The location information for the detected entity. Multiple
                     # `LocationInfo` elements can be present because one location may
@@ -999,6 +735,20 @@
                 },
                 "locale": "A String", # The language code for the locale in which the entity textual
                     # `description` is expressed.
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
+                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+                    # image. For example, the relevancy of "tower" is likely higher to an image
+                    # containing the detected "Eiffel Tower" than to an image containing a
+                    # detected distant towering building, even though the confidence that
+                    # there is a tower in each image may be the same. Range [0, 1].
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "value": "A String", # Value of the property.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
               },
             ],
             "imagePropertiesAnnotation": { # Stores image properties, such as dominant colors. # If present, image properties were extracted successfully.
@@ -1006,6 +756,7 @@
                 "colors": [ # RGB color values with their score and pixel fraction.
                   { # Color information consists of RGB channels, score, and the fraction of
                       # the image that the color occupies in the image.
+                    "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
                     "pixelFraction": 3.14, # The fraction of pixels the color occupies in the image.
                         # Value in range [0, 1].
                     "color": { # Represents a color in the RGBA color space. This representation is designed # RGB components of the color.
@@ -1142,27 +893,12 @@
                           # If omitted, this color object is to be rendered as a solid color
                           # (as if the alpha value had been explicitly given with a value of 1.0).
                     },
-                    "score": 3.14, # Image-specific score for this color. Value in range [0, 1].
                   },
                 ],
               },
             },
             "logoAnnotations": [ # If present, logo detection has completed successfully.
               { # Set of detected entity features.
-                "description": "A String", # Entity textual description, expressed in its `locale` language.
-                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
-                    # image. For example, the relevancy of "tower" is likely higher to an image
-                    # containing the detected "Eiffel Tower" than to an image containing a
-                    # detected distant towering building, even though the confidence that
-                    # there is a tower in each image may be the same. Range [0, 1].
-                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
-                    # fields, such a score or string that qualifies the entity.
-                  { # A `Property` consists of a user-supplied name/value pair.
-                    "uint64Value": "A String", # Value of numeric properties.
-                    "name": "A String", # Name of the property.
-                    "value": "A String", # Value of the property.
-                  },
-                ],
                 "score": 3.14, # Overall score of the result. Range [0, 1].
                 "locations": [ # The location information for the detected entity. Multiple
                     # `LocationInfo` elements can be present because one location may
@@ -1208,6 +944,20 @@
                 },
                 "locale": "A String", # The language code for the locale in which the entity textual
                     # `description` is expressed.
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
+                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+                    # image. For example, the relevancy of "tower" is likely higher to an image
+                    # containing the detected "Eiffel Tower" than to an image containing a
+                    # detected distant towering building, even though the confidence that
+                    # there is a tower in each image may be the same. Range [0, 1].
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "value": "A String", # Value of the property.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
               },
             ],
             "context": { # If an image was produced from a file (e.g. a PDF), this message gives # If present, contextual information is needed to understand where this image
@@ -1250,15 +1000,6 @@
               ],
               "pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
                 { # Metadata for web pages.
-                  "pageTitle": "A String", # Title for the web page, may contain HTML markups.
-                  "fullMatchingImages": [ # Fully matching images on the page.
-                      # Can include resized copies of the query image.
-                    { # Metadata for online images.
-                      "score": 3.14, # (Deprecated) Overall relevancy score for the image.
-                      "url": "A String", # The result image URL.
-                    },
-                  ],
-                  "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
                   "partialMatchingImages": [ # Partial matching images on the page.
                       # Those images are similar enough to share some key-point features. For
                       # example an original image will likely have partial matching for its
@@ -1269,6 +1010,15 @@
                     },
                   ],
                   "url": "A String", # The result web page URL.
+                  "pageTitle": "A String", # Title for the web page, may contain HTML markups.
+                  "fullMatchingImages": [ # Fully matching images on the page.
+                      # Can include resized copies of the query image.
+                    { # Metadata for online images.
+                      "score": 3.14, # (Deprecated) Overall relevancy score for the image.
+                      "url": "A String", # The result image URL.
+                    },
+                  ],
+                  "score": 3.14, # (Deprecated) Overall relevancy score for the web page.
                 },
               ],
               "partialMatchingImages": [ # Partial matching images from the Internet.
@@ -1283,11 +1033,6 @@
             "safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
                 # methods over safe-search verticals (for example, adult, spoof, medical,
                 # violence).
-              "medical": "A String", # Likelihood that this is a medical image.
-              "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
-                  # include (but is not limited to) skimpy or sheer clothing, strategically
-                  # covered nudity, lewd or provocative poses, or close-ups of sensitive
-                  # body areas.
               "violence": "A String", # Likelihood that this image contains violent content.
               "adult": "A String", # Represents the adult content likelihood for the image. Adult content may
                   # contain elements such as nudity, pornographic images or cartoons, or
@@ -1295,7 +1040,243 @@
               "spoof": "A String", # Spoof likelihood. The likelihood that an modification
                   # was made to the image's canonical version to make it appear
                   # funny or offensive.
+              "medical": "A String", # Likelihood that this is a medical image.
+              "racy": "A String", # Likelihood that the request image contains racy content. Racy content may
+                  # include (but is not limited to) skimpy or sheer clothing, strategically
+                  # covered nudity, lewd or provocative poses, or close-ups of sensitive
+                  # body areas.
             },
+            "landmarkAnnotations": [ # If present, landmark detection has completed successfully.
+              { # Set of detected entity features.
+                "score": 3.14, # Overall score of the result. Range [0, 1].
+                "locations": [ # The location information for the detected entity. Multiple
+                    # `LocationInfo` elements can be present because one location may
+                    # indicate the location of the scene in the image, and another location
+                    # may indicate the location of the place where the image was taken.
+                    # Location information is usually present for landmarks.
+                  { # Detected entity location information.
+                    "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
+                        # of doubles representing degrees latitude and degrees longitude. Unless
+                        # specified otherwise, this must conform to the
+                        # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                        # standard</a>. Values must be within normalized ranges.
+                      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+                      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+                    },
+                  },
+                ],
+                "mid": "A String", # Opaque entity ID. Some IDs may be available in
+                    # [Google Knowledge Graph Search
+                    # API](https://developers.google.com/knowledge-graph/).
+                "confidence": 3.14, # **Deprecated. Use `score` instead.**
+                    # The accuracy of the entity detection in an image.
+                    # For example, for an image in which the "Eiffel Tower" entity is detected,
+                    # this field represents the confidence that there is a tower in the query
+                    # image. Range [0, 1].
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                    # for `LABEL_DETECTION` features.
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "y": 3.14, # Y coordinate.
+                      "x": 3.14, # X coordinate.
+                    },
+                  ],
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "y": 42, # Y coordinate.
+                      "x": 42, # X coordinate.
+                    },
+                  ],
+                },
+                "locale": "A String", # The language code for the locale in which the entity textual
+                    # `description` is expressed.
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
+                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+                    # image. For example, the relevancy of "tower" is likely higher to an image
+                    # containing the detected "Eiffel Tower" than to an image containing a
+                    # detected distant towering building, even though the confidence that
+                    # there is a tower in each image may be the same. Range [0, 1].
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "value": "A String", # Value of the property.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
+              },
+            ],
+            "faceAnnotations": [ # If present, face detection has completed successfully.
+              { # A face annotation object contains the results of face detection.
+                "surpriseLikelihood": "A String", # Surprise likelihood.
+                "landmarks": [ # Detected face landmarks.
+                  { # A face-specific landmark (for example, a face feature).
+                    "position": { # A 3D position in the image, used primarily for Face detection landmarks. # Face landmark position.
+                        # A valid Position must have both x and y coordinates.
+                        # The position coordinates are in the same scale as the original image.
+                      "x": 3.14, # X coordinate.
+                      "z": 3.14, # Z coordinate (or depth).
+                      "y": 3.14, # Y coordinate.
+                    },
+                    "type": "A String", # Face landmark type.
+                  },
+                ],
+                "angerLikelihood": "A String", # Anger likelihood.
+                "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
+                "joyLikelihood": "A String", # Joy likelihood.
+                "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
+                "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
+                    # pointing relative to the vertical plane perpendicular to the image. Range
+                    # [-180,180].
+                "underExposedLikelihood": "A String", # Under-exposed likelihood.
+                "blurredLikelihood": "A String", # Blurred likelihood.
+                "headwearLikelihood": "A String", # Headwear likelihood.
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the face. The coordinates of the bounding box
+                    # are in the original image's scale.
+                    # The bounding box is computed to "frame" the face in accordance with human
+                    # expectations. It is based on the landmarker results.
+                    # Note that one or more x and/or y coordinates may not be generated in the
+                    # `BoundingPoly` (the polygon will be unbounded) if only a partial face
+                    # appears in the image to be annotated.
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "y": 3.14, # Y coordinate.
+                      "x": 3.14, # X coordinate.
+                    },
+                  ],
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "y": 42, # Y coordinate.
+                      "x": 42, # X coordinate.
+                    },
+                  ],
+                },
+                "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+                    # of the face relative to the image vertical about the axis perpendicular to
+                    # the face. Range [-180,180].
+                "sorrowLikelihood": "A String", # Sorrow likelihood.
+                "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
+                    # pointing relative to the image's horizontal plane. Range [-180,180].
+                "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
+                    # `boundingPoly`, and encloses only the skin part of the face. Typically, it
+                    # is used to eliminate the face from any image analysis that detects the
+                    # "amount of skin" visible in an image. It is not based on the
+                    # landmarker results, only on the initial face detection, hence
+                    # the <code>fd</code> (face detection) prefix.
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "y": 3.14, # Y coordinate.
+                      "x": 3.14, # X coordinate.
+                    },
+                  ],
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "y": 42, # Y coordinate.
+                      "x": 42, # X coordinate.
+                    },
+                  ],
+                },
+              },
+            ],
+            "cropHintsAnnotation": { # Set of crop hints that are used to generate new crops when serving images. # If present, crop hints have completed successfully.
+              "cropHints": [ # Crop hint results.
+                { # Single crop hint that is used to generate a new crop when serving an image.
+                  "confidence": 3.14, # Confidence of this being a salient region.  Range [0, 1].
+                  "importanceFraction": 3.14, # Fraction of importance of this salient region with respect to the original
+                      # image.
+                  "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon for the crop region. The coordinates of the bounding
+                      # box are in the original image's scale.
+                    "normalizedVertices": [ # The bounding polygon normalized vertices.
+                      { # A vertex represents a 2D point in the image.
+                          # NOTE: the normalized vertex coordinates are relative to the original image
+                          # and range from 0 to 1.
+                        "y": 3.14, # Y coordinate.
+                        "x": 3.14, # X coordinate.
+                      },
+                    ],
+                    "vertices": [ # The bounding polygon vertices.
+                      { # A vertex represents a 2D point in the image.
+                          # NOTE: the vertex coordinates are in the same scale as the original image.
+                        "y": 42, # Y coordinate.
+                        "x": 42, # X coordinate.
+                      },
+                    ],
+                  },
+                },
+              ],
+            },
+            "labelAnnotations": [ # If present, label detection has completed successfully.
+              { # Set of detected entity features.
+                "score": 3.14, # Overall score of the result. Range [0, 1].
+                "locations": [ # The location information for the detected entity. Multiple
+                    # `LocationInfo` elements can be present because one location may
+                    # indicate the location of the scene in the image, and another location
+                    # may indicate the location of the place where the image was taken.
+                    # Location information is usually present for landmarks.
+                  { # Detected entity location information.
+                    "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
+                        # of doubles representing degrees latitude and degrees longitude. Unless
+                        # specified otherwise, this must conform to the
+                        # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
+                        # standard</a>. Values must be within normalized ranges.
+                      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+                      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+                    },
+                  },
+                ],
+                "mid": "A String", # Opaque entity ID. Some IDs may be available in
+                    # [Google Knowledge Graph Search
+                    # API](https://developers.google.com/knowledge-graph/).
+                "confidence": 3.14, # **Deprecated. Use `score` instead.**
+                    # The accuracy of the entity detection in an image.
+                    # For example, for an image in which the "Eiffel Tower" entity is detected,
+                    # this field represents the confidence that there is a tower in the query
+                    # image. Range [0, 1].
+                "boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this entity belongs. Not produced
+                    # for `LABEL_DETECTION` features.
+                  "normalizedVertices": [ # The bounding polygon normalized vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the normalized vertex coordinates are relative to the original image
+                        # and range from 0 to 1.
+                      "y": 3.14, # Y coordinate.
+                      "x": 3.14, # X coordinate.
+                    },
+                  ],
+                  "vertices": [ # The bounding polygon vertices.
+                    { # A vertex represents a 2D point in the image.
+                        # NOTE: the vertex coordinates are in the same scale as the original image.
+                      "y": 42, # Y coordinate.
+                      "x": 42, # X coordinate.
+                    },
+                  ],
+                },
+                "locale": "A String", # The language code for the locale in which the entity textual
+                    # `description` is expressed.
+                "description": "A String", # Entity textual description, expressed in its `locale` language.
+                "topicality": 3.14, # The relevancy of the ICA (Image Content Annotation) label to the
+                    # image. For example, the relevancy of "tower" is likely higher to an image
+                    # containing the detected "Eiffel Tower" than to an image containing a
+                    # detected distant towering building, even though the confidence that
+                    # there is a tower in each image may be the same. Range [0, 1].
+                "properties": [ # Some entities may have optional user-supplied `Property` (name/value)
+                    # fields, such as a score or string that qualifies the entity.
+                  { # A `Property` consists of a user-supplied name/value pair.
+                    "value": "A String", # Value of the property.
+                    "uint64Value": "A String", # Value of numeric properties.
+                    "name": "A String", # Name of the property.
+                  },
+                ],
+              },
+            ],
           },
         ],
         "inputConfig": { # The desired input location and metadata. # Information about the file for which this response is generated.
@@ -1313,6 +1294,25 @@
               # "image/gif" are supported. Wildcards are not supported.
         },
         "totalPages": 42, # This field gives the total number of pages in the file.
+        "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the failed request. The
+            # `responses` field will not be set in this case.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
       },
     ],
   }</pre>
@@ -1390,18 +1390,6 @@
             ],
           },
           &quot;productSearchParams&quot;: { # Parameters for a product search request. # Parameters for product search.
-            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
-                # on Product labels. We currently support an AND of OR of key-value
-                # expressions, where each expression within an OR must have the same key. An
-                # &#x27;=&#x27; should be used to connect the key and value.
-                #
-                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
-                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
-                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
-            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
-                #
-                # Format is:
-                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
             &quot;boundingPoly&quot;: { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
                 # If it is not specified, system discretion will be applied.
               &quot;normalizedVertices&quot;: [ # The bounding polygon normalized vertices.
@@ -1429,6 +1417,18 @@
                 # migrate existing products to these categories as well.
               &quot;A String&quot;,
             ],
+            &quot;filter&quot;: &quot;A String&quot;, # The filtering expression. This can be used to restrict search results based
+                # on Product labels. We currently support an AND of OR of key-value
+                # expressions, where each expression within an OR must have the same key. An
+                # &#x27;=&#x27; should be used to connect the key and value.
+                #
+                # For example, &quot;(color = red OR color = blue) AND brand = Google&quot; is
+                # acceptable, but &quot;(color = red OR brand = Google)&quot; is not acceptable.
+                # &quot;color: red&quot; is not acceptable because it uses a &#x27;:&#x27; instead of an &#x27;=&#x27;.
+            &quot;productSet&quot;: &quot;A String&quot;, # The resource name of a ProductSet to be searched for similar images.
+                #
+                # Format is:
+                # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
           },
         },
         &quot;outputConfig&quot;: { # The desired output location and metadata. # Required. The desired output location and metadata (e.g. format).
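
The relocated `filter` and `productSet` fields above describe the product search request surface. For context, a hypothetical `productSearchParams` value that follows the documented filter grammar might look like the sketch below; the project, location, and product-set IDs are placeholders, not values from this page.

```python
# Hypothetical productSearchParams dict matching the schema above.
product_search_params = {
    # Format: projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID
    "productSet": "projects/my-project/locations/us-west1/productSets/my-product-set",
    # Assumed category value; see the migration note in the schema.
    "productCategories": ["apparel"],
    # An AND of ORs: expressions inside one OR must share the same key,
    # and '=' (not ':') connects key and value.
    "filter": "(color = red OR color = blue) AND brand = Google",
}
```
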
@@ -1527,16 +1527,16 @@
         #
         # You can find out more about this error model and how to work with it in the
         # [API Design Guide](https://cloud.google.com/apis/design/errors).
-      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
-      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
       &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
           # message types for APIs to use.
         {
           &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
         },
       ],
+      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
+      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
     },
     &quot;metadata&quot;: { # Service-specific metadata associated with the operation.  It typically
         # contains progress information and common metadata such as create time.