chore: regenerate API reference docs (#889)

diff --git a/docs/dyn/vision_v1p2beta1.images.html b/docs/dyn/vision_v1p2beta1.images.html
index 43ddfc1..e20e9e0 100644
--- a/docs/dyn/vision_v1p2beta1.images.html
+++ b/docs/dyn/vision_v1p2beta1.images.html
@@ -75,22 +75,22 @@
 <h1><a href="vision_v1p2beta1.html">Cloud Vision API</a> . <a href="vision_v1p2beta1.images.html">images</a></h1>
 <h2>Instance Methods</h2>
 <p class="toc_element">
-  <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
+  <code><a href="#annotate">annotate(body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Run image detection and annotation for a batch of images.</p>
 <p class="toc_element">
-  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body, x__xgafv=None)</a></code></p>
+  <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Run asynchronous image detection and annotation for a list of images.</p>
 <h3>Method Details</h3>
 <div class="method">
-    <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
+    <code class="details" id="annotate">annotate(body=None, x__xgafv=None)</code>
   <pre>Run image detection and annotation for a batch of images.
 
 Args:
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Multiple image annotation requests are batched into a single service call.
-    "requests": [ # Individual image annotation requests for this batch.
+    "requests": [ # Required. Individual image annotation requests for this batch.
       { # Request for performing Google Cloud Vision API tasks over a user-provided
           # image, with user-requested features, and with context information.
         "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
@@ -98,16 +98,16 @@
             "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                 # of doubles representing degrees latitude and degrees longitude. Unless
                 # specified otherwise, this must conform to the
-                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                # standard</a>. Values must be within normalized ranges.
+                # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                # standard&lt;/a&gt;. Values must be within normalized ranges.
               "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
               "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
             },
             "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                 # of doubles representing degrees latitude and degrees longitude. Unless
                 # specified otherwise, this must conform to the
-                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                # standard</a>. Values must be within normalized ranges.
+                # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                # standard&lt;/a&gt;. Values must be within normalized ranges.
               "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
               "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
             },
@@ -119,17 +119,17 @@
               # setting a hint will help get better results (although it will be a
               # significant hindrance if the hint is wrong). Text detection returns an
               # error if one or more of the specified languages is not one of the
-              # [supported languages](/vision/docs/languages).
+              # [supported languages](https://cloud.google.com/vision/docs/languages).
             "A String",
           ],
           "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
             "productCategories": [ # The list of product categories to search in. Currently, we only consider
-                # the first category, and either "homegoods-v2", "apparel-v2", or "toys-v2"
-                # should be specified. The legacy categories "homegoods", "apparel", and
-                # "toys" are still supported but will be deprecated. For new products, please
-                # use "homegoods-v2", "apparel-v2", or "toys-v2" for better product search
-                # accuracy. It is recommended to migrate existing products to these
-                # categories as well.
+                # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
+                # "packagedgoods-v1", or "general-v1" should be specified. The legacy
+                # categories "homegoods", "apparel", and "toys" are still supported but will
+                # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
+                # or "toys-v2" for better product search accuracy. It is recommended to
+                # migrate existing products to these categories as well.
               "A String",
             ],
             "filter": "A String", # The filtering expression. This can be used to restrict search results based
@@ -145,7 +145,7 @@
                 # Format is:
                 # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
             "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
-                # Optional. If it is not specified, system discretion will be applied.
+                # If it is not specified, system discretion will be applied.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
                 { # A vertex represents a 2D point in the image.
                     # NOTE: the normalized vertex coordinates are relative to the original image
@@ -223,6 +223,18 @@
         ],
       },
     ],
+    "parent": "A String", # Optional. Target project and location to make a call.
+        # 
+        # Format: `projects/{project-id}/locations/{location-id}`.
+        # 
+        # If no parent is specified, a region will be chosen automatically.
+        # 
+        # Supported location-ids:
+        #     `us`: USA country only,
+        #     `asia`: East Asia areas, like Japan, Taiwan,
+        #     `eu`: The European Union.
+        # 
+        # Example: `projects/project-A/locations/eu`.
   }
 
   x__xgafv: string, V1 error format.
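
For quick verification of the regenerated surface, here is a minimal sketch of calling `images().annotate()` with the updated signature (`body=None`) and the new optional `parent` field. It assumes google-api-python-client with application default credentials; the bucket path and project ID are placeholders, and the `image`/`features` keys come from the existing AnnotateImageRequest shape rather than from this hunk.

```python
# Minimal sketch, assuming google-api-python-client and application default
# credentials; bucket, image path, and project ID are placeholders.
from googleapiclient.discovery import build

service = build('vision', 'v1p2beta1')

request_body = {
    # Required. Individual image annotation requests for this batch.
    'requests': [
        {
            'image': {'source': {'imageUri': 'gs://my-bucket/photo.jpg'}},
            'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
        }
    ],
    # Optional. Pin the call to a region; omit it to let the service choose.
    'parent': 'projects/project-A/locations/eu',
}

response = service.images().annotate(body=request_body).execute()
for resp in response.get('responses', []):
    for label in resp.get('labelAnnotations', []):
        print(label.get('description'), label.get('score'))
```
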
@@ -279,8 +291,8 @@
                 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                     # of doubles representing degrees latitude and degrees longitude. Unless
                     # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
+                    # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                    # standard&lt;/a&gt;. Values must be within normalized ranges.
                   "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                   "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                 },
@@ -411,7 +423,7 @@
             # This annotation provides the structural hierarchy for the OCR detected
             # text.
             # The hierarchy of an OCR extracted text structure is like this:
-            #     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+            #     TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
             # Each structural component, starting from Page, may further have their own
             # properties. Properties describe detected languages, breaks etc.. Please refer
             # to the TextAnnotation.TextProperty message definition below for more
@@ -538,8 +550,39 @@
                           },
                         ],
                       },
-                      "words": [ # List of words in this paragraph.
+                      "words": [ # List of all words in this paragraph.
                         { # A word representation.
+                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
+                              # The vertices are in the order of top-left, top-right, bottom-right,
+                              # bottom-left. When a rotation of the bounding box is detected the rotation
+                              # is represented as around the top-left corner as defined when the text is
+                              # read in the 'natural' orientation.
+                              # For example:
+                              #   * when the text is horizontal it might look like:
+                              #      0----1
+                              #      |    |
+                              #      3----2
+                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
+                              #      2----3
+                              #      |    |
+                              #      1----0
+                              #   and the vertex order will still be (0, 1, 2, 3).
+                            "normalizedVertices": [ # The bounding polygon normalized vertices.
+                              { # A vertex represents a 2D point in the image.
+                                  # NOTE: the normalized vertex coordinates are relative to the original image
+                                  # and range from 0 to 1.
+                                "y": 3.14, # Y coordinate.
+                                "x": 3.14, # X coordinate.
+                              },
+                            ],
+                            "vertices": [ # The bounding polygon vertices.
+                              { # A vertex represents a 2D point in the image.
+                                  # NOTE: the vertex coordinates are in the same scale as the original image.
+                                "y": 42, # Y coordinate.
+                                "x": 42, # X coordinate.
+                              },
+                            ],
+                          },
                           "symbols": [ # List of symbols in the word.
                               # The order of the symbols follows the natural reading order.
                             { # A single symbol representation.
@@ -557,7 +600,7 @@
                                   #      2----3
                                   #      |    |
                                   #      1----0
-                                  #   and the vertice order will still be (0, 1, 2, 3).
+                                  #   and the vertex order will still be (0, 1, 2, 3).
                                 "normalizedVertices": [ # The bounding polygon normalized vertices.
                                   { # A vertex represents a 2D point in the image.
                                       # NOTE: the normalized vertex coordinates are relative to the original image
@@ -592,37 +635,6 @@
                               },
                             },
                           ],
-                          "boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
-                              # The vertices are in the order of top-left, top-right, bottom-right,
-                              # bottom-left. When a rotation of the bounding box is detected the rotation
-                              # is represented as around the top-left corner as defined when the text is
-                              # read in the 'natural' orientation.
-                              # For example:
-                              #   * when the text is horizontal it might look like:
-                              #      0----1
-                              #      |    |
-                              #      3----2
-                              #   * when it's rotated 180 degrees around the top-left corner it becomes:
-                              #      2----3
-                              #      |    |
-                              #      1----0
-                              #   and the vertex order will still be (0, 1, 2, 3).
-                            "normalizedVertices": [ # The bounding polygon normalized vertices.
-                              { # A vertex represents a 2D point in the image.
-                                  # NOTE: the normalized vertex coordinates are relative to the original image
-                                  # and range from 0 to 1.
-                                "y": 3.14, # Y coordinate.
-                                "x": 3.14, # X coordinate.
-                              },
-                            ],
-                            "vertices": [ # The bounding polygon vertices.
-                              { # A vertex represents a 2D point in the image.
-                                  # NOTE: the vertex coordinates are in the same scale as the original image.
-                                "y": 42, # Y coordinate.
-                                "x": 42, # X coordinate.
-                              },
-                            ],
-                          },
                           "confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
                           "property": { # Additional information detected on the structural component. # Additional information detected for the word.
                             "detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
@@ -676,8 +688,8 @@
                 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                     # of doubles representing degrees latitude and degrees longitude. Unless
                     # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
+                    # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                    # standard&lt;/a&gt;. Values must be within normalized ranges.
                   "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                   "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                 },
@@ -787,14 +799,14 @@
                     #
                     #      static Color* toProto(UIColor* color) {
                     #          CGFloat red, green, blue, alpha;
-                    #          if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
+                    #          if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
                     #            return nil;
                     #          }
                     #          Color* result = [[Color alloc] init];
                     #          [result setRed:red];
                     #          [result setGreen:green];
                     #          [result setBlue:blue];
-                    #          if (alpha <= 0.9999) {
+                    #          if (alpha &lt;= 0.9999) {
                     #            [result setAlpha:floatWrapperWithValue(alpha)];
                     #          }
                     #          [result autorelease];
@@ -824,11 +836,11 @@
                     #     };
                     #
                     #     var rgbToCssColor_ = function(red, green, blue) {
-                    #       var rgbNumber = new Number((red << 16) | (green << 8) | blue);
+                    #       var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
                     #       var hexString = rgbNumber.toString(16);
                     #       var missingZeros = 6 - hexString.length;
                     #       var resultBuilder = ['#'];
-                    #       for (var i = 0; i < missingZeros; i++) {
+                    #       for (var i = 0; i &lt; missingZeros; i++) {
                     #          resultBuilder.push('0');
                     #       }
                     #       resultBuilder.push(hexString);
@@ -860,8 +872,9 @@
         },
         "faceAnnotations": [ # If present, face detection has completed successfully.
           { # A face annotation object contains the results of face detection.
-            "sorrowLikelihood": "A String", # Sorrow likelihood.
-            "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
+            "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
+                # pointing relative to the vertical plane perpendicular to the image. Range
+                # [-180,180].
             "underExposedLikelihood": "A String", # Under-exposed likelihood.
             "detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
             "joyLikelihood": "A String", # Joy likelihood.
@@ -877,7 +890,7 @@
                 "type": "A String", # Face landmark type.
               },
             ],
-            "surpriseLikelihood": "A String", # Surprise likelihood.
+            "sorrowLikelihood": "A String", # Sorrow likelihood.
             "blurredLikelihood": "A String", # Blurred likelihood.
             "tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
                 # pointing relative to the image's horizontal plane. Range [-180,180].
@@ -908,16 +921,14 @@
             "rollAngle": 3.14, # Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
                 # of the face relative to the image vertical about the axis perpendicular to
                 # the face. Range [-180,180].
-            "panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
-                # pointing relative to the vertical plane perpendicular to the image. Range
-                # [-180,180].
             "headwearLikelihood": "A String", # Headwear likelihood.
+            "surpriseLikelihood": "A String", # Surprise likelihood.
             "fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
                 # `boundingPoly`, and encloses only the skin part of the face. Typically, it
                 # is used to eliminate the face from any image analysis that detects the
                 # "amount of skin" visible in an image. It is not based on the
                 # landmarker results, only on the initial face detection, hence
-                # the <code>fd</code> (face detection) prefix.
+                # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
                 { # A vertex represents a 2D point in the image.
                     # NOTE: the normalized vertex coordinates are relative to the original image
@@ -934,6 +945,7 @@
                 },
               ],
             },
+            "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
           },
         ],
         "productSearchResults": { # Results for a product search request. # If present, product search has completed successfully.
@@ -943,6 +955,16 @@
               # matches in the union of all the per-product results.
             { # Information about the products similar to a single product in a query
                 # image.
+              "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
+                { # Prediction for what the object in the bounding box is.
+                  "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+                      # information, see
+                      # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+                  "score": 3.14, # Score of the result. Range [0, 1].
+                  "name": "A String", # Object name, expressed in its `language_code` language.
+                  "mid": "A String", # Object ID that should align with EntityAnnotation mid.
+                },
+              ],
               "results": [ # List of results, one for each product match.
                 { # Information about a product.
                   "image": "A String", # The resource name of the image from the product that is the closest match
@@ -958,7 +980,11 @@
                         # to be supported soon.
                         #
                         # Multiple values can be assigned to the same key. One product may have up to
-                        # 100 product_labels.
+                        # 500 product_labels.
+                        #
+                        # Notice that the total number of distinct product_labels over all products
+                        # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
+                        # will refuse to work for that ProductSet.
                       { # A product label represented as a key-value pair.
                         "value": "A String", # The value of the label attached to the product. Cannot be empty and
                             # cannot exceed 128 bytes.
@@ -968,20 +994,18 @@
                     ],
                     "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
                         # 4096 characters long.
+                    "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
+                        # characters long.
+                    "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
+                        # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
+                        # "homegoods", "apparel", and "toys" are still supported, but these should
+                        # not be used for new products.
                     "name": "A String", # The resource name of the product.
                         #
                         # Format is:
                         # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                         #
                         # This field is ignored when creating a product.
-                    "productCategory": "A String", # The category for the product identified by the reference image. This should
-                        # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
-                        # "homegoods", "apparel", and "toys" are still supported, but these should
-                        # not be used for new products.
-                        #
-                        # This field is immutable.
-                    "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
-                        # characters long.
                   },
                 },
               ],
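
As a reading aid for the response side, a hedged sketch of walking `productSearchResults`, including the newly documented `objectAnnotations` list. It assumes `response` is the dict returned by `annotate(...).execute()` for a request that asked for PRODUCT_SEARCH, and that grouped results live under `productGroupedResults` as elsewhere in this reference.

```python
# Hedged sketch of reading the product search portion of the response above.
# `response` is assumed to be the dict returned by
# service.images().annotate(body=...).execute() for a PRODUCT_SEARCH request.
search = response['responses'][0].get('productSearchResults', {})
for group in search.get('productGroupedResults', []):
    # New in this revision: generic predictions for the object in each box.
    for obj in group.get('objectAnnotations', []):
        print('object:', obj.get('name'), 'score:', obj.get('score'))
    # One entry per matched product.
    for result in group.get('results', []):
        product = result.get('product', {})
        print('  match:', product.get('displayName'),
              '| category:', product.get('productCategory'))
```
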
@@ -1019,7 +1043,11 @@
                     # to be supported soon.
                     #
                     # Multiple values can be assigned to the same key. One product may have up to
-                    # 100 product_labels.
+                    # 500 product_labels.
+                    #
+                    # Notice that the total number of distinct product_labels over all products
+                    # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
+                    # will refuse to work for that ProductSet.
                   { # A product label represented as a key-value pair.
                     "value": "A String", # The value of the label attached to the product. Cannot be empty and
                         # cannot exceed 128 bytes.
@@ -1029,20 +1057,18 @@
                 ],
                 "displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
                     # 4096 characters long.
+                "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
+                    # characters long.
+                "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
+                    # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
+                    # "homegoods", "apparel", and "toys" are still supported, but these should
+                    # not be used for new products.
                 "name": "A String", # The resource name of the product.
                     #
                     # Format is:
                     # `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
                     #
                     # This field is ignored when creating a product.
-                "productCategory": "A String", # The category for the product identified by the reference image. This should
-                    # be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
-                    # "homegoods", "apparel", and "toys" are still supported, but these should
-                    # not be used for new products.
-                    #
-                    # This field is immutable.
-                "description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
-                    # characters long.
               },
             },
           ],
@@ -1077,8 +1103,8 @@
                 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                     # of doubles representing degrees latitude and degrees longitude. Unless
                     # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
+                    # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                    # standard&lt;/a&gt;. Values must be within normalized ranges.
                   "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                   "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                 },
@@ -1140,8 +1166,8 @@
                 "latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
                     # of doubles representing degrees latitude and degrees longitude. Unless
                     # specified otherwise, this must conform to the
-                    # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                    # standard</a>. Values must be within normalized ranges.
+                    # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                    # standard&lt;/a&gt;. Values must be within normalized ranges.
                   "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
                   "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
                 },
@@ -1236,7 +1262,7 @@
 </div>
 
 <div class="method">
-    <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body, x__xgafv=None)</code>
+    <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</code>
   <pre>Run asynchronous image detection and annotation for a list of images.
 
 Progress and results can be retrieved through the
@@ -1248,7 +1274,7 @@
 GCS bucket, each json file containing BatchAnnotateImagesResponse proto.
 
 Args:
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Request for async image annotation for a list of images.
@@ -1289,7 +1315,7 @@
             # and overflows into multiple sharded files.
       },
     },
-    "requests": [ # Individual image annotation requests for this batch.
+    "requests": [ # Required. Individual image annotation requests for this batch.
       { # Request for performing Google Cloud Vision API tasks over a user-provided
           # image, with user-requested features, and with context information.
         "imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image.
@@ -1297,16 +1323,16 @@
             "minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
                 # of doubles representing degrees latitude and degrees longitude. Unless
                 # specified otherwise, this must conform to the
-                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                # standard</a>. Values must be within normalized ranges.
+                # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                # standard&lt;/a&gt;. Values must be within normalized ranges.
               "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
               "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
             },
             "maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
                 # of doubles representing degrees latitude and degrees longitude. Unless
                 # specified otherwise, this must conform to the
-                # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
-                # standard</a>. Values must be within normalized ranges.
+                # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+                # standard&lt;/a&gt;. Values must be within normalized ranges.
               "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
               "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
             },
@@ -1318,17 +1344,17 @@
               # setting a hint will help get better results (although it will be a
               # significant hindrance if the hint is wrong). Text detection returns an
               # error if one or more of the specified languages is not one of the
-              # [supported languages](/vision/docs/languages).
+              # [supported languages](https://cloud.google.com/vision/docs/languages).
             "A String",
           ],
           "productSearchParams": { # Parameters for a product search request. # Parameters for product search.
             "productCategories": [ # The list of product categories to search in. Currently, we only consider
-                # the first category, and either "homegoods-v2", "apparel-v2", or "toys-v2"
-                # should be specified. The legacy categories "homegoods", "apparel", and
-                # "toys" are still supported but will be deprecated. For new products, please
-                # use "homegoods-v2", "apparel-v2", or "toys-v2" for better product search
-                # accuracy. It is recommended to migrate existing products to these
-                # categories as well.
+                # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
+                # "packagedgoods-v1", or "general-v1" should be specified. The legacy
+                # categories "homegoods", "apparel", and "toys" are still supported but will
+                # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
+                # or "toys-v2" for better product search accuracy. It is recommended to
+                # migrate existing products to these categories as well.
               "A String",
             ],
             "filter": "A String", # The filtering expression. This can be used to restrict search results based
@@ -1344,7 +1370,7 @@
                 # Format is:
                 # `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
             "boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
-                # Optional. If it is not specified, system discretion will be applied.
+                # If it is not specified, system discretion will be applied.
               "normalizedVertices": [ # The bounding polygon normalized vertices.
                 { # A vertex represents a 2D point in the image.
                     # NOTE: the normalized vertex coordinates are relative to the original image
@@ -1422,6 +1448,18 @@
         ],
       },
     ],
+    "parent": "A String", # Optional. Target project and location to make a call.
+        # 
+        # Format: `projects/{project-id}/locations/{location-id}`.
+        # 
+        # If no parent is specified, a region will be chosen automatically.
+        # 
+        # Supported location-ids:
+        #     `us`: USA country only,
+        #     `asia`: East Asia areas, like Japan, Taiwan,
+        #     `eu`: The European Union.
+        # 
+        # Example: `projects/project-A/locations/eu`.
   }
 
   x__xgafv: string, V1 error format.
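
And the asynchronous variant, sketched under the same assumptions: the request mirrors `annotate()` (same `requests` list and optional `parent`) plus a GCS output destination. The `outputConfig`/`gcsDestination`/`batchSize` names follow the standard Vision OutputConfig message referenced above but are not fully shown in this diff, so treat them as assumptions.

```python
# Asynchronous variant, reusing the `service` object from the first sketch.
# The outputConfig field names follow the Vision OutputConfig message and are
# an assumption here; the GCS paths are placeholders.
operation = service.images().asyncBatchAnnotate(body={
    'requests': [
        {
            'image': {'source': {'imageUri': 'gs://my-bucket/photo.jpg'}},
            'features': [{'type': 'DOCUMENT_TEXT_DETECTION'}],
        }
    ],
    'outputConfig': {
        'gcsDestination': {'uri': 'gs://my-bucket/vision-output/'},
        'batchSize': 20,  # responses per sharded output file
    },
    'parent': 'projects/project-A/locations/eu',  # Optional, as above.
}).execute()

# asyncBatchAnnotate returns a long-running Operation; poll it by name via the
# Operations API to retrieve progress and, eventually, the GCS output.
print('operation:', operation.get('name'))
```
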