chore: regens API reference docs (#889)
diff --git a/docs/dyn/vision_v1.files.html b/docs/dyn/vision_v1.files.html
index 8f8b819..dd96a69 100644
--- a/docs/dyn/vision_v1.files.html
+++ b/docs/dyn/vision_v1.files.html
@@ -75,14 +75,14 @@
<h1><a href="vision_v1.html">Cloud Vision API</a> . <a href="vision_v1.files.html">files</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
- <code><a href="#annotate">annotate(body, x__xgafv=None)</a></code></p>
+ <code><a href="#annotate">annotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Service that performs image detection and annotation for a batch of files.</p>
<p class="toc_element">
- <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body, x__xgafv=None)</a></code></p>
+ <code><a href="#asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Run asynchronous image detection and annotation for a list of generic</p>
<h3>Method Details</h3>
<div class="method">
- <code class="details" id="annotate">annotate(body, x__xgafv=None)</code>
+ <code class="details" id="annotate">annotate(body=None, x__xgafv=None)</code>
<pre>Service that performs image detection and annotation for a batch of files.
Now only "application/pdf", "image/tiff" and "image/gif" are supported.
@@ -92,11 +92,11 @@
extracted.
Args:
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # A list of requests to annotate files using the BatchAnnotateFiles API.
- "requests": [ # The list of file annotation requests. Right now we support only one
+ "requests": [ # Required. The list of file annotation requests. Right now we support only one
# AnnotateFileRequest in BatchAnnotateFilesRequest.
{ # A request to annotate one single file, e.g. a PDF, TIFF or GIF file.
"imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
@@ -104,16 +104,16 @@
"minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+            # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+            # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
"maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+            # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+            # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
@@ -125,10 +125,19 @@
# setting a hint will help get better results (although it will be a
# significant hindrance if the hint is wrong). Text detection returns an
# error if one or more of the specified languages is not one of the
- # [supported languages](/vision/docs/languages).
+ # [supported languages](https://cloud.google.com/vision/docs/languages).
"A String",
],
"productSearchParams": { # Parameters for a product search request. # Parameters for product search.
+ "productCategories": [ # The list of product categories to search in. Currently, we only consider
+ # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
+ # "packagedgoods-v1", or "general-v1" should be specified. The legacy
+ # categories "homegoods", "apparel", and "toys" are still supported but will
+ # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
+ # or "toys-v2" for better product search accuracy. It is recommended to
+ # migrate existing products to these categories as well.
+ "A String",
+ ],
"filter": "A String", # The filtering expression. This can be used to restrict search results based
# on Product labels. We currently support an AND of OR of key-value
# expressions, where each expression within an OR must have the same key. An
@@ -137,21 +146,12 @@
# For example, "(color = red OR color = blue) AND brand = Google" is
# acceptable, but "(color = red OR brand = Google)" is not acceptable.
# "color: red" is not acceptable because it uses a ':' instead of an '='.
- "productCategories": [ # The list of product categories to search in. Currently, we only consider
- # the first category, and either "homegoods-v2", "apparel-v2", or "toys-v2"
- # should be specified. The legacy categories "homegoods", "apparel", and
- # "toys" are still supported but will be deprecated. For new products, please
- # use "homegoods-v2", "apparel-v2", or "toys-v2" for better product search
- # accuracy. It is recommended to migrate existing products to these
- # categories as well.
- "A String",
- ],
"productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
#
# Format is:
# `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
"boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
- # Optional. If it is not specified, system discretion will be applied.
+ # If it is not specified, system discretion will be applied.
"normalizedVertices": [ # The bounding polygon normalized vertices.
{ # A vertex represents a 2D point in the image.
# NOTE: the normalized vertex coordinates are relative to the original image
@@ -183,18 +183,6 @@
"includeGeoResults": True or False, # Whether to include results derived from the geo information in the image.
},
},
- "features": [ # Required. Requested features.
- { # The type of Google Cloud Vision API detection to perform, and the maximum
- # number of results to return for that type. Multiple `Feature` objects can
- # be specified in the `features` list.
- "model": "A String", # Model to use for the feature.
- # Supported values: "builtin/stable" (the default if unset) and
- # "builtin/latest".
- "type": "A String", # The feature type.
- "maxResults": 42, # Maximum number of results of this type. Does not apply to
- # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
- },
- ],
"pages": [ # Pages of the file to perform image annotation.
#
          # Pages start from 1; we assume the first page of the file is page 1.
@@ -212,21 +200,45 @@
42,
],
"inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
+ "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
+ # "image/gif" are supported. Wildcards are not supported.
"content": "A String", # File content, represented as a stream of bytes.
# Note: As with all `bytes` fields, protobuffers use a pure binary
# representation, whereas JSON representations use base64.
#
# Currently, this field only works for BatchAnnotateFiles requests. It does
# not work for AsyncBatchAnnotateFiles requests.
- "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
- # "image/gif" are supported. Wildcards are not supported.
"gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
"uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
# Google Cloud Storage object. Wildcards are not currently supported.
},
},
+ "features": [ # Required. Requested features.
+ { # The type of Google Cloud Vision API detection to perform, and the maximum
+ # number of results to return for that type. Multiple `Feature` objects can
+ # be specified in the `features` list.
+ "model": "A String", # Model to use for the feature.
+ # Supported values: "builtin/stable" (the default if unset) and
+ # "builtin/latest".
+ "type": "A String", # The feature type.
+ "maxResults": 42, # Maximum number of results of this type. Does not apply to
+ # `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ },
+ ],
},
],
+ "parent": "A String", # Optional. Target project and location to make a call.
+ #
+ # Format: `projects/{project-id}/locations/{location-id}`.
+ #
+ # If no parent is specified, a region will be chosen automatically.
+ #
+ # Supported location-ids:
+ # `us`: USA country only,
+        #     `asia`: East Asia areas, like Japan, Taiwan,
+ # `eu`: The European Union.
+ #
+ # Example: `projects/project-A/locations/eu`.
}
x__xgafv: string, V1 error format.
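For reference, a minimal sketch of the updated request shape using the generated Python client — the project ID and input file below are placeholders, and `DOCUMENT_TEXT_DETECTION` is one of the feature types accepted by the `features` list documented above:

```python
import base64

from googleapiclient.discovery import build

# Build the Vision v1 client (uses Application Default Credentials).
service = build("vision", "v1")

# Placeholder input; "application/pdf", "image/tiff" and "image/gif" are the
# MIME types supported per the docs above.
with open("invoice.pdf", "rb") as f:
    pdf_content = base64.b64encode(f.read()).decode("utf-8")

request_body = {
    # Optional routing field added in this regen.
    "parent": "projects/project-A/locations/eu",
    "requests": [
        {
            "inputConfig": {
                "mimeType": "application/pdf",
                "content": pdf_content,  # base64, per the JSON representation of bytes
            },
            "features": [{"type": "DOCUMENT_TEXT_DETECTION"}],
            "pages": [1],
        }
    ],
}

# `body` is now an optional keyword argument rather than a required positional one.
response = service.files().annotate(body=request_body).execute()
```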
@@ -243,16 +255,31 @@
{ # Response to a single file annotation request. A file may contain one or more
# images, which individually have their own responses.
"totalPages": 42, # This field gives the total number of pages in the file.
- "responses": [ # Individual responses to images found within the file.
+ "inputConfig": { # The desired input location and metadata. # Information about the file for which this response is generated.
+ "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
+ # "image/gif" are supported. Wildcards are not supported.
+ "content": "A String", # File content, represented as a stream of bytes.
+ # Note: As with all `bytes` fields, protobuffers use a pure binary
+ # representation, whereas JSON representations use base64.
+ #
+ # Currently, this field only works for BatchAnnotateFiles requests. It does
+ # not work for AsyncBatchAnnotateFiles requests.
+ "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
+ "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
+ # Google Cloud Storage object. Wildcards are not currently supported.
+ },
+ },
+ "responses": [ # Individual responses to images found within the file. This field will be
+ # empty if the `error` field is set.
{ # Response to an image annotation request.
"safeSearchAnnotation": { # Set of features pertaining to the image, computed by computer vision # If present, safe-search annotation has completed successfully.
# methods over safe-search verticals (for example, adult, spoof, medical,
# violence).
+ "medical": "A String", # Likelihood that this is a medical image.
"spoof": "A String", # Spoof likelihood. The likelihood that an modification
# was made to the image's canonical version to make it appear
# funny or offensive.
"violence": "A String", # Likelihood that this image contains violent content.
- "medical": "A String", # Likelihood that this is a medical image.
"adult": "A String", # Represents the adult content likelihood for the image. Adult content may
# contain elements such as nudity, pornographic images or cartoons, or
# sexual activities.
@@ -285,8 +312,8 @@
"latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+              # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+              # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
@@ -334,8 +361,8 @@
],
"pagesWithMatchingImages": [ # Web pages containing the matching images from the Internet.
{ # Metadata for web pages.
- "pageTitle": "A String", # Title for the web page, may contain HTML markups.
"url": "A String", # The result web page URL.
+ "pageTitle": "A String", # Title for the web page, may contain HTML markups.
"score": 3.14, # (Deprecated) Overall relevancy score for the web page.
"partialMatchingImages": [ # Partial matching images on the page.
# Those images are similar enough to share some key-point features. For
@@ -394,7 +421,7 @@
# information, see
# http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
"score": 3.14, # Score of the result. Range [0, 1].
- "mid": "A String", # Object ID that should align with EntityAnnotation mid.
+ "name": "A String", # Object name, expressed in its `language_code` language.
"boundingPoly": { # A bounding polygon for the detected image annotation. # Image region to which this object belongs. This must be populated.
"normalizedVertices": [ # The bounding polygon normalized vertices.
{ # A vertex represents a 2D point in the image.
@@ -412,7 +439,7 @@
},
],
},
- "name": "A String", # Object name, expressed in its `language_code` language.
+ "mid": "A String", # Object ID that should align with EntityAnnotation mid.
},
],
"fullTextAnnotation": { # TextAnnotation contains a structured representation of OCR extracted text. # If present, text (OCR) detection or document (OCR) text detection has
@@ -420,7 +447,7 @@
# This annotation provides the structural hierarchy for the OCR detected
# text.
# The hierarchy of an OCR extracted text structure is like this:
- # TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+        # TextAnnotation -&gt; Page -&gt; Block -&gt; Paragraph -&gt; Word -&gt; Symbol
# Each structural component, starting from Page, may further have their own
        # properties. Properties describe detected languages, breaks, etc. Please refer
# to the TextAnnotation.TextProperty message definition below for more
@@ -433,8 +460,8 @@
"confidence": 3.14, # Confidence of the OCR results on the page. Range [0, 1].
"property": { # Additional information detected on the structural component. # Additional information detected on the page.
"detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
- "isPrefix": True or False, # True if break prepends the element.
"type": "A String", # Detected break type.
+ "isPrefix": True or False, # True if break prepends the element.
},
"detectedLanguages": [ # A list of detected languages together with confidence.
{ # Detected language for a structural component.
@@ -483,11 +510,11 @@
},
],
},
- "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
+ "blockType": "A String", # Detected block type (text, image etc) for this block.
"property": { # Additional information detected on the structural component. # Additional information detected for the block.
"detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
- "isPrefix": True or False, # True if break prepends the element.
"type": "A String", # Detected break type.
+ "isPrefix": True or False, # True if break prepends the element.
},
"detectedLanguages": [ # A list of detected languages together with confidence.
{ # Detected language for a structural component.
@@ -498,7 +525,7 @@
},
],
},
- "blockType": "A String", # Detected block type (text, image etc) for this block.
+ "confidence": 3.14, # Confidence of the OCR results on the block. Range [0, 1].
"paragraphs": [ # List of paragraphs in this block (if this blocks is of type text).
{ # Structural unit of text representing a number of words in certain order.
"boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the paragraph.
@@ -535,8 +562,8 @@
"confidence": 3.14, # Confidence of the OCR results for the paragraph. Range [0, 1].
"property": { # Additional information detected on the structural component. # Additional information detected for the paragraph.
"detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
- "isPrefix": True or False, # True if break prepends the element.
"type": "A String", # Detected break type.
+ "isPrefix": True or False, # True if break prepends the element.
},
"detectedLanguages": [ # A list of detected languages together with confidence.
{ # Detected language for a structural component.
@@ -547,7 +574,7 @@
},
],
},
- "words": [ # List of words in this paragraph.
+ "words": [ # List of all words in this paragraph.
{ # A word representation.
"boundingBox": { # A bounding polygon for the detected image annotation. # The bounding box for the word.
# The vertices are in the order of top-left, top-right, bottom-right,
@@ -597,7 +624,7 @@
# 2----3
# | |
# 1----0
- # and the vertice order will still be (0, 1, 2, 3).
+ # and the vertex order will still be (0, 1, 2, 3).
"normalizedVertices": [ # The bounding polygon normalized vertices.
{ # A vertex represents a 2D point in the image.
# NOTE: the normalized vertex coordinates are relative to the original image
@@ -618,8 +645,8 @@
"confidence": 3.14, # Confidence of the OCR results for the symbol. Range [0, 1].
"property": { # Additional information detected on the structural component. # Additional information detected for the symbol.
"detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
- "isPrefix": True or False, # True if break prepends the element.
"type": "A String", # Detected break type.
+ "isPrefix": True or False, # True if break prepends the element.
},
"detectedLanguages": [ # A list of detected languages together with confidence.
{ # Detected language for a structural component.
@@ -635,8 +662,8 @@
"confidence": 3.14, # Confidence of the OCR results for the word. Range [0, 1].
"property": { # Additional information detected on the structural component. # Additional information detected for the word.
"detectedBreak": { # Detected start or end of a structural component. # Detected start or end of a text segment.
- "isPrefix": True or False, # True if break prepends the element.
"type": "A String", # Detected break type.
+ "isPrefix": True or False, # True if break prepends the element.
},
"detectedLanguages": [ # A list of detected languages together with confidence.
{ # Detected language for a structural component.
@@ -682,8 +709,8 @@
"latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+              # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+              # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
@@ -796,14 +823,14 @@
#
# static Color* toProto(UIColor* color) {
# CGFloat red, green, blue, alpha;
- # if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
+          #      if (![color getRed:&amp;red green:&amp;green blue:&amp;blue alpha:&amp;alpha]) {
# return nil;
# }
# Color* result = [[Color alloc] init];
# [result setRed:red];
# [result setGreen:green];
# [result setBlue:blue];
- # if (alpha <= 0.9999) {
+          #      if (alpha &lt;= 0.9999) {
# [result setAlpha:floatWrapperWithValue(alpha)];
# }
# [result autorelease];
@@ -833,11 +860,11 @@
# };
#
# var rgbToCssColor_ = function(red, green, blue) {
- # var rgbNumber = new Number((red << 16) | (green << 8) | blue);
+          #      var rgbNumber = new Number((red &lt;&lt; 16) | (green &lt;&lt; 8) | blue);
# var hexString = rgbNumber.toString(16);
# var missingZeros = 6 - hexString.length;
# var resultBuilder = ['#'];
- # for (var i = 0; i < missingZeros; i++) {
+          #      for (var i = 0; i &lt; missingZeros; i++) {
# resultBuilder.push('0');
# }
# resultBuilder.push(hexString);
@@ -872,6 +899,8 @@
"panAngle": 3.14, # Yaw angle, which indicates the leftward/rightward angle that the face is
# pointing relative to the vertical plane perpendicular to the image. Range
# [-180,180].
+ "sorrowLikelihood": "A String", # Sorrow likelihood.
+ "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
"underExposedLikelihood": "A String", # Under-exposed likelihood.
"detectionConfidence": 3.14, # Detection confidence. Range [0, 1].
"joyLikelihood": "A String", # Joy likelihood.
@@ -887,7 +916,7 @@
"type": "A String", # Face landmark type.
},
],
- "sorrowLikelihood": "A String", # Sorrow likelihood.
+ "surpriseLikelihood": "A String", # Surprise likelihood.
"blurredLikelihood": "A String", # Blurred likelihood.
"tiltAngle": 3.14, # Pitch angle, which indicates the upwards/downwards angle that the face is
# pointing relative to the image's horizontal plane. Range [-180,180].
@@ -919,13 +948,12 @@
# of the face relative to the image vertical about the axis perpendicular to
# the face. Range [-180,180].
"headwearLikelihood": "A String", # Headwear likelihood.
- "surpriseLikelihood": "A String", # Surprise likelihood.
"fdBoundingPoly": { # A bounding polygon for the detected image annotation. # The `fd_bounding_poly` bounding polygon is tighter than the
# `boundingPoly`, and encloses only the skin part of the face. Typically, it
# is used to eliminate the face from any image analysis that detects the
# "amount of skin" visible in an image. It is not based on the
# landmarker results, only on the initial face detection, hence
- # the <code>fd</code> (face detection) prefix.
+          # the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
"normalizedVertices": [ # The bounding polygon normalized vertices.
{ # A vertex represents a 2D point in the image.
# NOTE: the normalized vertex coordinates are relative to the original image
@@ -942,7 +970,6 @@
},
],
},
- "landmarkingConfidence": 3.14, # Face landmarking confidence. Range [0, 1].
},
],
"productSearchResults": { # Results for a product search request. # If present, product search has completed successfully.
@@ -952,6 +979,16 @@
# matches in the union of all the per-product results.
{ # Information about the products similar to a single product in a query
# image.
+ "objectAnnotations": [ # List of generic predictions for the object in the bounding box.
+ { # Prediction for what the object in the bounding box is.
+ "languageCode": "A String", # The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ # information, see
+ # http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ "score": 3.14, # Score of the result. Range [0, 1].
+ "mid": "A String", # Object ID that should align with EntityAnnotation mid.
+ "name": "A String", # Object name, expressed in its `language_code` language.
+ },
+ ],
"results": [ # List of results, one for each product match.
{ # Information about a product.
"image": "A String", # The resource name of the image from the product that is the closest match
@@ -967,12 +1004,16 @@
# to be supported soon.
#
# Multiple values can be assigned to the same key. One product may have up to
- # 100 product_labels.
+ # 500 product_labels.
+ #
+ # Notice that the total number of distinct product_labels over all products
+ # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
+ # will refuse to work for that ProductSet.
{ # A product label represented as a key-value pair.
- "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
- # exceed 128 bytes.
"value": "A String", # The value of the label attached to the product. Cannot be empty and
# cannot exceed 128 bytes.
+ "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
+ # exceed 128 bytes.
},
],
"displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
@@ -983,12 +1024,10 @@
# `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
#
# This field is ignored when creating a product.
- "productCategory": "A String", # The category for the product identified by the reference image. This should
+ "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
# be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
# "homegoods", "apparel", and "toys" are still supported, but these should
# not be used for new products.
- #
- # This field is immutable.
"description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
# characters long.
},
@@ -1028,12 +1067,16 @@
# to be supported soon.
#
# Multiple values can be assigned to the same key. One product may have up to
- # 100 product_labels.
+ # 500 product_labels.
+ #
+ # Notice that the total number of distinct product_labels over all products
+ # in one ProductSet cannot exceed 1M, otherwise the product search pipeline
+ # will refuse to work for that ProductSet.
{ # A product label represented as a key-value pair.
- "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
- # exceed 128 bytes.
"value": "A String", # The value of the label attached to the product. Cannot be empty and
# cannot exceed 128 bytes.
+ "key": "A String", # The key of the label attached to the product. Cannot be empty and cannot
+ # exceed 128 bytes.
},
],
"displayName": "A String", # The user-provided name for this Product. Must not be empty. Must be at most
@@ -1044,12 +1087,10 @@
# `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
#
# This field is ignored when creating a product.
- "productCategory": "A String", # The category for the product identified by the reference image. This should
+ "productCategory": "A String", # Immutable. The category for the product identified by the reference image. This should
# be either "homegoods-v2", "apparel-v2", or "toys-v2". The legacy categories
# "homegoods", "apparel", and "toys" are still supported, but these should
# not be used for new products.
- #
- # This field is immutable.
"description": "A String", # User-provided metadata to be stored with this product. Must be at most 4096
# characters long.
},
@@ -1083,8 +1124,8 @@
"latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+              # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+              # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
@@ -1146,8 +1187,8 @@
"latLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # lat/long location coordinates.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+              # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+              # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
@@ -1241,19 +1282,24 @@
},
},
],
- "inputConfig": { # The desired input location and metadata. # Information about the file for which this response is generated.
- "content": "A String", # File content, represented as a stream of bytes.
- # Note: As with all `bytes` fields, protobuffers use a pure binary
- # representation, whereas JSON representations use base64.
- #
- # Currently, this field only works for BatchAnnotateFiles requests. It does
- # not work for AsyncBatchAnnotateFiles requests.
- "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
- # "image/gif" are supported. Wildcards are not supported.
- "gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
- "uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
- # Google Cloud Storage object. Wildcards are not currently supported.
- },
+ "error": { # The `Status` type defines a logical error model that is suitable for # If set, represents the error message for the failed request. The
+ # `responses` field will not be set in this case.
+ # different programming environments, including REST APIs and RPC APIs. It is
+ # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+ # three pieces of data: error code, error message, and error details.
+ #
+ # You can find out more about this error model and how to work with it in the
+ # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "details": [ # A list of messages that carry the error details. There is a common set of
+ # message types for APIs to use.
+ {
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ ],
},
},
],
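Since `responses` in each `AnnotateFileResponse` is now documented to be empty whenever the new per-file `error` field is set, callers should branch on `error` first. Continuing the sketch above — `text` is TextAnnotation's top-level UTF-8 field, which falls outside the hunks shown here:

```python
# Per-file results: `responses` is empty whenever `error` is set.
for file_response in response.get("responses", []):
    error = file_response.get("error")
    if error:
        print("file failed: code=%s message=%s"
              % (error.get("code"), error.get("message")))
        continue
    print("total pages:", file_response.get("totalPages"))
    for image_response in file_response.get("responses", []):
        # Full OCR text for the page, when text detection was requested.
        full_text = image_response.get("fullTextAnnotation", {}).get("text", "")
        print(full_text[:200])
```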
@@ -1261,7 +1307,7 @@
</div>
<div class="method">
- <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body, x__xgafv=None)</code>
+ <code class="details" id="asyncBatchAnnotate">asyncBatchAnnotate(body=None, x__xgafv=None)</code>
<pre>Run asynchronous image detection and annotation for a list of generic
files, such as PDF files, which may contain multiple pages and multiple
images per page. Progress and results can be retrieved through the
@@ -1270,28 +1316,28 @@
`Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results).
Args:
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # Multiple async file annotation requests are batched into a single service
# call.
- "requests": [ # Individual async file annotation requests for this batch.
+ "requests": [ # Required. Individual async file annotation requests for this batch.
{ # An offline file annotation request.
"imageContext": { # Image context and/or feature-specific parameters. # Additional context that may accompany the image(s) in the file.
"latLongRect": { # Rectangle determined by min and max `LatLng` pairs. # Not used.
"minLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Min lat/long pair.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+            # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+            # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
"maxLatLng": { # An object representing a latitude/longitude pair. This is expressed as a pair # Max lat/long pair.
# of doubles representing degrees latitude and degrees longitude. Unless
# specified otherwise, this must conform to the
- # <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
- # standard</a>. Values must be within normalized ranges.
+            # &lt;a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf"&gt;WGS84
+            # standard&lt;/a&gt;. Values must be within normalized ranges.
"latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
"longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
},
@@ -1303,10 +1349,19 @@
# setting a hint will help get better results (although it will be a
# significant hindrance if the hint is wrong). Text detection returns an
# error if one or more of the specified languages is not one of the
- # [supported languages](/vision/docs/languages).
+ # [supported languages](https://cloud.google.com/vision/docs/languages).
"A String",
],
"productSearchParams": { # Parameters for a product search request. # Parameters for product search.
+ "productCategories": [ # The list of product categories to search in. Currently, we only consider
+ # the first category, and either "homegoods-v2", "apparel-v2", "toys-v2",
+ # "packagedgoods-v1", or "general-v1" should be specified. The legacy
+ # categories "homegoods", "apparel", and "toys" are still supported but will
+ # be deprecated. For new products, please use "homegoods-v2", "apparel-v2",
+ # or "toys-v2" for better product search accuracy. It is recommended to
+ # migrate existing products to these categories as well.
+ "A String",
+ ],
"filter": "A String", # The filtering expression. This can be used to restrict search results based
# on Product labels. We currently support an AND of OR of key-value
# expressions, where each expression within an OR must have the same key. An
@@ -1315,21 +1370,12 @@
# For example, "(color = red OR color = blue) AND brand = Google" is
# acceptable, but "(color = red OR brand = Google)" is not acceptable.
# "color: red" is not acceptable because it uses a ':' instead of an '='.
- "productCategories": [ # The list of product categories to search in. Currently, we only consider
- # the first category, and either "homegoods-v2", "apparel-v2", or "toys-v2"
- # should be specified. The legacy categories "homegoods", "apparel", and
- # "toys" are still supported but will be deprecated. For new products, please
- # use "homegoods-v2", "apparel-v2", or "toys-v2" for better product search
- # accuracy. It is recommended to migrate existing products to these
- # categories as well.
- "A String",
- ],
"productSet": "A String", # The resource name of a ProductSet to be searched for similar images.
#
# Format is:
# `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
"boundingPoly": { # A bounding polygon for the detected image annotation. # The bounding polygon around the area of interest in the image.
- # Optional. If it is not specified, system discretion will be applied.
+ # If it is not specified, system discretion will be applied.
"normalizedVertices": [ # The bounding polygon normalized vertices.
{ # A vertex represents a 2D point in the image.
# NOTE: the normalized vertex coordinates are relative to the original image
@@ -1399,14 +1445,14 @@
},
},
"inputConfig": { # The desired input location and metadata. # Required. Information about the input file.
+ "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
+ # "image/gif" are supported. Wildcards are not supported.
"content": "A String", # File content, represented as a stream of bytes.
# Note: As with all `bytes` fields, protobuffers use a pure binary
# representation, whereas JSON representations use base64.
#
# Currently, this field only works for BatchAnnotateFiles requests. It does
# not work for AsyncBatchAnnotateFiles requests.
- "mimeType": "A String", # The type of the file. Currently only "application/pdf", "image/tiff" and
- # "image/gif" are supported. Wildcards are not supported.
"gcsSource": { # The Google Cloud Storage location where the input will be read from. # The Google Cloud Storage location to read the input from.
"uri": "A String", # Google Cloud Storage URI for the input file. This must only be a
# Google Cloud Storage object. Wildcards are not currently supported.
@@ -1426,6 +1472,18 @@
],
},
],
+ "parent": "A String", # Optional. Target project and location to make a call.
+ #
+ # Format: `projects/{project-id}/locations/{location-id}`.
+ #
+ # If no parent is specified, a region will be chosen automatically.
+ #
+ # Supported location-ids:
+ # `us`: USA country only,
+        #     `asia`: East Asia areas, like Japan, Taiwan,
+ # `eu`: The European Union.
+ #
+ # Example: `projects/project-A/locations/eu`.
}
x__xgafv: string, V1 error format.
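The asynchronous variant follows the same pattern: `body` becomes an optional keyword argument and the request accepts the new `parent` field. A sketch of submitting a request and polling the returned long-running operation — `outputConfig`/`gcsDestination` are AsyncAnnotateFileRequest fields that sit outside the truncated hunk above, and `operations().get` is assumed from the service's operations resource:

```python
import time

async_body = {
    "parent": "projects/project-A/locations/eu",  # optional, as above
    "requests": [
        {
            "inputConfig": {
                "gcsSource": {"uri": "gs://my-bucket/scans/report.pdf"},
                "mimeType": "application/pdf",
            },
            "features": [{"type": "DOCUMENT_TEXT_DETECTION"}],
            # Results are written to GCS as JSON, `batchSize` pages per file.
            "outputConfig": {
                "gcsDestination": {"uri": "gs://my-bucket/ocr-out/"},
                "batchSize": 20,
            },
        }
    ],
}

operation = service.files().asyncBatchAnnotate(body=async_body).execute()

# Poll until the operation completes; results then sit under the GCS prefix.
while True:
    op = service.operations().get(name=operation["name"]).execute()
    if op.get("done"):
        break
    time.sleep(5)
```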