chore: update docs/dyn (#1162)
This PR was generated using Autosynth. :rainbow:
Synth log will be available here:
https://source.cloud.google.com/results/invocations/b5e48daa-1759-436b-9fe7-ffce1482b520/targets
- [ ] To automatically regenerate this PR, check this box.
diff --git a/docs/dyn/datalabeling_v1beta1.projects.datasets.html b/docs/dyn/datalabeling_v1beta1.projects.datasets.html
index af6b827..3e98e47 100644
--- a/docs/dyn/datalabeling_v1beta1.projects.datasets.html
+++ b/docs/dyn/datalabeling_v1beta1.projects.datasets.html
@@ -144,37 +144,37 @@
The object takes the form of:
{ # Request message for CreateDataset.
- "dataset": { # Dataset is the resource to hold your data. You can request multiple labeling tasks for a dataset while each one will generate an AnnotatedDataset. # Required. The dataset to be created.
- "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
- { # The configuration of input data, including data type, location, etc.
- "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
- "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
- },
- "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
- },
- "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
- "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
- },
- "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
- "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
- "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
- },
- "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "dataset": { # Dataset is the resource to hold your data. You can request multiple labeling tasks for a dataset while each one will generate an AnnotatedDataset. # Required. The dataset to be created.
+ "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
+ "A String",
+ ],
+ "createTime": "A String", # Output only. Time the dataset is created.
+ "dataItemCount": "A String", # Output only. The number of data items in the dataset.
+ "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
+ "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
+ "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
+ { # The configuration of input data, including data type, location, etc.
+ "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
},
- ],
- "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
- "createTime": "A String", # Output only. Time the dataset is created.
- "dataItemCount": "A String", # Output only. The number of data items in the dataset.
- "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
- "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
- "A String",
- ],
- "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
- "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
- },
- }
+ "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
+ "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
+ },
+ "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
+ "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
+ "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
+ },
+ "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
+ "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
+ },
+ },
+ ],
+ "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
+ "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
+ },
+}
x__xgafv: string, V1 error format.
Allowed values
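For context, a minimal sketch of sending the CreateDataset request body documented above with the generated Python client. The project ID and field values are placeholders, application default credentials are assumed, and only the writable fields are set (the rest are marked output only in the schema).

```python
from googleapiclient.discovery import build

# Data Labeling client; application default credentials are assumed.
service = build("datalabeling", "v1beta1")

# Only writable fields are populated; inputConfigs, createTime, name, etc.
# are output-only and filled in by the service.
body = {
    "dataset": {
        "displayName": "product-images",        # required, max 64 characters
        "description": "Images for labeling",   # optional, up to 10000 characters
    }
}

dataset = (
    service.projects()
    .datasets()
    .create(parent="projects/my-project", body=body)
    .execute()
)
```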
@@ -185,35 +185,35 @@
An object of the form:
{ # Dataset is the resource to hold your data. You can request multiple labeling tasks for a dataset while each one will generate an AnnotatedDataset.
- "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
- { # The configuration of input data, including data type, location, etc.
- "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
- "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
- },
- "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
- },
- "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
- "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
- },
- "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
- "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
- "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
- },
- "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
+ "A String",
+ ],
+ "createTime": "A String", # Output only. Time the dataset is created.
+ "dataItemCount": "A String", # Output only. The number of data items in the dataset.
+ "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
+ "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
+ "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
+ { # The configuration of input data, including data type, location, etc.
+ "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
},
- ],
- "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
- "createTime": "A String", # Output only. Time the dataset is created.
- "dataItemCount": "A String", # Output only. The number of data items in the dataset.
- "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
- "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
- "A String",
- ],
- "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
- "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
- }</pre>
+ "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
+ "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
+ },
+ "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
+ "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
+ "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
+ },
+ "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
+ "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
+ },
+ },
+ ],
+ "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
+ "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
+}</pre>
</div>
<div class="method">
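The Dataset returned by create() is a plain dict whose keys follow the schema above; a short sketch of reading it, reusing the `dataset` variable from the previous snippet (values shown in comments are illustrative only).

```python
# `dataset` is the dict returned by the create() call above.
print(dataset["name"])             # e.g. "projects/my-project/datasets/1234567890"
print(dataset["displayName"])      # "product-images"
print(dataset.get("createTime"))   # output-only timestamp set by the service
print(dataset.get("dataItemCount", "0"))  # populated only after data is imported
```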
@@ -231,7 +231,7 @@
An object of the form:
{ # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
- }</pre>
+}</pre>
</div>
<div class="method">
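The empty message above is what the delete method returns on success; a hedged sketch of that call, reusing the `service` client from the CreateDataset snippet and a placeholder dataset name.

```python
# Deleting a dataset returns the empty JSON object `{}` on success.
service.projects().datasets().delete(
    name="projects/my-project/datasets/1234567890"
).execute()
```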
@@ -244,19 +244,19 @@
The object takes the form of:
{ # Request message for ExportData API.
- "annotatedDataset": "A String", # Required. Annotated dataset resource name. DataItem in Dataset and their annotations in specified annotated dataset will be exported. It's in format of projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/ {annotated_dataset_id}
- "userEmailAddress": "A String", # Email of the user who started the export task and should be notified by email. If empty no notification will be sent.
- "outputConfig": { # The configuration of output data. # Required. Specify the output destination.
- "gcsDestination": { # Export destination of the data.Only gcs path is allowed in output_uri. # Output to a file in Cloud Storage. Should be used for labeling output other than image segmentation.
- "outputUri": "A String", # Required. The output uri of destination file.
- "mimeType": "A String", # Required. The format of the gcs destination. Only "text/csv" and "application/json" are supported.
- },
- "gcsFolderDestination": { # Export folder destination of the data. # Output to a folder in Cloud Storage. Should be used for image segmentation or document de-identification labeling outputs.
- "outputFolderUri": "A String", # Required. Cloud Storage directory to export data to.
- },
+ "annotatedDataset": "A String", # Required. Annotated dataset resource name. DataItem in Dataset and their annotations in specified annotated dataset will be exported. It's in format of projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/ {annotated_dataset_id}
+ "filter": "A String", # Optional. Filter is not supported at this moment.
+ "outputConfig": { # The configuration of output data. # Required. Specify the output destination.
+ "gcsDestination": { # Export destination of the data.Only gcs path is allowed in output_uri. # Output to a file in Cloud Storage. Should be used for labeling output other than image segmentation.
+ "mimeType": "A String", # Required. The format of the gcs destination. Only "text/csv" and "application/json" are supported.
+ "outputUri": "A String", # Required. The output uri of destination file.
},
- "filter": "A String", # Optional. Filter is not supported at this moment.
- }
+ "gcsFolderDestination": { # Export folder destination of the data. # Output to a folder in Cloud Storage. Should be used for image segmentation or document de-identification labeling outputs.
+ "outputFolderUri": "A String", # Required. Cloud Storage directory to export data to.
+ },
+ },
+ "userEmailAddress": "A String", # Email of the user who started the export task and should be notified by email. If empty no notification will be sent.
+}
x__xgafv: string, V1 error format.
Allowed values
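A sketch of the ExportData request body documented above, assuming a CSV export of a non-segmentation labeling output to Cloud Storage; the annotated-dataset name and bucket path are placeholders, and `service` is the client from the CreateDataset snippet.

```python
export_body = {
    "annotatedDataset": (
        "projects/my-project/datasets/1234567890/annotatedDatasets/987654321"
    ),
    "outputConfig": {
        "gcsDestination": {
            "outputUri": "gs://my-bucket/exports/labels.csv",
            "mimeType": "text/csv",  # "application/json" is also accepted
        }
    },
}

operation = (
    service.projects()
    .datasets()
    .exportData(
        name="projects/my-project/datasets/1234567890",
        body=export_body,
    )
    .execute()
)
```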
@@ -267,24 +267,24 @@
An object of the form:
{ # This resource represents a long-running operation that is the result of a network API call.
- "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
- "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
- "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
- "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
- {
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- ],
- },
- "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- }</pre>
+ "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+ "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+ {
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+ },
+ "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+ "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+}</pre>
</div>
<div class="method">
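ExportData returns the long-running Operation shown above rather than the export result itself. A minimal sketch of inspecting that dict; polling via the API's operations surface is assumed to exist but is not shown here.

```python
# `operation` is the dict returned by exportData() in the previous sketch.
if operation.get("done"):
    if "error" in operation:
        # google.rpc.Status: numeric code plus a developer-facing message.
        raise RuntimeError(
            f"Export failed: {operation['error']['code']} "
            f"{operation['error'].get('message', '')}"
        )
    result = operation.get("response", {})  # export result payload
else:
    # Still running; poll later using operation["name"].
    print("Export in progress:", operation["name"])
```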
@@ -302,35 +302,35 @@
An object of the form:
{ # Dataset is the resource to hold your data. You can request multiple labeling tasks for a dataset while each one will generate an AnnotatedDataset.
- "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
- { # The configuration of input data, including data type, location, etc.
- "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
- "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
- },
- "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
- },
- "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
- "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
- },
- "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
- "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
- "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
- },
- "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
+ "A String",
+ ],
+ "createTime": "A String", # Output only. Time the dataset is created.
+ "dataItemCount": "A String", # Output only. The number of data items in the dataset.
+ "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
+ "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
+ "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
+ { # The configuration of input data, including data type, location, etc.
+ "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
},
- ],
- "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
- "createTime": "A String", # Output only. Time the dataset is created.
- "dataItemCount": "A String", # Output only. The number of data items in the dataset.
- "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
- "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
- "A String",
- ],
- "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
- "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
- }</pre>
+ "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
+ "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
+ },
+ "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
+ "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
+ "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
+ },
+ "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
+ "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
+ },
+ },
+ ],
+ "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
+ "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
+}</pre>
</div>
<div class="method">
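Fetching an existing dataset returns the same Dataset shape documented above; a one-call sketch, again with a placeholder resource name and the `service` client from the first snippet.

```python
dataset = (
    service.projects()
    .datasets()
    .get(name="projects/my-project/datasets/1234567890")
    .execute()
)
print(dataset["displayName"], dataset.get("dataItemCount", "0"))
```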
@@ -343,25 +343,25 @@
The object takes the form of:
{ # Request message for ImportData API.
- "inputConfig": { # The configuration of input data, including data type, location, etc. # Required. Specify the input source of the data.
- "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
- "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
- },
- "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
- },
- "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
- "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
- },
- "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
- "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
- "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
- },
- "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "inputConfig": { # The configuration of input data, including data type, location, etc. # Required. Specify the input source of the data.
+ "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
},
- "userEmailAddress": "A String", # Email of the user who started the import task and should be notified by email. If empty no notification will be sent.
- }
+ "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
+ "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
+ },
+ "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
+ "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
+ "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
+ },
+ "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
+ "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
+ },
+ },
+ "userEmailAddress": "A String", # Email of the user who started the import task and should be notified by email. If empty no notification will be sent.
+}
x__xgafv: string, V1 error format.
Allowed values
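A sketch of the ImportData request above for a CSV of text items in Cloud Storage. The `dataType` value is an assumed enum spelling, the bucket path and email are placeholders, and `textMetadata` is included because the schema requires a language code for text imports.

```python
import_body = {
    "inputConfig": {
        "dataType": "TEXT",  # assumed enum value; other data types also exist
        "gcsSource": {
            "inputUri": "gs://my-bucket/imports/items.csv",
            "mimeType": "text/csv",  # only "text/csv" is supported here
        },
        "textMetadata": {"languageCode": "en-US"},  # required for text import
    },
    "userEmailAddress": "labeler-admin@example.com",  # optional notification
}

operation = (
    service.projects()
    .datasets()
    .importData(
        name="projects/my-project/datasets/1234567890",
        body=import_body,
    )
    .execute()
)
```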
@@ -372,24 +372,24 @@
An object of the form:
{ # This resource represents a long-running operation that is the result of a network API call.
- "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
- "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
- "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
- "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
- {
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- ],
- },
- "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- }</pre>
+ "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+ "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+ {
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+ },
+ "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+ "response": { # The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+}</pre>
</div>
<div class="method">
@@ -410,40 +410,40 @@
An object of the form:
{ # Results of listing datasets within a project.
- "datasets": [ # The list of datasets to return.
- { # Dataset is the resource to hold your data. You can request multiple labeling tasks for a dataset while each one will generate an AnnotatedDataset.
- "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
- { # The configuration of input data, including data type, location, etc.
- "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
- "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
- },
- "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
- "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
- },
- "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
- "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
- },
- "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
- "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
- "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
- },
- "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "datasets": [ # The list of datasets to return.
+ { # Dataset is the resource to hold your data. You can request multiple labeling tasks for a dataset while each one will generate an AnnotatedDataset.
+ "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
+ "A String",
+ ],
+ "createTime": "A String", # Output only. Time the dataset is created.
+ "dataItemCount": "A String", # Output only. The number of data items in the dataset.
+ "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
+ "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
+ "inputConfigs": [ # Output only. This is populated with the original input configs where ImportData is called. It is available only after the clients import data to this dataset.
+ { # The configuration of input data, including data type, location, etc.
+ "annotationType": "A String", # Optional. The type of annotation to be performed on this data. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "bigquerySource": { # The BigQuery location for input data. If used in an EvaluationJob, this is where the service saves the prediction input and output sampled from the model version. # Source located in BigQuery. You must specify this field if you are using this InputConfig in an EvaluationJob.
+ "inputUri": "A String", # Required. BigQuery URI to a table, up to 2,000 characters long. If you specify the URI of a table that does not exist, Data Labeling Service creates a table at the URI with the correct schema when you create your EvaluationJob. If you specify the URI of a table that already exists, it must have the [correct schema](/ml-engine/docs/continuous-evaluation/create-job#table-schema). Provide the table URI in the following format: "bq://{your_project_id}/ {your_dataset_name}/{your_table_name}" [Learn more](/ml-engine/docs/continuous-evaluation/create-job#table-schema).
},
- ],
- "description": "A String", # Optional. User-provided description of the annotation specification set. The description can be up to 10000 characters long.
- "createTime": "A String", # Output only. Time the dataset is created.
- "dataItemCount": "A String", # Output only. The number of data items in the dataset.
- "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
- "blockingResources": [ # Output only. The names of any related resources that are blocking changes to the dataset.
- "A String",
- ],
- "displayName": "A String", # Required. The display name of the dataset. Maximum of 64 characters.
- "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
- },
- ],
- "nextPageToken": "A String", # A token to retrieve next page of results.
- }</pre>
+ "classificationMetadata": { # Metadata for classification annotations. # Optional. Metadata about annotations for the input. You must specify this field if you are using this InputConfig in an EvaluationJob for a model version that performs classification.
+ "isMultiLabel": True or False, # Whether the classification task is multi-label or not.
+ },
+ "dataType": "A String", # Required. Data type must be specifed when user tries to import data.
+ "gcsSource": { # Source of the Cloud Storage file to be imported. # Source located in Cloud Storage.
+ "inputUri": "A String", # Required. The input URI of source file. This must be a Cloud Storage path (`gs://...`).
+ "mimeType": "A String", # Required. The format of the source file. Only "text/csv" is supported.
+ },
+ "textMetadata": { # Metadata for the text. # Required for text import, as language code must be specified.
+ "languageCode": "A String", # The language of this text, as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt). Default value is en-US.
+ },
+ },
+ ],
+ "lastMigrateTime": "A String", # Last time that the Dataset is migrated to AI Platform V2. If any of the AnnotatedDataset is migrated, the last_migration_time in Dataset is also updated.
+ "name": "A String", # Output only. Dataset resource name, format is: projects/{project_id}/datasets/{dataset_id}
+ },
+ ],
+ "nextPageToken": "A String", # A token to retrieve next page of results.
+}</pre>
</div>
<div class="method">
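Finally, the paginated list response above is typically consumed with the client's list/list_next pair; the page size is arbitrary and `service` is the client from the first snippet.

```python
request = service.projects().datasets().list(
    parent="projects/my-project", pageSize=100
)
while request is not None:
    response = request.execute()
    for ds in response.get("datasets", []):
        print(ds["name"], ds.get("displayName", ""))
    # list_next() returns None once nextPageToken is exhausted.
    request = service.projects().datasets().list_next(request, response)
```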