Bu Sun Kim | 673ec5c | 2020-11-16 11:05:03 -0700 | [diff] [blame] | 1 | <html><body> |
| 2 | <style> |
| 3 | |
| 4 | body, h1, h2, h3, div, span, p, pre, a { |
| 5 | margin: 0; |
| 6 | padding: 0; |
| 7 | border: 0; |
| 8 | font-weight: inherit; |
| 9 | font-style: inherit; |
| 10 | font-size: 100%; |
| 11 | font-family: inherit; |
| 12 | vertical-align: baseline; |
| 13 | } |
| 14 | |
| 15 | body { |
| 16 | font-size: 13px; |
| 17 | padding: 1em; |
| 18 | } |
| 19 | |
| 20 | h1 { |
| 21 | font-size: 26px; |
| 22 | margin-bottom: 1em; |
| 23 | } |
| 24 | |
| 25 | h2 { |
| 26 | font-size: 24px; |
| 27 | margin-bottom: 1em; |
| 28 | } |
| 29 | |
| 30 | h3 { |
| 31 | font-size: 20px; |
| 32 | margin-bottom: 1em; |
| 33 | margin-top: 1em; |
| 34 | } |
| 35 | |
| 36 | pre, code { |
| 37 | line-height: 1.5; |
| 38 | font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace; |
| 39 | } |
| 40 | |
| 41 | pre { |
| 42 | margin-top: 0.5em; |
| 43 | } |
| 44 | |
| 45 | h1, h2, h3, p { |
| 46 | font-family: Arial, sans-serif; |
| 47 | } |
| 48 | |
| 49 | h1, h2, h3 { |
| 50 | border-bottom: solid #CCC 1px; |
| 51 | } |
| 52 | |
| 53 | .toc_element { |
| 54 | margin-top: 0.5em; |
| 55 | } |
| 56 | |
| 57 | .firstline { |
| 58 | margin-left: 2em; |
| 59 | } |
| 60 | |
| 61 | .method { |
| 62 | margin-top: 1em; |
| 63 | border: solid 1px #CCC; |
| 64 | padding: 1em; |
| 65 | background: #EEE; |
| 66 | } |
| 67 | |
| 68 | .details { |
| 69 | font-weight: bold; |
| 70 | font-size: 14px; |
| 71 | } |
| 72 | |
| 73 | </style> |
| 74 | |
| 75 | <h1><a href="datalabeling_v1beta1.html">Data Labeling API</a> . <a href="datalabeling_v1beta1.projects.html">projects</a> . <a href="datalabeling_v1beta1.projects.datasets.html">datasets</a> . <a href="datalabeling_v1beta1.projects.datasets.evaluations.html">evaluations</a></h1> |
| 76 | <h2>Instance Methods</h2> |
| 77 | <p class="toc_element"> |
| 78 | <code><a href="datalabeling_v1beta1.projects.datasets.evaluations.exampleComparisons.html">exampleComparisons()</a></code> |
| 79 | </p> |
| 80 | <p class="firstline">Returns the exampleComparisons Resource.</p> |
| 81 | |
| 82 | <p class="toc_element"> |
| 83 | <code><a href="#close">close()</a></code></p> |
| 84 | <p class="firstline">Close httplib2 connections.</p> |
| 85 | <p class="toc_element"> |
| 86 | <code><a href="#get">get(name, x__xgafv=None)</a></code></p> |
| 87 | <p class="firstline">Gets an evaluation by resource name (to search, use projects.evaluations.search).</p> |
| 88 | <h3>Method Details</h3> |
| 89 | <div class="method"> |
| 90 | <code class="details" id="close">close()</code> |
| 91 | <pre>Close httplib2 connections.</pre> |
| 92 | </div> |
| 93 | |
| 94 | <div class="method"> |
| 95 | <code class="details" id="get">get(name, x__xgafv=None)</code> |
| 96 | <pre>Gets an evaluation by resource name (to search, use projects.evaluations.search). |
| 97 | |
| 98 | Args: |
| 99 | name: string, Required. Name of the evaluation. Format: "projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}" (required) |
| 100 | x__xgafv: string, V1 error format. |
| 101 | Allowed values |
| 102 | 1 - v1 error format |
| 103 | 2 - v2 error format |
| 104 | |
| 105 | Returns: |
| 106 | An object of the form: |
| 107 | |
| 108 | { # Describes an evaluation between a machine learning model's predictions and ground truth labels. Created when an EvaluationJob runs successfully. |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 109 | "name": "A String", # Output only. Resource name of an evaluation. The name has the following format: "projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}" |
| 110 | "createTime": "A String", # Output only. Timestamp for when this evaluation was created. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 111 | "annotationType": "A String", # Output only. Type of task that the model version being evaluated performs, as defined in the evaluationJobConfig.inputConfig.annotationType field of the evaluation job that created this evaluation. |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 112 | "config": { # Configuration details used for calculating evaluation metrics and creating an Evaluation. # Output only. Options used in the evaluation job that created this evaluation. |
| 113 | "boundingBoxEvaluationOptions": { # Options regarding evaluation between bounding boxes. # Only specify this field if the related model performs image object detection (`IMAGE_BOUNDING_BOX_ANNOTATION`). Describes how to evaluate bounding boxes. |
| 114 | "iouThreshold": 3.14, # Minimum [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) required for 2 bounding boxes to be considered a match. This must be a number between 0 and 1. |
| 115 | }, |
| 116 | }, |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 117 | "evaluationMetrics": { # Output only. Metrics comparing predictions to ground truth labels. |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 118 | "classificationMetrics": { # Metrics calculated for a classification model. |
| 119 | "prCurve": { # Precision-recall curve based on ground truth labels, predicted labels, and scores for the predicted labels. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 120 | "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the label for which the precision-recall curve calculated. If this field is empty, that means the precision-recall curve is an aggregate curve for all labels. |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 121 | "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 122 | "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values. |
| 123 | "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 124 | }, |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 125 | "areaUnderCurve": 3.14, # Area under the precision-recall curve. Not to be confused with area under a receiver operating characteristic (ROC) curve. |
| 126 | "confidenceMetricsEntries": [ # Entries that make up the precision-recall graph. Each entry is a "point" on the graph drawn for a different `confidence_threshold`. |
| 127 | { |
| 128 | "recall": 3.14, # Recall value. |
| 129 | "f1ScoreAt1": 3.14, # The harmonic mean of recall_at1 and precision_at1. |
| 130 | "f1Score": 3.14, # Harmonic mean of recall and precision. |
| 131 | "f1ScoreAt5": 3.14, # The harmonic mean of recall_at5 and precision_at5. |
| 132 | "precisionAt1": 3.14, # Precision value for entries with label that has highest score. |
| 133 | "confidenceThreshold": 3.14, # Threshold used for this entry. For classification tasks, this is a classification threshold: a predicted label is categorized as positive or negative (in the context of this point on the PR curve) based on whether the label's score meets this threshold. For image object detection (bounding box) tasks, this is the [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) threshold for the context of this point on the PR curve. |
| 134 | "precision": 3.14, # Precision value. |
| 135 | "precisionAt5": 3.14, # Precision value for entries with label that has highest 5 scores. |
| 136 | "recallAt5": 3.14, # Recall value for entries with label that has highest 5 scores. |
| 137 | "recallAt1": 3.14, # Recall value for entries with label that has highest score. |
| 138 | }, |
| 139 | ], |
| 140 | "meanAveragePrecision": 3.14, # Mean average precision of this curve. |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 141 | }, |
| 142 | "confusionMatrix": { # Confusion matrix of the model running the classification. Only applicable when the metrics entry aggregates multiple labels. Not applicable when the entry is for a single label. # Confusion matrix of predicted labels vs. ground truth labels. |
| 143 | "row": [ |
| 144 | { # A row in the confusion matrix. Each entry in this row has the same ground truth label. |
| 145 | "entries": [ # A list of the confusion matrix entries. One entry for each possible predicted label. |
| 146 | { |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 147 | "itemCount": 42, # Number of items predicted to have this label. (The ground truth label for these items is the `Row.annotationSpec` of this entry's parent.) |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 148 | "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of a predicted label. |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 149 | "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long. |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 150 | "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 151 | "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters. |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 152 | }, |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 153 | }, |
| 154 | ], |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 155 | "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the ground truth label for this row. |
| 156 | "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long. |
| 157 | "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values. |
| 158 | "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters. |
| 159 | }, |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 160 | }, |
| 161 | ], |
| 162 | }, |
| 163 | }, |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 164 | "objectDetectionMetrics": { # Metrics calculated for an image object detection (bounding box) model. |
| 165 | "prCurve": { # Precision-recall curve. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 166 | "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the label for which the precision-recall curve calculated. If this field is empty, that means the precision-recall curve is an aggregate curve for all labels. |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 167 | "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 168 | "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values. |
| 169 | "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 170 | }, |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 171 | "areaUnderCurve": 3.14, # Area under the precision-recall curve. Not to be confused with area under a receiver operating characteristic (ROC) curve. |
| 172 | "confidenceMetricsEntries": [ # Entries that make up the precision-recall graph. Each entry is a "point" on the graph drawn for a different `confidence_threshold`. |
| 173 | { |
| 174 | "recall": 3.14, # Recall value. |
| 175 | "f1ScoreAt1": 3.14, # The harmonic mean of recall_at1 and precision_at1. |
| 176 | "f1Score": 3.14, # Harmonic mean of recall and precision. |
| 177 | "f1ScoreAt5": 3.14, # The harmonic mean of recall_at5 and precision_at5. |
| 178 | "precisionAt1": 3.14, # Precision value for entries with label that has highest score. |
| 179 | "confidenceThreshold": 3.14, # Threshold used for this entry. For classification tasks, this is a classification threshold: a predicted label is categorized as positive or negative (in the context of this point on the PR curve) based on whether the label's score meets this threshold. For image object detection (bounding box) tasks, this is the [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) threshold for the context of this point on the PR curve. |
| 180 | "precision": 3.14, # Precision value. |
| 181 | "precisionAt5": 3.14, # Precision value for entries with label that has highest 5 scores. |
| 182 | "recallAt5": 3.14, # Recall value for entries with label that has highest 5 scores. |
| 183 | "recallAt1": 3.14, # Recall value for entries with label that has highest score. |
| 184 | }, |
| 185 | ], |
| 186 | "meanAveragePrecision": 3.14, # Mean average precision of this curve. |
Yoshi Automation Bot | b6971b0 | 2020-11-26 17:16:03 -0800 | [diff] [blame] | 187 | }, |
| 188 | }, |
Yoshi Automation Bot | c2228be | 2020-11-24 15:48:03 -0800 | [diff] [blame] | 189 | }, |
Bu Sun Kim | 673ec5c | 2020-11-16 11:05:03 -0700 | [diff] [blame] | 190 | "evaluatedItemCount": "A String", # Output only. The number of items in the ground truth dataset that were used for this evaluation. Only populated when the evaluation is for certain AnnotationTypes. |
Yoshi Automation Bot | 0bf565c | 2020-12-09 08:56:03 -0800 | [diff] [blame^] | 191 | "evaluationJobRunTime": "A String", # Output only. Timestamp for when the evaluation job that created this evaluation ran. |
Bu Sun Kim | 673ec5c | 2020-11-16 11:05:03 -0700 | [diff] [blame] | 192 | }</pre> |
| 193 | </div> |
| 194 | |
| 195 | </body></html> |