<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="datalabeling_v1beta1.html">Data Labeling API</a> . <a href="datalabeling_v1beta1.projects.html">projects</a> . <a href="datalabeling_v1beta1.projects.datasets.html">datasets</a> . <a href="datalabeling_v1beta1.projects.datasets.evaluations.html">evaluations</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="datalabeling_v1beta1.projects.datasets.evaluations.exampleComparisons.html">exampleComparisons()</a></code>
</p>
<p class="firstline">Returns the exampleComparisons Resource.</p>

<p class="toc_element">
  <code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<p class="toc_element">
  <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
<p class="firstline">Gets an evaluation by resource name (to search, use projects.evaluations.search).</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="close">close()</code>
  <pre>Close httplib2 connections.</pre>
</div>

<div class="method">
    <code class="details" id="get">get(name, x__xgafv=None)</code>
  <pre>Gets an evaluation by resource name (to search, use projects.evaluations.search).

Args:
  name: string, Required. Name of the evaluation. Format: &quot;projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}&quot; (required)
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Describes an evaluation between a machine learning model&#x27;s predictions and ground truth labels. Created when an EvaluationJob runs successfully.
  &quot;annotationType&quot;: &quot;A String&quot;, # Output only. Type of task that the model version being evaluated performs, as defined in the evaluationJobConfig.inputConfig.annotationType field of the evaluation job that created this evaluation.
  &quot;config&quot;: { # Configuration details used for calculating evaluation metrics and creating an Evaluation. # Output only. Options used in the evaluation job that created this evaluation.
    &quot;boundingBoxEvaluationOptions&quot;: { # Options regarding evaluation between bounding boxes. # Only specify this field if the related model performs image object detection (`IMAGE_BOUNDING_BOX_ANNOTATION`). Describes how to evaluate bounding boxes.
      &quot;iouThreshold&quot;: 3.14, # Minimum [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) required for 2 bounding boxes to be considered a match. This must be a number between 0 and 1.
    },
  },
  &quot;createTime&quot;: &quot;A String&quot;, # Output only. Timestamp for when this evaluation was created.
  &quot;evaluatedItemCount&quot;: &quot;A String&quot;, # Output only. The number of items in the ground truth dataset that were used for this evaluation. Only populated when the evaluation is for certain AnnotationTypes.
  &quot;evaluationJobRunTime&quot;: &quot;A String&quot;, # Output only. Timestamp for when the evaluation job that created this evaluation ran.
  &quot;evaluationMetrics&quot;: { # Output only. Metrics comparing predictions to ground truth labels.
    &quot;classificationMetrics&quot;: { # Metrics calculated for a classification model.
      &quot;confusionMatrix&quot;: { # Confusion matrix of the model running the classification. Only applicable when the metrics entry aggregates multiple labels. Not applicable when the entry is for a single label. # Confusion matrix of predicted labels vs. ground truth labels.
        &quot;row&quot;: [
          { # A row in the confusion matrix. Each entry in this row has the same ground truth label.
            &quot;annotationSpec&quot;: { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the ground truth label for this row.
              &quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
              &quot;displayName&quot;: &quot;A String&quot;, # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
              &quot;index&quot;: 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat` might contain one AnnotationSpec with `{ display_name: &quot;dog&quot;, index: 0 }` and one AnnotationSpec with `{ display_name: &quot;cat&quot;, index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
            },
            &quot;entries&quot;: [ # A list of the confusion matrix entries. One entry for each possible predicted label.
              {
                &quot;annotationSpec&quot;: { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of a predicted label.
                  &quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
                  &quot;displayName&quot;: &quot;A String&quot;, # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
                  &quot;index&quot;: 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat` might contain one AnnotationSpec with `{ display_name: &quot;dog&quot;, index: 0 }` and one AnnotationSpec with `{ display_name: &quot;cat&quot;, index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
                },
                &quot;itemCount&quot;: 42, # Number of items predicted to have this label. (The ground truth label for these items is the `Row.annotationSpec` of this entry&#x27;s parent.)
              },
            ],
          },
        ],
      },
      &quot;prCurve&quot;: { # Precision-recall curve based on ground truth labels, predicted labels, and scores for the predicted labels.
        &quot;annotationSpec&quot;: { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the label for which the precision-recall curve is calculated. If this field is empty, the precision-recall curve is an aggregate curve for all labels.
          &quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
          &quot;displayName&quot;: &quot;A String&quot;, # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
          &quot;index&quot;: 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat` might contain one AnnotationSpec with `{ display_name: &quot;dog&quot;, index: 0 }` and one AnnotationSpec with `{ display_name: &quot;cat&quot;, index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
        },
        &quot;areaUnderCurve&quot;: 3.14, # Area under the precision-recall curve. Not to be confused with area under a receiver operating characteristic (ROC) curve.
        &quot;confidenceMetricsEntries&quot;: [ # Entries that make up the precision-recall graph. Each entry is a &quot;point&quot; on the graph drawn for a different `confidence_threshold`.
          {
            &quot;confidenceThreshold&quot;: 3.14, # Threshold used for this entry. For classification tasks, this is a classification threshold: a predicted label is categorized as positive or negative (in the context of this point on the PR curve) based on whether the label&#x27;s score meets this threshold. For image object detection (bounding box) tasks, this is the [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) threshold for the context of this point on the PR curve.
            &quot;f1Score&quot;: 3.14, # Harmonic mean of recall and precision.
            &quot;f1ScoreAt1&quot;: 3.14, # The harmonic mean of recall_at1 and precision_at1.
            &quot;f1ScoreAt5&quot;: 3.14, # The harmonic mean of recall_at5 and precision_at5.
            &quot;precision&quot;: 3.14, # Precision value.
            &quot;precisionAt1&quot;: 3.14, # Precision value for entries with the label that has the highest score.
            &quot;precisionAt5&quot;: 3.14, # Precision value for entries with the labels that have the 5 highest scores.
            &quot;recall&quot;: 3.14, # Recall value.
            &quot;recallAt1&quot;: 3.14, # Recall value for entries with the label that has the highest score.
            &quot;recallAt5&quot;: 3.14, # Recall value for entries with the labels that have the 5 highest scores.
          },
        ],
        &quot;meanAveragePrecision&quot;: 3.14, # Mean average precision of this curve.
      },
    },
    &quot;objectDetectionMetrics&quot;: { # Metrics calculated for an image object detection (bounding box) model.
      &quot;prCurve&quot;: { # Precision-recall curve.
        &quot;annotationSpec&quot;: { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the label for which the precision-recall curve is calculated. If this field is empty, the precision-recall curve is an aggregate curve for all labels.
          &quot;description&quot;: &quot;A String&quot;, # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
          &quot;displayName&quot;: &quot;A String&quot;, # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
          &quot;index&quot;: 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat` might contain one AnnotationSpec with `{ display_name: &quot;dog&quot;, index: 0 }` and one AnnotationSpec with `{ display_name: &quot;cat&quot;, index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
        },
        &quot;areaUnderCurve&quot;: 3.14, # Area under the precision-recall curve. Not to be confused with area under a receiver operating characteristic (ROC) curve.
        &quot;confidenceMetricsEntries&quot;: [ # Entries that make up the precision-recall graph. Each entry is a &quot;point&quot; on the graph drawn for a different `confidence_threshold`.
          {
            &quot;confidenceThreshold&quot;: 3.14, # Threshold used for this entry. For classification tasks, this is a classification threshold: a predicted label is categorized as positive or negative (in the context of this point on the PR curve) based on whether the label&#x27;s score meets this threshold. For image object detection (bounding box) tasks, this is the [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) threshold for the context of this point on the PR curve.
            &quot;f1Score&quot;: 3.14, # Harmonic mean of recall and precision.
            &quot;f1ScoreAt1&quot;: 3.14, # The harmonic mean of recall_at1 and precision_at1.
            &quot;f1ScoreAt5&quot;: 3.14, # The harmonic mean of recall_at5 and precision_at5.
            &quot;precision&quot;: 3.14, # Precision value.
            &quot;precisionAt1&quot;: 3.14, # Precision value for entries with the label that has the highest score.
            &quot;precisionAt5&quot;: 3.14, # Precision value for entries with the labels that have the 5 highest scores.
            &quot;recall&quot;: 3.14, # Recall value.
            &quot;recallAt1&quot;: 3.14, # Recall value for entries with the label that has the highest score.
            &quot;recallAt5&quot;: 3.14, # Recall value for entries with the labels that have the 5 highest scores.
          },
        ],
        &quot;meanAveragePrecision&quot;: 3.14, # Mean average precision of this curve.
      },
    },
  },
  &quot;name&quot;: &quot;A String&quot;, # Output only. Resource name of an evaluation. The name has the following format: &quot;projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}&quot;
}</pre>
</div>
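
<p>A minimal sketch of calling this method with the google-api-python-client library, assuming Application Default Credentials are configured; the project, dataset, and evaluation IDs below are placeholders:</p>
<pre>
from googleapiclient.discovery import build

# Build a Data Labeling API (v1beta1) client. Without explicit credentials,
# build() falls back to Application Default Credentials.
service = build(&#x27;datalabeling&#x27;, &#x27;v1beta1&#x27;)

# Hypothetical resource name; substitute real project, dataset, and
# evaluation IDs.
name = &#x27;projects/my-project/datasets/my-dataset/evaluations/my-evaluation&#x27;

# execute() returns a plain dict shaped like the object documented above.
evaluation = service.projects().datasets().evaluations().get(name=name).execute()
print(evaluation[&#x27;name&#x27;], evaluation.get(&#x27;annotationType&#x27;))

# Release httplib2 connections when finished.
service.close()
</pre>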

</body></html>