docs: update docs (#916)
* fix: re-run script
* test: fix noxfile
diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html
index 91fc1e6..6d1b63c 100644
--- a/docs/dyn/bigquery_v2.models.html
+++ b/docs/dyn/bigquery_v2.models.html
@@ -81,7 +81,7 @@
<code><a href="#get">get(projectId, datasetId, modelId)</a></code></p>
<p class="firstline">Gets the specified model resource by model ID.</p>
<p class="toc_element">
- <code><a href="#list">list(projectId, datasetId, pageToken=None, maxResults=None)</a></code></p>
+ <code><a href="#list">list(projectId, datasetId, maxResults=None, pageToken=None)</a></code></p>
<p class="firstline">Lists all models in the specified dataset. Requires the READER dataset</p>
<p class="toc_element">
<code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
@@ -142,14 +142,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
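The StandardSqlDataType hunks here and below are pure key reorders; the shape is unchanged: typeKind is required and discriminates, with arrayElementType set only for ARRAY and structType only for STRUCT. A sketch of walking such a dict recursively; render_type is a hypothetical helper, not part of the client library:

    def render_type(t):
        """Render a StandardSqlDataType dict as a SQL-style type string."""
        kind = t['typeKind']  # Required, e.g. "INT64", "DATE", "ARRAY".
        if kind == 'ARRAY':
            return 'ARRAY<%s>' % render_type(t['arrayElementType'])
        if kind == 'STRUCT':
            fields = t.get('structType', {}).get('fields', [])
            return 'STRUCT<%s>' % ', '.join(
                '%s %s' % (f.get('name', ''), render_type(f['type']))
                for f in fields)
        return kind

    # Prints ARRAY<DATE>, matching the {name="y", ...} example in the
    # schema comments above.
    print(render_type({'typeKind': 'ARRAY',
                       'arrayElementType': {'typeKind': 'DATE'}}))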
@@ -169,14 +169,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
@@ -190,6 +190,14 @@
"startTime": "A String", # The start time of this training run.
"results": [ # Output of each iteration run, results.size() <= max_iterations.
{ # Information about a single iteration of the training run.
+ "clusterInfos": [ # Information about top clusters for clustering models.
+ { # Information about a single cluster for clustering model.
+ "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
+ # to each point assigned to the cluster.
+ "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
+ "centroidId": "A String", # Centroid id.
+ },
+ ],
"trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
"evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
"index": 42, # Index of the iteration, 0 based.
@@ -200,31 +208,31 @@
"arimaModelInfo": [ # This message is repeated because there are multiple arima models
# fitted in auto-arima. For non-auto-arima model, its size is one.
{ # Arima model information.
- "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
- "aic": 3.14, # AIC.
- "logLikelihood": 3.14, # Log-likelihood.
- "variance": 3.14, # Variance.
- },
- "timeSeriesId": "A String", # The id to indicate different time series.
- "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
- "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
- 3.14,
- ],
- "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
- 3.14,
- ],
- "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
- },
- "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
- # when d is not 1.
"seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported
# for one time series.
"A String",
],
"nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
- "q": "A String", # Order of the moving-average part.
"d": "A String", # Order of the differencing part.
"p": "A String", # Order of the autoregressive part.
+ "q": "A String", # Order of the moving-average part.
+ },
+ "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
+ "logLikelihood": 3.14, # Log-likelihood.
+ "variance": 3.14, # Variance.
+ "aic": 3.14, # AIC.
+ },
+ "timeSeriesId": "A String", # The id to indicate different time series.
+ "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
+ # when d is not 1.
+ "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
+ "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
+ 3.14,
+ ],
+ "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
+ "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
+ 3.14,
+ ],
},
},
],
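The ARIMA hunk is likewise a reorder only. The p/d/q orders and timeSeriesId are int64/ID fields that arrive as strings. A sketch of summarizing each fitted candidate from auto-arima; the arimaResult container name is an assumption, since this diff shows only its children:

    # 'result' is one entry of trainingRuns[].results[]; the 'arimaResult'
    # key is assumed -- only its children appear in this diff.
    for info in result.get('arimaResult', {}).get('arimaModelInfo', []):
        order = info['nonSeasonalOrder']
        print('ARIMA(p=%s, d=%s, q=%s) drift=%s AIC=%.2f' % (
            order['p'], order['d'], order['q'],
            info['hasDrift'],
            info['arimaFittingMetrics']['aic']))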
@@ -233,86 +241,12 @@
"A String",
],
},
- "clusterInfos": [ # Information about top clusters for clustering models.
- { # Information about a single cluster for clustering model.
- "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
- "centroidId": "A String", # Centroid id.
- "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
- # to each point assigned to the cluster.
- },
- ],
},
],
"evaluationMetrics": { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
# end of training.
# data or just the eval data based on whether eval data was used during
# training. These are not present for imported models.
- "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
- "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
- # models, the metrics are either macro-averaged or micro-averaged. When
- # macro-averaged, the metrics are calculated for each label and then an
- # unweighted average is taken of those values. When micro-averaged, the
- # metric is calculated globally by counting the total number of correctly
- # predicted rows.
- "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
- # positive prediction. For multiclass this is a macro-averaged metric.
- "threshold": 3.14, # Threshold at which the metrics are computed. For binary
- # classification models this is the positive class threshold.
- # For multi-class classfication models this is the confidence
- # threshold.
- "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
- # metric.
- "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
- "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
- # this is a macro-averaged metric.
- "precision": 3.14, # Precision is the fraction of actual positive predictions that had
- # positive actual labels. For multiclass this is a macro-averaged
- # metric treating each class as a binary classifier.
- "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
- # multiclass this is a micro-averaged metric.
- },
- "negativeLabel": "A String", # Label representing the negative class.
- "positiveLabel": "A String", # Label representing the positive class.
- "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
- { # Confusion matrix for binary classification models.
- "falseNegatives": "A String", # Number of false samples predicted as false.
- "falsePositives": "A String", # Number of false samples predicted as true.
- "trueNegatives": "A String", # Number of true samples predicted as false.
- "f1Score": 3.14, # The equally weighted average of recall and precision.
- "precision": 3.14, # The fraction of actual positive predictions that had positive actual
- # labels.
- "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
- "accuracy": 3.14, # The fraction of predictions given the correct label.
- "truePositives": "A String", # Number of true samples predicted as true.
- "recall": 3.14, # The fraction of actual positive labels that were given a positive
- # prediction.
- },
- ],
- },
- "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
- # factorization models.
- # factorization models.
- "meanSquaredError": 3.14, # Mean squared error.
- "rSquared": 3.14, # R^2 score.
- "medianAbsoluteError": 3.14, # Median absolute error.
- "meanSquaredLogError": 3.14, # Mean squared log error.
- "meanAbsoluteError": 3.14, # Mean absolute error.
- },
- "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
- # models.
- # feedback_type=implicit.
- "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
- # then averages all the precisions across all the users.
- "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
- # predicted confidence by comparing it to an ideal rank measured by the
- # original ratings.
- "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
- # from the predicted confidence and dividing it by the original rank.
- "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
- # recommendation models except instead of computing the rating directly,
- # the output from evaluate is computed against a preference which is 1 or 0
- # depending on if the rating exists or not.
- },
"multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
"aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
# models, the metrics are either macro-averaged or micro-averaged. When
@@ -343,15 +277,15 @@
# confusion matrix.
"rows": [ # One row per actual label.
{ # A single row in the confusion matrix.
+ "actualLabel": "A String", # The original label of this row.
"entries": [ # Info describing predicted label distribution.
{ # A single entry in the confusion matrix.
+ "itemCount": "A String", # Number of items being predicted as this label.
"predictedLabel": "A String", # The predicted label. For confidence_threshold > 0, we will
# also add an entry indicating the number of items under the
# confidence threshold.
- "itemCount": "A String", # Number of items being predicted as this label.
},
],
- "actualLabel": "A String", # The original label of this row.
},
],
},
@@ -362,11 +296,10 @@
"daviesBouldinIndex": 3.14, # Davies-Bouldin index.
"clusters": [ # [Beta] Information for all clusters.
{ # Message containing the information about one cluster.
+ "centroidId": "A String", # Centroid id.
"count": "A String", # Count of training data rows that were assigned to this cluster.
"featureValues": [ # Values of highly variant features for this cluster.
{ # Representative value of a single feature within the cluster.
- "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
- # feature.
"featureColumn": "A String", # The feature column name.
"categoricalValue": { # Representative value of a categorical feature. # The categorical feature value.
"categoryCounts": [ # Counts of all categories for the categorical feature. If there are
@@ -380,33 +313,82 @@
},
],
},
+ "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
+ # feature.
},
],
- "centroidId": "A String", # Centroid id.
},
],
},
+ "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+ "positiveLabel": "A String", # Label representing the positive class.
+ "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
+ { # Confusion matrix for binary classification models.
+ "f1Score": 3.14, # The equally weighted average of recall and precision.
+ "precision": 3.14, # The fraction of actual positive predictions that had positive actual
+ # labels.
+ "accuracy": 3.14, # The fraction of predictions given the correct label.
+ "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
+ "truePositives": "A String", # Number of true samples predicted as true.
+ "recall": 3.14, # The fraction of actual positive labels that were given a positive
+ # prediction.
+ "falseNegatives": "A String", # Number of false samples predicted as false.
+ "trueNegatives": "A String", # Number of true samples predicted as false.
+ "falsePositives": "A String", # Number of false samples predicted as true.
+ },
+ ],
+ "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+ # models, the metrics are either macro-averaged or micro-averaged. When
+ # macro-averaged, the metrics are calculated for each label and then an
+ # unweighted average is taken of those values. When micro-averaged, the
+ # metric is calculated globally by counting the total number of correctly
+ # predicted rows.
+ "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
+ # positive prediction. For multiclass this is a macro-averaged metric.
+ "threshold": 3.14, # Threshold at which the metrics are computed. For binary
+ # classification models this is the positive class threshold.
+ # For multi-class classification models this is the confidence
+ # threshold.
+ "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+ # metric.
+ "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
+ # this is a macro-averaged metric.
+ "precision": 3.14, # Precision is the fraction of actual positive predictions that had
+ # positive actual labels. For multiclass this is a macro-averaged
+ # metric treating each class as a binary classifier.
+ "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
+ # multiclass this is a micro-averaged metric.
+ },
+ "negativeLabel": "A String", # Label representing the negative class.
+ },
+ "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+ # factorization models.
+ # factorization models.
+ "medianAbsoluteError": 3.14, # Median absolute error.
+ "meanSquaredLogError": 3.14, # Mean squared log error.
+ "meanAbsoluteError": 3.14, # Mean absolute error.
+ "meanSquaredError": 3.14, # Mean squared error.
+ "rSquared": 3.14, # R^2 score.
+ },
+ "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+ # models.
+ # feedback_type=implicit.
+ "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
+ # then averages all the precisions across all the users.
+ "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
+ # predicted confidence by comparing it to an ideal rank measured by the
+ # original ratings.
+ "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
+ # from the predicted confidence and dividing it by the original rank.
+ "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
+ # recommendation models except instead of computing the rating directly,
+ # the output from evaluate is computed against a preference which is 1 or 0
+ # depending on if the rating exists or not.
+ },
},
"trainingOptions": { # Options that were used for this training run, includes
# user specified and default options that were used.
- "dropout": 3.14, # Dropout probability for dnn models.
- "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
- "labelClassWeights": { # Weights associated with each label class, for rebalancing the
- # training data. Only applicable for classification models.
- "a_key": 3.14,
- },
- "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
- # overfitting for boosted tree models.
- "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
- # any more (compared to min_relative_progress). Used only for iterative
- # training algorithms.
- "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
- # of data will be used as training data. The format should be double.
- # Accurate to two decimal places.
- # Default value is 0.2.
- "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
- # strategy.
- "itemColumn": "A String", # Item column specified for matrix factorization models.
"inputLabelColumns": [ # Name of input label columns in training data.
"A String",
],
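The evaluationMetrics hunks above also only reorder the four metric groups; the contract is unchanged: only the group matching the model type is populated, and none are present for imported models. A dispatch sketch over plain dict access (the clusteringMetrics key name is assumed; the diff shows only its children):

    metrics = run.get('evaluationMetrics', {})
    if 'regressionMetrics' in metrics:
        print('MAE:', metrics['regressionMetrics']['meanAbsoluteError'])
    elif 'binaryClassificationMetrics' in metrics:
        agg = metrics['binaryClassificationMetrics']['aggregateClassificationMetrics']
        print('ROC AUC: %.3f at threshold %.2f' % (agg['rocAuc'], agg['threshold']))
    elif 'multiClassClassificationMetrics' in metrics:
        agg = metrics['multiClassClassificationMetrics']['aggregateClassificationMetrics']
        print('log loss:', agg['logLoss'])
    elif 'rankingMetrics' in metrics:
        print('MAP:', metrics['rankingMetrics']['meanAveragePrecision'])
    elif 'clusteringMetrics' in metrics:      # key name assumed
        for cluster in metrics['clusteringMetrics'].get('clusters', []):
            for fv in cluster.get('featureValues', []):
                # Exactly one of numericalValue / categoricalValue is set.
                value = fv.get('numericalValue', fv.get('categoricalValue'))
                print(cluster['centroidId'], fv['featureColumn'], value)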
@@ -451,27 +433,45 @@
"numClusters": "A String", # Number of clusters for clustering models.
"dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
"minSplitLoss": 3.14, # Minimum split loss for boosted tree models.
+ "dropout": 3.14, # Dropout probability for dnn models.
+ "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
+ "labelClassWeights": { # Weights associated with each label class, for rebalancing the
+ # training data. Only applicable for classification models.
+ "a_key": 3.14,
+ },
+ "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
+ # overfitting for boosted tree models.
+ "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
+ # any more (compared to min_relative_progress). Used only for iterative
+ # training algorithms.
+ "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
+ # of data will be used as training data. The format should be double.
+ # Accurate to two decimal places.
+ # Default value is 0.2.
+ "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
+ # strategy.
+ "itemColumn": "A String", # Item column specified for matrix factorization models.
},
"dataSplitResult": { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
# actually split.
# data tables that were used to train the model.
"trainingTable": { # Table reference of the training data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
"evaluationTable": { # Table reference of the evaluation data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
},
},
],
"modelReference": { # Required. Unique identifier for this model.
+ "projectId": "A String", # [Required] The ID of the project containing this model.
"datasetId": "A String", # [Required] The ID of the dataset containing this model.
"modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
- "projectId": "A String", # [Required] The ID of the project containing this model.
},
"description": "A String", # Optional. A user-friendly description of this model.
"etag": "A String", # Output only. A hash of this resource.
@@ -486,17 +486,17 @@
</div>
<div class="method">
- <code class="details" id="list">list(projectId, datasetId, pageToken=None, maxResults=None)</code>
+ <code class="details" id="list">list(projectId, datasetId, maxResults=None, pageToken=None)</code>
<pre>Lists all models in the specified dataset. Requires the READER dataset
role.
Args:
projectId: string, Required. Project ID of the models to list. (required)
datasetId: string, Required. Dataset ID of the models to list. (required)
- pageToken: string, Page token, returned by a previous call to request the next page of
-results
maxResults: integer, The maximum number of results to return in a single response page.
Leverage the page tokens to iterate through the entire collection.
+ pageToken: string, Page token, returned by a previous call to request the next page of
+results
Returns:
An object of the form:
@@ -535,14 +535,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
@@ -562,14 +562,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
@@ -583,6 +583,14 @@
"startTime": "A String", # The start time of this training run.
"results": [ # Output of each iteration run, results.size() <= max_iterations.
{ # Information about a single iteration of the training run.
+ "clusterInfos": [ # Information about top clusters for clustering models.
+ { # Information about a single cluster for clustering model.
+ "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
+ # to each point assigned to the cluster.
+ "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
+ "centroidId": "A String", # Centroid id.
+ },
+ ],
"trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
"evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
"index": 42, # Index of the iteration, 0 based.
@@ -593,31 +601,31 @@
"arimaModelInfo": [ # This message is repeated because there are multiple arima models
# fitted in auto-arima. For non-auto-arima model, its size is one.
{ # Arima model information.
- "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
- "aic": 3.14, # AIC.
- "logLikelihood": 3.14, # Log-likelihood.
- "variance": 3.14, # Variance.
- },
- "timeSeriesId": "A String", # The id to indicate different time series.
- "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
- "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
- 3.14,
- ],
- "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
- 3.14,
- ],
- "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
- },
- "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
- # when d is not 1.
"seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported
# for one time series.
"A String",
],
"nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
- "q": "A String", # Order of the moving-average part.
"d": "A String", # Order of the differencing part.
"p": "A String", # Order of the autoregressive part.
+ "q": "A String", # Order of the moving-average part.
+ },
+ "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
+ "logLikelihood": 3.14, # Log-likelihood.
+ "variance": 3.14, # Variance.
+ "aic": 3.14, # AIC.
+ },
+ "timeSeriesId": "A String", # The id to indicate different time series.
+ "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
+ # when d is not 1.
+ "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
+ "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
+ 3.14,
+ ],
+ "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
+ "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
+ 3.14,
+ ],
},
},
],
@@ -626,86 +634,12 @@
"A String",
],
},
- "clusterInfos": [ # Information about top clusters for clustering models.
- { # Information about a single cluster for clustering model.
- "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
- "centroidId": "A String", # Centroid id.
- "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
- # to each point assigned to the cluster.
- },
- ],
},
],
"evaluationMetrics": { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
# end of training.
# data or just the eval data based on whether eval data was used during
# training. These are not present for imported models.
- "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
- "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
- # models, the metrics are either macro-averaged or micro-averaged. When
- # macro-averaged, the metrics are calculated for each label and then an
- # unweighted average is taken of those values. When micro-averaged, the
- # metric is calculated globally by counting the total number of correctly
- # predicted rows.
- "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
- # positive prediction. For multiclass this is a macro-averaged metric.
- "threshold": 3.14, # Threshold at which the metrics are computed. For binary
- # classification models this is the positive class threshold.
- # For multi-class classfication models this is the confidence
- # threshold.
- "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
- # metric.
- "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
- "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
- # this is a macro-averaged metric.
- "precision": 3.14, # Precision is the fraction of actual positive predictions that had
- # positive actual labels. For multiclass this is a macro-averaged
- # metric treating each class as a binary classifier.
- "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
- # multiclass this is a micro-averaged metric.
- },
- "negativeLabel": "A String", # Label representing the negative class.
- "positiveLabel": "A String", # Label representing the positive class.
- "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
- { # Confusion matrix for binary classification models.
- "falseNegatives": "A String", # Number of false samples predicted as false.
- "falsePositives": "A String", # Number of false samples predicted as true.
- "trueNegatives": "A String", # Number of true samples predicted as false.
- "f1Score": 3.14, # The equally weighted average of recall and precision.
- "precision": 3.14, # The fraction of actual positive predictions that had positive actual
- # labels.
- "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
- "accuracy": 3.14, # The fraction of predictions given the correct label.
- "truePositives": "A String", # Number of true samples predicted as true.
- "recall": 3.14, # The fraction of actual positive labels that were given a positive
- # prediction.
- },
- ],
- },
- "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
- # factorization models.
- # factorization models.
- "meanSquaredError": 3.14, # Mean squared error.
- "rSquared": 3.14, # R^2 score.
- "medianAbsoluteError": 3.14, # Median absolute error.
- "meanSquaredLogError": 3.14, # Mean squared log error.
- "meanAbsoluteError": 3.14, # Mean absolute error.
- },
- "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
- # models.
- # feedback_type=implicit.
- "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
- # then averages all the precisions across all the users.
- "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
- # predicted confidence by comparing it to an ideal rank measured by the
- # original ratings.
- "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
- # from the predicted confidence and dividing it by the original rank.
- "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
- # recommendation models except instead of computing the rating directly,
- # the output from evaluate is computed against a preference which is 1 or 0
- # depending on if the rating exists or not.
- },
"multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
"aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
# models, the metrics are either macro-averaged or micro-averaged. When
@@ -736,15 +670,15 @@
# confusion matrix.
"rows": [ # One row per actual label.
{ # A single row in the confusion matrix.
+ "actualLabel": "A String", # The original label of this row.
"entries": [ # Info describing predicted label distribution.
{ # A single entry in the confusion matrix.
+ "itemCount": "A String", # Number of items being predicted as this label.
"predictedLabel": "A String", # The predicted label. For confidence_threshold > 0, we will
# also add an entry indicating the number of items under the
# confidence threshold.
- "itemCount": "A String", # Number of items being predicted as this label.
},
],
- "actualLabel": "A String", # The original label of this row.
},
],
},
@@ -755,11 +689,10 @@
"daviesBouldinIndex": 3.14, # Davies-Bouldin index.
"clusters": [ # [Beta] Information for all clusters.
{ # Message containing the information about one cluster.
+ "centroidId": "A String", # Centroid id.
"count": "A String", # Count of training data rows that were assigned to this cluster.
"featureValues": [ # Values of highly variant features for this cluster.
{ # Representative value of a single feature within the cluster.
- "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
- # feature.
"featureColumn": "A String", # The feature column name.
"categoricalValue": { # Representative value of a categorical feature. # The categorical feature value.
"categoryCounts": [ # Counts of all categories for the categorical feature. If there are
@@ -773,33 +706,82 @@
},
],
},
+ "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
+ # feature.
},
],
- "centroidId": "A String", # Centroid id.
},
],
},
+ "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+ "positiveLabel": "A String", # Label representing the positive class.
+ "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
+ { # Confusion matrix for binary classification models.
+ "f1Score": 3.14, # The equally weighted average of recall and precision.
+ "precision": 3.14, # The fraction of actual positive predictions that had positive actual
+ # labels.
+ "accuracy": 3.14, # The fraction of predictions given the correct label.
+ "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
+ "truePositives": "A String", # Number of true samples predicted as true.
+ "recall": 3.14, # The fraction of actual positive labels that were given a positive
+ # prediction.
+ "falseNegatives": "A String", # Number of false samples predicted as false.
+ "trueNegatives": "A String", # Number of true samples predicted as false.
+ "falsePositives": "A String", # Number of false samples predicted as true.
+ },
+ ],
+ "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+ # models, the metrics are either macro-averaged or micro-averaged. When
+ # macro-averaged, the metrics are calculated for each label and then an
+ # unweighted average is taken of those values. When micro-averaged, the
+ # metric is calculated globally by counting the total number of correctly
+ # predicted rows.
+ "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
+ # positive prediction. For multiclass this is a macro-averaged metric.
+ "threshold": 3.14, # Threshold at which the metrics are computed. For binary
+ # classification models this is the positive class threshold.
+ # For multi-class classification models this is the confidence
+ # threshold.
+ "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+ # metric.
+ "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
+ # this is a macro-averaged metric.
+ "precision": 3.14, # Precision is the fraction of actual positive predictions that had
+ # positive actual labels. For multiclass this is a macro-averaged
+ # metric treating each class as a binary classifier.
+ "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
+ # multiclass this is a micro-averaged metric.
+ },
+ "negativeLabel": "A String", # Label representing the negative class.
+ },
+ "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+ # factorization models.
+ # factorization models.
+ "medianAbsoluteError": 3.14, # Median absolute error.
+ "meanSquaredLogError": 3.14, # Mean squared log error.
+ "meanAbsoluteError": 3.14, # Mean absolute error.
+ "meanSquaredError": 3.14, # Mean squared error.
+ "rSquared": 3.14, # R^2 score.
+ },
+ "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+ # models.
+ # feedback_type=implicit.
+ "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
+ # then averages all the precisions across all the users.
+ "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
+ # predicted confidence by comparing it to an ideal rank measured by the
+ # original ratings.
+ "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
+ # from the predicted confidence and dividing it by the original rank.
+ "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
+ # recommendation models except instead of computing the rating directly,
+ # the output from evaluate is computed against a preference which is 1 or 0
+ # depending on if the rating exists or not.
+ },
},
"trainingOptions": { # Options that were used for this training run, includes
# user specified and default options that were used.
- "dropout": 3.14, # Dropout probability for dnn models.
- "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
- "labelClassWeights": { # Weights associated with each label class, for rebalancing the
- # training data. Only applicable for classification models.
- "a_key": 3.14,
- },
- "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
- # overfitting for boosted tree models.
- "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
- # any more (compared to min_relative_progress). Used only for iterative
- # training algorithms.
- "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
- # of data will be used as training data. The format should be double.
- # Accurate to two decimal places.
- # Default value is 0.2.
- "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
- # strategy.
- "itemColumn": "A String", # Item column specified for matrix factorization models.
"inputLabelColumns": [ # Name of input label columns in training data.
"A String",
],
@@ -844,27 +826,45 @@
"numClusters": "A String", # Number of clusters for clustering models.
"dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
"minSplitLoss": 3.14, # Minimum split loss for boosted tree models.
+ "dropout": 3.14, # Dropout probability for dnn models.
+ "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
+ "labelClassWeights": { # Weights associated with each label class, for rebalancing the
+ # training data. Only applicable for classification models.
+ "a_key": 3.14,
+ },
+ "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
+ # overfitting for boosted tree models.
+ "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
+ # any more (compared to min_relative_progress). Used only for iterative
+ # training algorithms.
+ "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
+ # of data will be used as training data. The format should be double.
+ # Accurate to two decimal places.
+ # Default value is 0.2.
+ "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
+ # strategy.
+ "itemColumn": "A String", # Item column specified for matrix factorization models.
},
"dataSplitResult": { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
# actually split.
# data tables that were used to train the model.
"trainingTable": { # Table reference of the training data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
"evaluationTable": { # Table reference of the evaluation data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
},
},
],
"modelReference": { # Required. Unique identifier for this model.
+ "projectId": "A String", # [Required] The ID of the project containing this model.
"datasetId": "A String", # [Required] The ID of the dataset containing this model.
"modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
- "projectId": "A String", # [Required] The ID of the project containing this model.
},
"description": "A String", # Optional. A user-friendly description of this model.
"etag": "A String", # Output only. A hash of this resource.
@@ -934,14 +934,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
@@ -961,14 +961,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
@@ -982,6 +982,14 @@
"startTime": "A String", # The start time of this training run.
"results": [ # Output of each iteration run, results.size() <= max_iterations.
{ # Information about a single iteration of the training run.
+ "clusterInfos": [ # Information about top clusters for clustering models.
+ { # Information about a single cluster for clustering model.
+ "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
+ # to each point assigned to the cluster.
+ "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
+ "centroidId": "A String", # Centroid id.
+ },
+ ],
"trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
"evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
"index": 42, # Index of the iteration, 0 based.
@@ -992,31 +1000,31 @@
"arimaModelInfo": [ # This message is repeated because there are multiple arima models
# fitted in auto-arima. For non-auto-arima model, its size is one.
{ # Arima model information.
- "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
- "aic": 3.14, # AIC.
- "logLikelihood": 3.14, # Log-likelihood.
- "variance": 3.14, # Variance.
- },
- "timeSeriesId": "A String", # The id to indicate different time series.
- "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
- "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
- 3.14,
- ],
- "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
- 3.14,
- ],
- "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
- },
- "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
- # when d is not 1.
"seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported
# for one time series.
"A String",
],
"nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
- "q": "A String", # Order of the moving-average part.
"d": "A String", # Order of the differencing part.
"p": "A String", # Order of the autoregressive part.
+ "q": "A String", # Order of the moving-average part.
+ },
+ "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
+ "logLikelihood": 3.14, # Log-likelihood.
+ "variance": 3.14, # Variance.
+ "aic": 3.14, # AIC.
+ },
+ "timeSeriesId": "A String", # The id to indicate different time series.
+ "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
+ # when d is not 1.
+ "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
+ "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
+ 3.14,
+ ],
+ "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
+ "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
+ 3.14,
+ ],
},
},
],
@@ -1025,86 +1033,12 @@
"A String",
],
},
- "clusterInfos": [ # Information about top clusters for clustering models.
- { # Information about a single cluster for clustering model.
- "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
- "centroidId": "A String", # Centroid id.
- "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
- # to each point assigned to the cluster.
- },
- ],
},
],
"evaluationMetrics": { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
# end of training.
# data or just the eval data based on whether eval data was used during
# training. These are not present for imported models.
- "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
- "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
- # models, the metrics are either macro-averaged or micro-averaged. When
- # macro-averaged, the metrics are calculated for each label and then an
- # unweighted average is taken of those values. When micro-averaged, the
- # metric is calculated globally by counting the total number of correctly
- # predicted rows.
- "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
- # positive prediction. For multiclass this is a macro-averaged metric.
- "threshold": 3.14, # Threshold at which the metrics are computed. For binary
- # classification models this is the positive class threshold.
- # For multi-class classfication models this is the confidence
- # threshold.
- "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
- # metric.
- "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
- "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
- # this is a macro-averaged metric.
- "precision": 3.14, # Precision is the fraction of actual positive predictions that had
- # positive actual labels. For multiclass this is a macro-averaged
- # metric treating each class as a binary classifier.
- "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
- # multiclass this is a micro-averaged metric.
- },
- "negativeLabel": "A String", # Label representing the negative class.
- "positiveLabel": "A String", # Label representing the positive class.
- "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
- { # Confusion matrix for binary classification models.
- "falseNegatives": "A String", # Number of false samples predicted as false.
- "falsePositives": "A String", # Number of false samples predicted as true.
- "trueNegatives": "A String", # Number of true samples predicted as false.
- "f1Score": 3.14, # The equally weighted average of recall and precision.
- "precision": 3.14, # The fraction of actual positive predictions that had positive actual
- # labels.
- "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
- "accuracy": 3.14, # The fraction of predictions given the correct label.
- "truePositives": "A String", # Number of true samples predicted as true.
- "recall": 3.14, # The fraction of actual positive labels that were given a positive
- # prediction.
- },
- ],
- },
- "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
- # factorization models.
- # factorization models.
- "meanSquaredError": 3.14, # Mean squared error.
- "rSquared": 3.14, # R^2 score.
- "medianAbsoluteError": 3.14, # Median absolute error.
- "meanSquaredLogError": 3.14, # Mean squared log error.
- "meanAbsoluteError": 3.14, # Mean absolute error.
- },
- "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
- # models.
- # feedback_type=implicit.
- "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
- # then averages all the precisions across all the users.
- "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
- # predicted confidence by comparing it to an ideal rank measured by the
- # original ratings.
- "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
- # from the predicted confidence and dividing it by the original rank.
- "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
- # recommendation models except instead of computing the rating directly,
- # the output from evaluate is computed against a preference which is 1 or 0
- # depending on if the rating exists or not.
- },
"multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
"aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
# models, the metrics are either macro-averaged or micro-averaged. When
@@ -1135,15 +1069,15 @@
# confusion matrix.
"rows": [ # One row per actual label.
{ # A single row in the confusion matrix.
+ "actualLabel": "A String", # The original label of this row.
"entries": [ # Info describing predicted label distribution.
{ # A single entry in the confusion matrix.
+ "itemCount": "A String", # Number of items being predicted as this label.
"predictedLabel": "A String", # The predicted label. For confidence_threshold > 0, we will
# also add an entry indicating the number of items under the
# confidence threshold.
- "itemCount": "A String", # Number of items being predicted as this label.
},
],
- "actualLabel": "A String", # The original label of this row.
},
],
},
@@ -1154,11 +1088,10 @@
"daviesBouldinIndex": 3.14, # Davies-Bouldin index.
"clusters": [ # [Beta] Information for all clusters.
{ # Message containing the information about one cluster.
+ "centroidId": "A String", # Centroid id.
"count": "A String", # Count of training data rows that were assigned to this cluster.
"featureValues": [ # Values of highly variant features for this cluster.
{ # Representative value of a single feature within the cluster.
- "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
- # feature.
"featureColumn": "A String", # The feature column name.
"categoricalValue": { # Representative value of a categorical feature. # The categorical feature value.
"categoryCounts": [ # Counts of all categories for the categorical feature. If there are
@@ -1172,33 +1105,82 @@
},
],
},
+ "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
+ # feature.
},
],
- "centroidId": "A String", # Centroid id.
},
],
},
+ "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+ "positiveLabel": "A String", # Label representing the positive class.
+ "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
+ { # Confusion matrix for binary classification models.
+ "f1Score": 3.14, # The equally weighted average of recall and precision.
+ "precision": 3.14, # The fraction of actual positive predictions that had positive actual
+ # labels.
+ "accuracy": 3.14, # The fraction of predictions given the correct label.
+ "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
+ "truePositives": "A String", # Number of true samples predicted as true.
+ "recall": 3.14, # The fraction of actual positive labels that were given a positive
+ # prediction.
+ "falseNegatives": "A String", # Number of false samples predicted as false.
+ "trueNegatives": "A String", # Number of true samples predicted as false.
+ "falsePositives": "A String", # Number of false samples predicted as true.
+ },
+ ],
+ "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+ # models, the metrics are either macro-averaged or micro-averaged. When
+ # macro-averaged, the metrics are calculated for each label and then an
+ # unweighted average is taken of those values. When micro-averaged, the
+ # metric is calculated globally by counting the total number of correctly
+ # predicted rows.
+ "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
+ # positive prediction. For multiclass this is a macro-averaged metric.
+ "threshold": 3.14, # Threshold at which the metrics are computed. For binary
+ # classification models this is the positive class threshold.
+ # For multi-class classification models this is the confidence
+ # threshold.
+ "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+ # metric.
+ "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
+ # this is a macro-averaged metric.
+ "precision": 3.14, # Precision is the fraction of actual positive predictions that had
+ # positive actual labels. For multiclass this is a macro-averaged
+ # metric treating each class as a binary classifier.
+ "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
+ # multiclass this is a micro-averaged metric.
+ },
+ "negativeLabel": "A String", # Label representing the negative class.
+ },
+ "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+ # factorization models.
+ # factorization models.
+ "medianAbsoluteError": 3.14, # Median absolute error.
+ "meanSquaredLogError": 3.14, # Mean squared log error.
+ "meanAbsoluteError": 3.14, # Mean absolute error.
+ "meanSquaredError": 3.14, # Mean squared error.
+ "rSquared": 3.14, # R^2 score.
+ },
+ "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+ # models.
+ # feedback_type=implicit.
+ "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
+ # then averages all the precisions across all the users.
+ "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
+ # predicted confidence by comparing it to an ideal rank measured by the
+ # original ratings.
+ "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
+ # from the predicted confidence and dividing it by the original rank.
+ "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
+ # recommendation models except instead of computing the rating directly,
+ # the output from evaluate is computed against a preference which is 1 or 0
+ # depending on if the rating exists or not.
+ },
},
"trainingOptions": { # Options that were used for this training run, includes
# user specified and default options that were used.
- "dropout": 3.14, # Dropout probability for dnn models.
- "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
- "labelClassWeights": { # Weights associated with each label class, for rebalancing the
- # training data. Only applicable for classification models.
- "a_key": 3.14,
- },
- "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
- # overfitting for boosted tree models.
- "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
- # any more (compared to min_relative_progress). Used only for iterative
- # training algorithms.
- "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
- # of data will be used as training data. The format should be double.
- # Accurate to two decimal places.
- # Default value is 0.2.
- "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
- # strategy.
- "itemColumn": "A String", # Item column specified for matrix factorization models.
"inputLabelColumns": [ # Name of input label columns in training data.
"A String",
],
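The binary confusion-matrix fields above are redundant by construction, which makes them easy to sanity-check. A small sketch with hypothetical counts (the API returns truePositives and friends as int64 strings, so real values need casting first):

# Hypothetical counts for one binaryConfusionMatrixList entry.
tp, fp, fn, tn = 80, 20, 10, 90

precision = tp / (tp + fp)                          # 0.80
recall = tp / (tp + fn)                             # ~0.889
accuracy = (tp + tn) / (tp + fp + fn + tn)          # 0.85
f1 = 2 * precision * recall / (precision + recall)  # harmonic mean, ~0.842

print(precision, recall, accuracy, f1)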
@@ -1243,27 +1225,45 @@
"numClusters": "A String", # Number of clusters for clustering models.
"dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
"minSplitLoss": 3.14, # Minimum split loss for boosted tree models.
+ "dropout": 3.14, # Dropout probability for dnn models.
+ "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
+ "labelClassWeights": { # Weights associated with each label class, for rebalancing the
+ # training data. Only applicable for classification models.
+ "a_key": 3.14,
+ },
+ "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
+ # overfitting for boosted tree models.
+ "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
+ # any more (compared to min_relative_progress). Used only for iterative
+ # training algorithms.
+ "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
+ # of the data will be used as training data. The format should be double.
+ # Accurate to two decimal places.
+ # Default value is 0.2.
+ "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
+ # strategy.
+ "itemColumn": "A String", # Item column specified for matrix factorization models.
},
"dataSplitResult": { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
# actually split.
# data tables that were used to train the model.
"trainingTable": { # Table reference of the training data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
"evaluationTable": { # Table reference of the evaluation data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
},
},
],
"modelReference": { # Required. Unique identifier for this model.
+ "projectId": "A String", # [Required] The ID of the project containing this model.
"datasetId": "A String", # [Required] The ID of the dataset containing this model.
"modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
- "projectId": "A String", # [Required] The ID of the project containing this model.
},
"description": "A String", # Optional. A user-friendly description of this model.
"etag": "A String", # Output only. A hash of this resource.
@@ -1309,14 +1309,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
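StandardSqlDataType is recursive: arrayElementType and structType each nest the same message. A sketch of rendering one back to SQL-ish text, fed with the struct<x string, array<date>> example from the comments above:

def format_type(t):
    """Render a StandardSqlDataType dict as SQL-like text."""
    kind = t["typeKind"]  # required; e.g. "INT64", "DATE", "ARRAY", "STRUCT"
    if kind == "ARRAY":
        return "ARRAY<%s>" % format_type(t["arrayElementType"])
    if kind == "STRUCT":
        return "STRUCT<%s>" % ", ".join(
            "%s %s" % (f["name"], format_type(f["type"]))
            for f in t["structType"]["fields"])
    return kind

example = {"typeKind": "STRUCT", "structType": {"fields": [
    {"name": "x", "type": {"typeKind": "STRING"}},
    {"name": "y", "type": {"typeKind": "ARRAY",
                           "arrayElementType": {"typeKind": "DATE"}}},
]}}
print(format_type(example))  # STRUCT<x STRING, y ARRAY<DATE>>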
@@ -1336,14 +1336,14 @@
# {name="x", type={type_kind="STRING"}},
# {name="y", type={type_kind="ARRAY", array_element_type="DATE"}}
# ]}}
+ "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
+ "typeKind": "A String", # Required. The top level type of this field.
+ # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
"structType": { # The fields of this struct, in order, if type_kind = "STRUCT".
"fields": [
# Object with schema name: StandardSqlField
],
},
- "arrayElementType": # Object with schema name: StandardSqlDataType # The type of the array's elements, if type_kind = "ARRAY".
- "typeKind": "A String", # Required. The top level type of this field.
- # Can be any standard SQL data type (e.g., "INT64", "DATE", "ARRAY").
},
},
],
@@ -1357,6 +1357,14 @@
"startTime": "A String", # The start time of this training run.
"results": [ # Output of each iteration run, results.size() <= max_iterations.
{ # Information about a single iteration of the training run.
+ "clusterInfos": [ # Information about top clusters for clustering models.
+ { # Information about a single cluster for clustering model.
+ "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
+ # to each point assigned to the cluster.
+ "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
+ "centroidId": "A String", # Centroid id.
+ },
+ ],
"trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
"evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
"index": 42, # Index of the iteration, 0 based.
@@ -1367,31 +1375,31 @@
"arimaModelInfo": [ # This message is repeated because there are multiple arima models
# fitted in auto-arima. For non-auto-arima models, its size is one.
{ # Arima model information.
- "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
- "aic": 3.14, # AIC.
- "logLikelihood": 3.14, # Log-likelihood.
- "variance": 3.14, # Variance.
- },
- "timeSeriesId": "A String", # The id to indicate different time series.
- "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
- "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
- 3.14,
- ],
- "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
- 3.14,
- ],
- "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
- },
- "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
- # when d is not 1.
"seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported
# for one time series.
"A String",
],
"nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
- "q": "A String", # Order of the moving-average part.
"d": "A String", # Order of the differencing part.
"p": "A String", # Order of the autoregressive part.
+ "q": "A String", # Order of the moving-average part.
+ },
+ "arimaFittingMetrics": { # ARIMA model fitting metrics. # Arima fitting metrics.
+ "logLikelihood": 3.14, # Log-likelihood.
+ "variance": 3.14, # Variance.
+ "aic": 3.14, # AIC.
+ },
+ "timeSeriesId": "A String", # The id to indicate different time series.
+ "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false
+ # when d is not 1.
+ "arimaCoefficients": { # Arima coefficients. # Arima coefficients.
+ "autoRegressiveCoefficients": [ # Auto-regressive coefficients, an array of double.
+ 3.14,
+ ],
+ "interceptCoefficient": 3.14, # Intercept coefficient, just a double not an array.
+ "movingAverageCoefficients": [ # Moving-average coefficients, an array of double.
+ 3.14,
+ ],
},
},
],
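Since auto-arima reports one arimaModelInfo entry per fitted candidate, a natural consumer move is to keep the lowest-AIC fit. A sketch over hypothetical candidates shaped like the message above (note p, d, q come back as int64 strings):

def pick_best_arima(arima_model_info):
    """Label the candidate with the lowest AIC (lower is better)."""
    best = min(arima_model_info,
               key=lambda m: m["arimaFittingMetrics"]["aic"])
    order = best["nonSeasonalOrder"]
    drift = " + drift" if best.get("hasDrift") else ""
    return "ARIMA(%s, %s, %s)%s" % (order["p"], order["d"], order["q"], drift)

print(pick_best_arima([
    {"arimaFittingMetrics": {"aic": 310.2, "logLikelihood": -152.1, "variance": 1.9},
     "nonSeasonalOrder": {"p": "1", "d": "1", "q": "2"}, "hasDrift": False},
    {"arimaFittingMetrics": {"aic": 297.8, "logLikelihood": -145.9, "variance": 1.7},
     "nonSeasonalOrder": {"p": "2", "d": "1", "q": "1"}, "hasDrift": True},
]))  # ARIMA(2, 1, 1) + drift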
@@ -1400,86 +1408,12 @@
"A String",
],
},
- "clusterInfos": [ # Information about top clusters for clustering models.
- { # Information about a single cluster for clustering model.
- "clusterSize": "A String", # Cluster size, the total number of points assigned to the cluster.
- "centroidId": "A String", # Centroid id.
- "clusterRadius": 3.14, # Cluster radius, the average distance from centroid
- # to each point assigned to the cluster.
- },
- ],
},
],
"evaluationMetrics": { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
# end of training.
# data or just the eval data based on whether eval data was used during
# training. These are not present for imported models.
- "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
- "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
- # models, the metrics are either macro-averaged or micro-averaged. When
- # macro-averaged, the metrics are calculated for each label and then an
- # unweighted average is taken of those values. When micro-averaged, the
- # metric is calculated globally by counting the total number of correctly
- # predicted rows.
- "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
- # positive prediction. For multiclass this is a macro-averaged metric.
- "threshold": 3.14, # Threshold at which the metrics are computed. For binary
- # classification models this is the positive class threshold.
- # For multi-class classfication models this is the confidence
- # threshold.
- "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
- # metric.
- "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
- "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
- # this is a macro-averaged metric.
- "precision": 3.14, # Precision is the fraction of actual positive predictions that had
- # positive actual labels. For multiclass this is a macro-averaged
- # metric treating each class as a binary classifier.
- "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
- # multiclass this is a micro-averaged metric.
- },
- "negativeLabel": "A String", # Label representing the negative class.
- "positiveLabel": "A String", # Label representing the positive class.
- "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
- { # Confusion matrix for binary classification models.
- "falseNegatives": "A String", # Number of false samples predicted as false.
- "falsePositives": "A String", # Number of false samples predicted as true.
- "trueNegatives": "A String", # Number of true samples predicted as false.
- "f1Score": 3.14, # The equally weighted average of recall and precision.
- "precision": 3.14, # The fraction of actual positive predictions that had positive actual
- # labels.
- "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
- "accuracy": 3.14, # The fraction of predictions given the correct label.
- "truePositives": "A String", # Number of true samples predicted as true.
- "recall": 3.14, # The fraction of actual positive labels that were given a positive
- # prediction.
- },
- ],
- },
- "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
- # factorization models.
- # factorization models.
- "meanSquaredError": 3.14, # Mean squared error.
- "rSquared": 3.14, # R^2 score.
- "medianAbsoluteError": 3.14, # Median absolute error.
- "meanSquaredLogError": 3.14, # Mean squared log error.
- "meanAbsoluteError": 3.14, # Mean absolute error.
- },
- "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
- # models.
- # feedback_type=implicit.
- "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
- # then averages all the precisions across all the users.
- "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
- # predicted confidence by comparing it to an ideal rank measured by the
- # original ratings.
- "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
- # from the predicted confidence and dividing it by the original rank.
- "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
- # recommendation models except instead of computing the rating directly,
- # the output from evaluate is computed against a preference which is 1 or 0
- # depending on if the rating exists or not.
- },
"multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
"aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
# models, the metrics are either macro-averaged or micro-averaged. When
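The macro/micro wording in aggregateClassificationMetrics is easy to get backwards; a worked sketch with three hypothetical classes shows how far the two averages can diverge:

# Hypothetical (true positives, false positives) per class.
per_class = {"a": (90, 10), "b": (5, 5), "c": (1, 9)}

# Macro-averaged: precision per class, then an unweighted average.
macro = sum(tp / (tp + fp) for tp, fp in per_class.values()) / len(per_class)

# Micro-averaged: pool the counts globally, then take one precision.
tp_total = sum(tp for tp, _ in per_class.values())
fp_total = sum(fp for _, fp in per_class.values())
micro = tp_total / (tp_total + fp_total)

print(round(macro, 3), round(micro, 3))  # 0.5 0.8 -- rare classes drag the macro average down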
@@ -1510,15 +1444,15 @@
# confusion matrix.
"rows": [ # One row per actual label.
{ # A single row in the confusion matrix.
+ "actualLabel": "A String", # The original label of this row.
"entries": [ # Info describing predicted label distribution.
{ # A single entry in the confusion matrix.
+ "itemCount": "A String", # Number of items being predicted as this label.
"predictedLabel": "A String", # The predicted label. For confidence_threshold > 0, we will
# also add an entry indicating the number of items under the
# confidence threshold.
- "itemCount": "A String", # Number of items being predicted as this label.
},
],
- "actualLabel": "A String", # The original label of this row.
},
],
},
@@ -1529,11 +1463,10 @@
"daviesBouldinIndex": 3.14, # Davies-Bouldin index.
"clusters": [ # [Beta] Information for all clusters.
{ # Message containing the information about one cluster.
+ "centroidId": "A String", # Centroid id.
"count": "A String", # Count of training data rows that were assigned to this cluster.
"featureValues": [ # Values of highly variant features for this cluster.
{ # Representative value of a single feature within the cluster.
- "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
- # feature.
"featureColumn": "A String", # The feature column name.
"categoricalValue": { # Representative value of a categorical feature. # The categorical feature value.
"categoryCounts": [ # Counts of all categories for the categorical feature. If there are
@@ -1547,33 +1480,82 @@
},
],
},
+ "numericalValue": 3.14, # The numerical feature value. This is the centroid value for this
+ # feature.
},
],
- "centroidId": "A String", # Centroid id.
},
],
},
+ "binaryClassificationMetrics": { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+ "positiveLabel": "A String", # Label representing the positive class.
+ "binaryConfusionMatrixList": [ # Binary confusion matrix at multiple thresholds.
+ { # Confusion matrix for binary classification models.
+ "f1Score": 3.14, # The equally weighted average of recall and precision.
+ "precision": 3.14, # The fraction of actual positive predictions that had positive actual
+ # labels.
+ "accuracy": 3.14, # The fraction of predictions given the correct label.
+ "positiveClassThreshold": 3.14, # Threshold value used when computing each of the following metric.
+ "truePositives": "A String", # Number of true samples predicted as true.
+ "recall": 3.14, # The fraction of actual positive labels that were given a positive
+ # prediction.
+ "falseNegatives": "A String", # Number of false samples predicted as false.
+ "trueNegatives": "A String", # Number of true samples predicted as false.
+ "falsePositives": "A String", # Number of false samples predicted as true.
+ },
+ ],
+ "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+ # models, the metrics are either macro-averaged or micro-averaged. When
+ # macro-averaged, the metrics are calculated for each label and then an
+ # unweighted average is taken of those values. When micro-averaged, the
+ # metric is calculated globally by counting the total number of correctly
+ # predicted rows.
+ "recall": 3.14, # Recall is the fraction of actual positive labels that were given a
+ # positive prediction. For multiclass this is a macro-averaged metric.
+ "threshold": 3.14, # Threshold at which the metrics are computed. For binary
+ # classification models this is the positive class threshold.
+ # For multi-class classification models this is the confidence
+ # threshold.
+ "rocAuc": 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+ # metric.
+ "logLoss": 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ "f1Score": 3.14, # The F1 score is an average of recall and precision. For multiclass
+ # this is a macro-averaged metric.
+ "precision": 3.14, # Precision is the fraction of actual positive predictions that had
+ # positive actual labels. For multiclass this is a macro-averaged
+ # metric treating each class as a binary classifier.
+ "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For
+ # multiclass this is a micro-averaged metric.
+ },
+ "negativeLabel": "A String", # Label representing the negative class.
+ },
+ "regressionMetrics": { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+ # factorization models.
+ # factorization models.
+ "medianAbsoluteError": 3.14, # Median absolute error.
+ "meanSquaredLogError": 3.14, # Mean squared log error.
+ "meanAbsoluteError": 3.14, # Mean absolute error.
+ "meanSquaredError": 3.14, # Mean squared error.
+ "rSquared": 3.14, # R^2 score.
+ },
+ "rankingMetrics": { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+ # models.
+ # feedback_type=implicit.
+ "meanAveragePrecision": 3.14, # Calculates a precision per user for all the items by ranking them and
+ # then averages all the precisions across all the users.
+ "normalizedDiscountedCumulativeGain": 3.14, # A metric to determine the goodness of a ranking calculated from the
+ # predicted confidence by comparing it to an ideal rank measured by the
+ # original ratings.
+ "averageRank": 3.14, # Determines the goodness of a ranking by computing the percentile rank
+ # from the predicted confidence and dividing it by the original rank.
+ "meanSquaredError": 3.14, # Similar to the mean squared error computed in regression and explicit
+ # recommendation models except instead of computing the rating directly,
+ # the output from evaluate is computed against a preference which is 1 or 0
+ # depending on if the rating exists or not.
+ },
},
"trainingOptions": { # Options that were used for this training run, includes
# user specified and default options that were used.
- "dropout": 3.14, # Dropout probability for dnn models.
- "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
- "labelClassWeights": { # Weights associated with each label class, for rebalancing the
- # training data. Only applicable for classification models.
- "a_key": 3.14,
- },
- "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
- # overfitting for boosted tree models.
- "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
- # any more (compared to min_relative_progress). Used only for iterative
- # training algorithms.
- "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
- # of data will be used as training data. The format should be double.
- # Accurate to two decimal places.
- # Default value is 0.2.
- "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
- # strategy.
- "itemColumn": "A String", # Item column specified for matrix factorization models.
"inputLabelColumns": [ # Name of input label columns in training data.
"A String",
],
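The [Alpha] rankingMetrics block above only appears for implicit-feedback matrix factorization models, so defensive access is sensible; a sketch with model fetched as in the earlier examples:

def print_ranking_metrics(model):
    metrics = model["trainingRuns"][-1]["evaluationMetrics"]
    ranking = metrics.get("rankingMetrics")  # absent for other model types
    if ranking is None:
        return
    print("MAP:      %.4f" % ranking["meanAveragePrecision"])
    print("NDCG:     %.4f" % ranking["normalizedDiscountedCumulativeGain"])
    print("avg rank: %.4f" % ranking["averageRank"])
    # This MSE is computed against 1/0 preferences (rating present or
    # not), not against the raw rating values.
    print("MSE:      %.4f" % ranking["meanSquaredError"])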
@@ -1618,27 +1600,45 @@
"numClusters": "A String", # Number of clusters for clustering models.
"dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
"minSplitLoss": 3.14, # Minimum split loss for boosted tree models.
+ "dropout": 3.14, # Dropout probability for dnn models.
+ "learnRate": 3.14, # Learning rate in training. Used only for iterative training algorithms.
+ "labelClassWeights": { # Weights associated with each label class, for rebalancing the
+ # training data. Only applicable for classification models.
+ "a_key": 3.14,
+ },
+ "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent
+ # overfitting for boosted tree models.
+ "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly
+ # any more (compared to min_relative_progress). Used only for iterative
+ # training algorithms.
+ "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest
+ # of the data will be used as training data. The format should be double.
+ # Accurate to two decimal places.
+ # Default value is 0.2.
+ "initialLearnRate": 3.14, # Specifies the initial learning rate for the line search learn rate
+ # strategy.
+ "itemColumn": "A String", # Item column specified for matrix factorization models.
},
"dataSplitResult": { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
# actually split.
# data tables that were used to train the model.
"trainingTable": { # Table reference of the training data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
"evaluationTable": { # Table reference of the evaluation data after split.
- "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"projectId": "A String", # [Required] The ID of the project containing this table.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
+ "tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
},
},
},
],
"modelReference": { # Required. Unique identifier for this model.
+ "projectId": "A String", # [Required] The ID of the project containing this model.
"datasetId": "A String", # [Required] The ID of the dataset containing this model.
"modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
- "projectId": "A String", # [Required] The ID of the project containing this model.
},
"description": "A String", # Optional. A user-friendly description of this model.
"etag": "A String", # Output only. A hash of this resource.