docs: update docs (#916)

* fix: re-run script

* test: fix noxfile
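
The regenerated reference only reorders keys and swaps the positional order of
maxResults and pageToken in the documented list() signature; callers that pass
keyword arguments are unaffected. A minimal usage sketch of the documented
models().list / list_next pattern, assuming application-default credentials and
placeholder project/dataset IDs:

    from googleapiclient.discovery import build

    # Build the BigQuery v2 client (uses application-default credentials).
    service = build("bigquery", "v2")

    # List models one page at a time. Keyword arguments are unaffected by the
    # reordered signature shown in the regenerated docs.
    request = service.models().list(
        projectId="my-project",   # placeholder project ID
        datasetId="my_dataset",   # placeholder dataset ID
        maxResults=50,
    )
    while request is not None:
        response = request.execute()
        for model in response.get("models", []):
            print(model["modelReference"]["modelId"])
        # list_next(previous_request, previous_response) returns None when
        # there are no further pages.
        request = service.models().list_next(
            previous_request=request, previous_response=response
        )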
diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html
index 91fc1e6..6d1b63c 100644
--- a/docs/dyn/bigquery_v2.models.html
+++ b/docs/dyn/bigquery_v2.models.html
@@ -81,7 +81,7 @@
   <code><a href="#get">get(projectId, datasetId, modelId)</a></code></p>
 <p class="firstline">Gets the specified model resource by model ID.</p>
 <p class="toc_element">
-  <code><a href="#list">list(projectId, datasetId, pageToken=None, maxResults=None)</a></code></p>
+  <code><a href="#list">list(projectId, datasetId, maxResults=None, pageToken=None)</a></code></p>
 <p class="firstline">Lists all models in the specified dataset. Requires the READER dataset</p>
 <p class="toc_element">
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
@@ -142,14 +142,14 @@
             #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
             #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
             #    ]}}
+          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
           &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
             &quot;fields&quot;: [
               # Object with schema name: StandardSqlField
             ],
           },
-          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
         },
       },
     ],
@@ -169,14 +169,14 @@
             #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
             #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
             #    ]}}
+          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
           &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
             &quot;fields&quot;: [
               # Object with schema name: StandardSqlField
             ],
           },
-          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
         },
       },
     ],
@@ -190,6 +190,14 @@
         &quot;startTime&quot;: &quot;A String&quot;, # The start time of this training run.
         &quot;results&quot;: [ # Output of each iteration run, results.size() &lt;= max_iterations.
           { # Information about a single iteration of the training run.
+            &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
+              { # Information about a single cluster for clustering model.
+                &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
+                    # to each point assigned to the cluster.
+                &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
+                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
+              },
+            ],
             &quot;trainingLoss&quot;: 3.14, # Loss computed on the training data at the end of iteration.
             &quot;evalLoss&quot;: 3.14, # Loss computed on the eval data at the end of iteration.
             &quot;index&quot;: 42, # Index of the iteration, 0 based.
@@ -200,31 +208,31 @@
               &quot;arimaModelInfo&quot;: [ # This message is repeated because there are multiple arima models
                   # fitted in auto-arima. For non-auto-arima model, its size is one.
                 { # Arima model information.
-                  &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
-                    &quot;aic&quot;: 3.14, # AIC.
-                    &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
-                    &quot;variance&quot;: 3.14, # Variance.
-                  },
-                  &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
-                  &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
-                    &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
-                      3.14,
-                    ],
-                    &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
-                      3.14,
-                    ],
-                    &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
-                  },
-                  &quot;hasDrift&quot;: True or False, # Whether Arima model fitted with drift or not. It is always false
-                      # when d is not 1.
                   &quot;seasonalPeriods&quot;: [ # Seasonal periods. Repeated because multiple periods are supported
                       # for one time series.
                     &quot;A String&quot;,
                   ],
                   &quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
-                    &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
                     &quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
                     &quot;p&quot;: &quot;A String&quot;, # Order of the autoregressive part.
+                    &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
+                  },
+                  &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
+                    &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
+                    &quot;variance&quot;: 3.14, # Variance.
+                    &quot;aic&quot;: 3.14, # AIC.
+                  },
+                  &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
+                  &quot;hasDrift&quot;: True or False, # Whether Arima model fitted with drift or not. It is always false
+                      # when d is not 1.
+                  &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
+                    &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
+                      3.14,
+                    ],
+                    &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
+                    &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
+                      3.14,
+                    ],
                   },
                 },
               ],
@@ -233,86 +241,12 @@
                 &quot;A String&quot;,
               ],
             },
-            &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
-              { # Information about a single cluster for clustering model.
-                &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
-                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
-                &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
-                    # to each point assigned to the cluster.
-              },
-            ],
           },
         ],
         &quot;evaluationMetrics&quot;: { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
             # end of training.
             # data or just the eval data based on whether eval data was used during
             # training. These are not present for imported models.
-          &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
-            &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
-                # models, the metrics are either macro-averaged or micro-averaged. When
-                # macro-averaged, the metrics are calculated for each label and then an
-                # unweighted average is taken of those values. When micro-averaged, the
-                # metric is calculated globally by counting the total number of correctly
-                # predicted rows.
-              &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
-                  # positive prediction. For multiclass this is a macro-averaged metric.
-              &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
-                  # classification models this is the positive class threshold.
-                  # For multi-class classfication models this is the confidence
-                  # threshold.
-              &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
-                  # metric.
-              &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
-              &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
-                  # this is a macro-averaged metric.
-              &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
-                  # positive actual labels. For multiclass this is a macro-averaged
-                  # metric treating each class as a binary classifier.
-              &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
-                  # multiclass this is a micro-averaged metric.
-            },
-            &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
-            &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
-            &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
-              { # Confusion matrix for binary classification models.
-                &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
-                &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
-                &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
-                &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
-                &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
-                    # labels.
-                &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metric.
-                &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
-                &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
-                &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
-                    # prediction.
-              },
-            ],
-          },
-          &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
-              # factorization models.
-              # factorization models.
-            &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
-            &quot;rSquared&quot;: 3.14, # R^2 score.
-            &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
-            &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
-            &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
-          },
-          &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
-              # models.
-              # feedback_type=implicit.
-            &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
-                # then averages all the precisions across all the users.
-            &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
-                # predicted confidence by comparing it to an ideal rank measured by the
-                # original ratings.
-            &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
-                # from the predicted confidence and dividing it by the original rank.
-            &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
-                # recommendation models except instead of computing the rating directly,
-                # the output from evaluate is computed against a preference which is 1 or 0
-                # depending on if the rating exists or not.
-          },
           &quot;multiClassClassificationMetrics&quot;: { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
             &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
                 # models, the metrics are either macro-averaged or micro-averaged. When
@@ -343,15 +277,15 @@
                     # confusion matrix.
                 &quot;rows&quot;: [ # One row per actual label.
                   { # A single row in the confusion matrix.
+                    &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                     &quot;entries&quot;: [ # Info describing predicted label distribution.
                       { # A single entry in the confusion matrix.
+                        &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                         &quot;predictedLabel&quot;: &quot;A String&quot;, # The predicted label. For confidence_threshold &gt; 0, we will
                             # also add an entry indicating the number of items under the
                             # confidence threshold.
-                        &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                       },
                     ],
-                    &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                   },
                 ],
               },
@@ -362,11 +296,10 @@
             &quot;daviesBouldinIndex&quot;: 3.14, # Davies-Bouldin index.
             &quot;clusters&quot;: [ # [Beta] Information for all clusters.
               { # Message containing the information about one cluster.
+                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
                 &quot;count&quot;: &quot;A String&quot;, # Count of training data rows that were assigned to this cluster.
                 &quot;featureValues&quot;: [ # Values of highly variant features for this cluster.
                   { # Representative value of a single feature within the cluster.
-                    &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
-                        # feature.
                     &quot;featureColumn&quot;: &quot;A String&quot;, # The feature column name.
                     &quot;categoricalValue&quot;: { # Representative value of a categorical feature. # The categorical feature value.
                       &quot;categoryCounts&quot;: [ # Counts of all categories for the categorical feature. If there are
@@ -380,33 +313,82 @@
                         },
                       ],
                     },
+                    &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
+                        # feature.
                   },
                 ],
-                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
               },
             ],
           },
+          &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+            &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
+            &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
+              { # Confusion matrix for binary classification models.
+                &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
+                &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
+                    # labels.
+                &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
+                &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metric.
+                &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
+                &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
+                    # prediction.
+                &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
+                &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
+                &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
+              },
+            ],
+            &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+                # models, the metrics are either macro-averaged or micro-averaged. When
+                # macro-averaged, the metrics are calculated for each label and then an
+                # unweighted average is taken of those values. When micro-averaged, the
+                # metric is calculated globally by counting the total number of correctly
+                # predicted rows.
+              &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
+                  # positive prediction. For multiclass this is a macro-averaged metric.
+              &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
+                  # classification models this is the positive class threshold.
+                  # For multi-class classfication models this is the confidence
+                  # threshold.
+              &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+                  # metric.
+              &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+              &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
+                  # this is a macro-averaged metric.
+              &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
+                  # positive actual labels. For multiclass this is a macro-averaged
+                  # metric treating each class as a binary classifier.
+              &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
+                  # multiclass this is a micro-averaged metric.
+            },
+            &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
+          },
+          &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+              # factorization models.
+              # factorization models.
+            &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
+            &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
+            &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
+            &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
+            &quot;rSquared&quot;: 3.14, # R^2 score.
+          },
+          &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+              # models.
+              # feedback_type=implicit.
+            &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
+                # then averages all the precisions across all the users.
+            &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
+                # predicted confidence by comparing it to an ideal rank measured by the
+                # original ratings.
+            &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
+                # from the predicted confidence and dividing it by the original rank.
+            &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
+                # recommendation models except instead of computing the rating directly,
+                # the output from evaluate is computed against a preference which is 1 or 0
+                # depending on if the rating exists or not.
+          },
         },
         &quot;trainingOptions&quot;: { # Options that were used for this training run, includes
             # user specified and default options that were used.
-          &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
-          &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
-          &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
-              # training data. Only applicable for classification models.
-            &quot;a_key&quot;: 3.14,
-          },
-          &quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent
-              # overfitting for boosted tree models.
-          &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
-              # any more (compared to min_relative_progress). Used only for iterative
-              # training algorithms.
-          &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
-              # of data will be used as training data. The format should be double.
-              # Accurate to two decimal places.
-              # Default value is 0.2.
-          &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
-              # strategy.
-          &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
           &quot;inputLabelColumns&quot;: [ # Name of input label columns in training data.
             &quot;A String&quot;,
           ],
@@ -451,27 +433,45 @@
           &quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
           &quot;dataSplitMethod&quot;: &quot;A String&quot;, # The data split type for training and evaluation, e.g. RANDOM.
           &quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+          &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
+          &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
+          &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
+              # training data. Only applicable for classification models.
+            &quot;a_key&quot;: 3.14,
+          },
+          &quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent
+              # overfitting for boosted tree models.
+          &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
+              # any more (compared to min_relative_progress). Used only for iterative
+              # training algorithms.
+          &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
+              # of data will be used as training data. The format should be double.
+              # Accurate to two decimal places.
+              # Default value is 0.2.
+          &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
+              # strategy.
+          &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
         },
         &quot;dataSplitResult&quot;: { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
             # actually split.
             # data tables that were used to train the model.
           &quot;trainingTable&quot;: { # Table reference of the training data after split.
-            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
             &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
             &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
           },
           &quot;evaluationTable&quot;: { # Table reference of the evaluation data after split.
-            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
             &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
             &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
           },
         },
       },
     ],
     &quot;modelReference&quot;: { # Required. Unique identifier for this model.
+      &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
       &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this model.
       &quot;modelId&quot;: &quot;A String&quot;, # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
-      &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
     },
     &quot;description&quot;: &quot;A String&quot;, # Optional. A user-friendly description of this model.
     &quot;etag&quot;: &quot;A String&quot;, # Output only. A hash of this resource.
@@ -486,17 +486,17 @@
 </div>
 
 <div class="method">
-    <code class="details" id="list">list(projectId, datasetId, pageToken=None, maxResults=None)</code>
+    <code class="details" id="list">list(projectId, datasetId, maxResults=None, pageToken=None)</code>
   <pre>Lists all models in the specified dataset. Requires the READER dataset
 role.
 
 Args:
   projectId: string, Required. Project ID of the models to list. (required)
   datasetId: string, Required. Dataset ID of the models to list. (required)
-  pageToken: string, Page token, returned by a previous call to request the next page of
-results
   maxResults: integer, The maximum number of results to return in a single response page.
 Leverage the page tokens to iterate through the entire collection.
+  pageToken: string, Page token, returned by a previous call to request the next page of
+results
 
 Returns:
   An object of the form:
@@ -535,14 +535,14 @@
                 #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
                 #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
                 #    ]}}
+              &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+              &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+                  # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
               &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
                 &quot;fields&quot;: [
                   # Object with schema name: StandardSqlField
                 ],
               },
-              &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-              &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-                  # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
             },
           },
         ],
@@ -562,14 +562,14 @@
                 #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
                 #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
                 #    ]}}
+              &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+              &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+                  # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
               &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
                 &quot;fields&quot;: [
                   # Object with schema name: StandardSqlField
                 ],
               },
-              &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-              &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-                  # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
             },
           },
         ],
@@ -583,6 +583,14 @@
             &quot;startTime&quot;: &quot;A String&quot;, # The start time of this training run.
             &quot;results&quot;: [ # Output of each iteration run, results.size() &lt;= max_iterations.
               { # Information about a single iteration of the training run.
+                &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
+                  { # Information about a single cluster for clustering model.
+                    &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
+                        # to each point assigned to the cluster.
+                    &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
+                    &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
+                  },
+                ],
                 &quot;trainingLoss&quot;: 3.14, # Loss computed on the training data at the end of iteration.
                 &quot;evalLoss&quot;: 3.14, # Loss computed on the eval data at the end of iteration.
                 &quot;index&quot;: 42, # Index of the iteration, 0 based.
@@ -593,31 +601,31 @@
                   &quot;arimaModelInfo&quot;: [ # This message is repeated because there are multiple arima models
                       # fitted in auto-arima. For non-auto-arima model, its size is one.
                     { # Arima model information.
-                      &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
-                        &quot;aic&quot;: 3.14, # AIC.
-                        &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
-                        &quot;variance&quot;: 3.14, # Variance.
-                      },
-                      &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
-                      &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
-                        &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
-                          3.14,
-                        ],
-                        &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
-                          3.14,
-                        ],
-                        &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
-                      },
-                      &quot;hasDrift&quot;: True or False, # Whether Arima model fitted with drift or not. It is always false
-                          # when d is not 1.
                       &quot;seasonalPeriods&quot;: [ # Seasonal periods. Repeated because multiple periods are supported
                           # for one time series.
                         &quot;A String&quot;,
                       ],
                       &quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
-                        &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
                         &quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
                         &quot;p&quot;: &quot;A String&quot;, # Order of the autoregressive part.
+                        &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
+                      },
+                      &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
+                        &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
+                        &quot;variance&quot;: 3.14, # Variance.
+                        &quot;aic&quot;: 3.14, # AIC.
+                      },
+                      &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
+                      &quot;hasDrift&quot;: True or False, # Whether Arima model fitted with drift or not. It is always false
+                          # when d is not 1.
+                      &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
+                        &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
+                          3.14,
+                        ],
+                        &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
+                        &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
+                          3.14,
+                        ],
                       },
                     },
                   ],
@@ -626,86 +634,12 @@
                     &quot;A String&quot;,
                   ],
                 },
-                &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
-                  { # Information about a single cluster for clustering model.
-                    &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
-                    &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
-                    &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
-                        # to each point assigned to the cluster.
-                  },
-                ],
               },
             ],
             &quot;evaluationMetrics&quot;: { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
                 # end of training.
                 # data or just the eval data based on whether eval data was used during
                 # training. These are not present for imported models.
-              &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
-                &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
-                    # models, the metrics are either macro-averaged or micro-averaged. When
-                    # macro-averaged, the metrics are calculated for each label and then an
-                    # unweighted average is taken of those values. When micro-averaged, the
-                    # metric is calculated globally by counting the total number of correctly
-                    # predicted rows.
-                  &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
-                      # positive prediction. For multiclass this is a macro-averaged metric.
-                  &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
-                      # classification models this is the positive class threshold.
-                      # For multi-class classfication models this is the confidence
-                      # threshold.
-                  &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
-                      # metric.
-                  &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
-                  &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
-                      # this is a macro-averaged metric.
-                  &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
-                      # positive actual labels. For multiclass this is a macro-averaged
-                      # metric treating each class as a binary classifier.
-                  &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
-                      # multiclass this is a micro-averaged metric.
-                },
-                &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
-                &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
-                &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
-                  { # Confusion matrix for binary classification models.
-                    &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
-                    &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
-                    &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
-                    &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
-                    &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
-                        # labels.
-                    &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metric.
-                    &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
-                    &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
-                    &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
-                        # prediction.
-                  },
-                ],
-              },
-              &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
-                  # factorization models.
-                  # factorization models.
-                &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
-                &quot;rSquared&quot;: 3.14, # R^2 score.
-                &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
-                &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
-                &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
-              },
-              &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
-                  # models.
-                  # feedback_type=implicit.
-                &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
-                    # then averages all the precisions across all the users.
-                &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
-                    # predicted confidence by comparing it to an ideal rank measured by the
-                    # original ratings.
-                &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
-                    # from the predicted confidence and dividing it by the original rank.
-                &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
-                    # recommendation models except instead of computing the rating directly,
-                    # the output from evaluate is computed against a preference which is 1 or 0
-                    # depending on if the rating exists or not.
-              },
               &quot;multiClassClassificationMetrics&quot;: { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
                 &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
                     # models, the metrics are either macro-averaged or micro-averaged. When
@@ -736,15 +670,15 @@
                         # confusion matrix.
                     &quot;rows&quot;: [ # One row per actual label.
                       { # A single row in the confusion matrix.
+                        &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                         &quot;entries&quot;: [ # Info describing predicted label distribution.
                           { # A single entry in the confusion matrix.
+                            &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                             &quot;predictedLabel&quot;: &quot;A String&quot;, # The predicted label. For confidence_threshold &gt; 0, we will
                                 # also add an entry indicating the number of items under the
                                 # confidence threshold.
-                            &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                           },
                         ],
-                        &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                       },
                     ],
                   },
@@ -755,11 +689,10 @@
                 &quot;daviesBouldinIndex&quot;: 3.14, # Davies-Bouldin index.
                 &quot;clusters&quot;: [ # [Beta] Information for all clusters.
                   { # Message containing the information about one cluster.
+                    &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
                     &quot;count&quot;: &quot;A String&quot;, # Count of training data rows that were assigned to this cluster.
                     &quot;featureValues&quot;: [ # Values of highly variant features for this cluster.
                       { # Representative value of a single feature within the cluster.
-                        &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
-                            # feature.
                         &quot;featureColumn&quot;: &quot;A String&quot;, # The feature column name.
                         &quot;categoricalValue&quot;: { # Representative value of a categorical feature. # The categorical feature value.
                           &quot;categoryCounts&quot;: [ # Counts of all categories for the categorical feature. If there are
@@ -773,33 +706,82 @@
                             },
                           ],
                         },
+                        &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
+                            # feature.
                       },
                     ],
-                    &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
                   },
                 ],
               },
+              &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+                &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
+                &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
+                  { # Confusion matrix for binary classification models.
+                    &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
+                    &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
+                        # labels.
+                    &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
+                    &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metric.
+                    &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
+                    &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
+                        # prediction.
+                    &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
+                    &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
+                    &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
+                  },
+                ],
+                &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+                    # models, the metrics are either macro-averaged or micro-averaged. When
+                    # macro-averaged, the metrics are calculated for each label and then an
+                    # unweighted average is taken of those values. When micro-averaged, the
+                    # metric is calculated globally by counting the total number of correctly
+                    # predicted rows.
+                  &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
+                      # positive prediction. For multiclass this is a macro-averaged metric.
+                  &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
+                      # classification models this is the positive class threshold.
+                      # For multi-class classfication models this is the confidence
+                      # threshold.
+                  &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+                      # metric.
+                  &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+                  &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
+                      # this is a macro-averaged metric.
+                  &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
+                      # positive actual labels. For multiclass this is a macro-averaged
+                      # metric treating each class as a binary classifier.
+                  &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
+                      # multiclass this is a micro-averaged metric.
+                },
+                &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
+              },
+              &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+                  # factorization models.
+                  # factorization models.
+                &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
+                &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
+                &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
+                &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
+                &quot;rSquared&quot;: 3.14, # R^2 score.
+              },
+              &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+                  # models.
+                  # feedback_type=implicit.
+                &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
+                    # then averages all the precisions across all the users.
+                &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
+                    # predicted confidence by comparing it to an ideal rank measured by the
+                    # original ratings.
+                &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
+                    # from the predicted confidence and dividing it by the original rank.
+                &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
+                    # recommendation models except instead of computing the rating directly,
+                    # the output from evaluate is computed against a preference which is 1 or 0
+                    # depending on if the rating exists or not.
+              },
             },
             &quot;trainingOptions&quot;: { # Options that were used for this training run, includes
                 # user specified and default options that were used.
-              &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
-              &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
-              &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
-                  # training data. Only applicable for classification models.
-                &quot;a_key&quot;: 3.14,
-              },
-              &quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent
-                  # overfitting for boosted tree models.
-              &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
-                  # any more (compared to min_relative_progress). Used only for iterative
-                  # training algorithms.
-              &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
-                  # of data will be used as training data. The format should be double.
-                  # Accurate to two decimal places.
-                  # Default value is 0.2.
-              &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
-                  # strategy.
-              &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
               &quot;inputLabelColumns&quot;: [ # Name of input label columns in training data.
                 &quot;A String&quot;,
               ],
@@ -844,27 +826,45 @@
               &quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
               &quot;dataSplitMethod&quot;: &quot;A String&quot;, # The data split type for training and evaluation, e.g. RANDOM.
               &quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+              &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
+              &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
+              &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
+                  # training data. Only applicable for classification models.
+                &quot;a_key&quot;: 3.14,
+              },
+              &quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent
+                  # overfitting for boosted tree models.
+              &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
+                  # any more (compared to min_relative_progress). Used only for iterative
+                  # training algorithms.
+              &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
+                  # of data will be used as training data. The format should be double.
+                  # Accurate to two decimal places.
+                  # Default value is 0.2.
+              &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
+                  # strategy.
+              &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
             },
             &quot;dataSplitResult&quot;: { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
                 # actually split.
                 # data tables that were used to train the model.
               &quot;trainingTable&quot;: { # Table reference of the training data after split.
-                &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
                 &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
                 &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+                &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
               },
               &quot;evaluationTable&quot;: { # Table reference of the evaluation data after split.
-                &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
                 &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
                 &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+                &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
               },
             },
           },
         ],
         &quot;modelReference&quot;: { # Required. Unique identifier for this model.
+          &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
           &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this model.
           &quot;modelId&quot;: &quot;A String&quot;, # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
-          &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
         },
         &quot;description&quot;: &quot;A String&quot;, # Optional. A user-friendly description of this model.
         &quot;etag&quot;: &quot;A String&quot;, # Output only. A hash of this resource.
@@ -934,14 +934,14 @@
           #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
           #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
           #    ]}}
+        &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+        &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+            # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
         &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
           &quot;fields&quot;: [
             # Object with schema name: StandardSqlField
           ],
         },
-        &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-        &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-            # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
       },
     },
   ],
@@ -961,14 +961,14 @@
           #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
           #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
           #    ]}}
+        &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+        &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+            # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
         &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
           &quot;fields&quot;: [
             # Object with schema name: StandardSqlField
           ],
         },
-        &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-        &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-            # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
       },
     },
   ],
@@ -982,6 +982,14 @@
       &quot;startTime&quot;: &quot;A String&quot;, # The start time of this training run.
       &quot;results&quot;: [ # Output of each iteration run, results.size() &lt;= max_iterations.
         { # Information about a single iteration of the training run.
+          &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
+            { # Information about a single cluster for clustering model.
+              &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
+                  # to each point assigned to the cluster.
+              &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
+              &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
+            },
+          ],
           &quot;trainingLoss&quot;: 3.14, # Loss computed on the training data at the end of iteration.
           &quot;evalLoss&quot;: 3.14, # Loss computed on the eval data at the end of iteration.
           &quot;index&quot;: 42, # Index of the iteration, 0 based.
@@ -992,31 +1000,31 @@
             &quot;arimaModelInfo&quot;: [ # This message is repeated because there are multiple arima models
                 # fitted in auto-arima. For non-auto-arima model, its size is one.
               { # Arima model information.
-                &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
-                  &quot;aic&quot;: 3.14, # AIC.
-                  &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
-                  &quot;variance&quot;: 3.14, # Variance.
-                },
-                &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
-                &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
-                  &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
-                    3.14,
-                  ],
-                  &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
-                    3.14,
-                  ],
-                  &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
-                },
-                &quot;hasDrift&quot;: True or False, # Whether Arima model fitted with drift or not. It is always false
-                    # when d is not 1.
                 &quot;seasonalPeriods&quot;: [ # Seasonal periods. Repeated because multiple periods are supported
                     # for one time series.
                   &quot;A String&quot;,
                 ],
                 &quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
-                  &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
                   &quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
                   &quot;p&quot;: &quot;A String&quot;, # Order of the autoregressive part.
+                  &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
+                },
+                &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
+                  &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
+                  &quot;variance&quot;: 3.14, # Variance.
+                  &quot;aic&quot;: 3.14, # AIC.
+                },
+                &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
+                &quot;hasDrift&quot;: True or False, # Whether the Arima model was fitted with drift or not. It is always false
+                    # when d is not 1.
+                &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
+                  &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
+                    3.14,
+                  ],
+                  &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
+                  &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
+                    3.14,
+                  ],
                 },
               },
             ],
@@ -1025,86 +1033,12 @@
               &quot;A String&quot;,
             ],
           },
-          &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
-            { # Information about a single cluster for clustering model.
-              &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
-              &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
-              &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
-                  # to each point assigned to the cluster.
-            },
-          ],
         },
       ],
       &quot;evaluationMetrics&quot;: { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
           # end of training.
           # data or just the eval data based on whether eval data was used during
           # training. These are not present for imported models.
-        &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
-          &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
-              # models, the metrics are either macro-averaged or micro-averaged. When
-              # macro-averaged, the metrics are calculated for each label and then an
-              # unweighted average is taken of those values. When micro-averaged, the
-              # metric is calculated globally by counting the total number of correctly
-              # predicted rows.
-            &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
-                # positive prediction. For multiclass this is a macro-averaged metric.
-            &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
-                # classification models this is the positive class threshold.
-                # For multi-class classfication models this is the confidence
-                # threshold.
-            &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
-                # metric.
-            &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
-            &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
-                # this is a macro-averaged metric.
-            &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
-                # positive actual labels. For multiclass this is a macro-averaged
-                # metric treating each class as a binary classifier.
-            &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
-                # multiclass this is a micro-averaged metric.
-          },
-          &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
-          &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
-          &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
-            { # Confusion matrix for binary classification models.
-              &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
-              &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
-              &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
-              &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
-              &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
-                  # labels.
-              &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metric.
-              &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
-              &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
-              &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
-                  # prediction.
-            },
-          ],
-        },
-        &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
-            # factorization models.
-            # factorization models.
-          &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
-          &quot;rSquared&quot;: 3.14, # R^2 score.
-          &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
-          &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
-          &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
-        },
-        &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
-            # models.
-            # feedback_type=implicit.
-          &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
-              # then averages all the precisions across all the users.
-          &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
-              # predicted confidence by comparing it to an ideal rank measured by the
-              # original ratings.
-          &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
-              # from the predicted confidence and dividing it by the original rank.
-          &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
-              # recommendation models except instead of computing the rating directly,
-              # the output from evaluate is computed against a preference which is 1 or 0
-              # depending on if the rating exists or not.
-        },
         &quot;multiClassClassificationMetrics&quot;: { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
           &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
               # models, the metrics are either macro-averaged or micro-averaged. When
@@ -1135,15 +1069,15 @@
                   # confusion matrix.
               &quot;rows&quot;: [ # One row per actual label.
                 { # A single row in the confusion matrix.
+                  &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                   &quot;entries&quot;: [ # Info describing predicted label distribution.
                     { # A single entry in the confusion matrix.
+                      &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                       &quot;predictedLabel&quot;: &quot;A String&quot;, # The predicted label. For confidence_threshold &gt; 0, we will
                           # also add an entry indicating the number of items under the
                           # confidence threshold.
-                      &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                     },
                   ],
-                  &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                 },
               ],
             },
@@ -1154,11 +1088,10 @@
           &quot;daviesBouldinIndex&quot;: 3.14, # Davies-Bouldin index.
           &quot;clusters&quot;: [ # [Beta] Information for all clusters.
             { # Message containing the information about one cluster.
+              &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
               &quot;count&quot;: &quot;A String&quot;, # Count of training data rows that were assigned to this cluster.
               &quot;featureValues&quot;: [ # Values of highly variant features for this cluster.
                 { # Representative value of a single feature within the cluster.
-                  &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
-                      # feature.
                   &quot;featureColumn&quot;: &quot;A String&quot;, # The feature column name.
                   &quot;categoricalValue&quot;: { # Representative value of a categorical feature. # The categorical feature value.
                     &quot;categoryCounts&quot;: [ # Counts of all categories for the categorical feature. If there are
@@ -1172,33 +1105,82 @@
                       },
                     ],
                   },
+                  &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
+                      # feature.
                 },
               ],
-              &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
             },
           ],
         },
+        &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+          &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
+          &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
+            { # Confusion matrix for binary classification models.
+              &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
+              &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
+                  # labels.
+              &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
+              &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metrics.
+              &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
+              &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
+                  # prediction.
+              &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
+              &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
+              &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
+            },
+          ],
+          &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+              # models, the metrics are either macro-averaged or micro-averaged. When
+              # macro-averaged, the metrics are calculated for each label and then an
+              # unweighted average is taken of those values. When micro-averaged, the
+              # metric is calculated globally by counting the total number of correctly
+              # predicted rows.
+            &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
+                # positive prediction. For multiclass this is a macro-averaged metric.
+            &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
+                # classification models this is the positive class threshold.
+                # For multi-class classification models this is the confidence
+                # threshold.
+            &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+                # metric.
+            &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+            &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
+                # this is a macro-averaged metric.
+            &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
+                # positive actual labels. For multiclass this is a macro-averaged
+                # metric treating each class as a binary classifier.
+            &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
+                # multiclass this is a micro-averaged metric.
+          },
+          &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
+        },
+        &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+            # factorization models.
+            # factorization models.
+          &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
+          &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
+          &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
+          &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
+          &quot;rSquared&quot;: 3.14, # R^2 score.
+        },
+        &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+            # models.
+            # feedback_type=implicit.
+          &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
+              # then averages all the precisions across all the users.
+          &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
+              # predicted confidence by comparing it to an ideal rank measured by the
+              # original ratings.
+          &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
+              # from the predicted confidence and dividing it by the original rank.
+          &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
+              # recommendation models except instead of computing the rating directly,
+              # the output from evaluate is computed against a preference which is 1 or 0
+              # depending on if the rating exists or not.
+        },
       },
       &quot;trainingOptions&quot;: { # Options that were used for this training run, includes
           # user specified and default options that were used.
-        &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
-        &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
-        &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
-            # training data. Only applicable for classification models.
-          &quot;a_key&quot;: 3.14,
-        },
-        &quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent
-            # overfitting for boosted tree models.
-        &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
-            # any more (compared to min_relative_progress). Used only for iterative
-            # training algorithms.
-        &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
-            # of data will be used as training data. The format should be double.
-            # Accurate to two decimal places.
-            # Default value is 0.2.
-        &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
-            # strategy.
-        &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
         &quot;inputLabelColumns&quot;: [ # Name of input label columns in training data.
           &quot;A String&quot;,
         ],
@@ -1243,27 +1225,45 @@
         &quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
         &quot;dataSplitMethod&quot;: &quot;A String&quot;, # The data split type for training and evaluation, e.g. RANDOM.
         &quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+        &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
+        &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
+        &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
+            # training data. Only applicable for classification models.
+          &quot;a_key&quot;: 3.14,
+        },
+        &quot;subsample&quot;: 3.14, # Subsample fraction of the training data used to grow each tree, to prevent
+            # overfitting in boosted tree models.
+        &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
+            # any more (compared to min_relative_progress). Used only for iterative
+            # training algorithms.
+        &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
+            # of the data will be used as training data. The format should be double.
+            # Accurate to two decimal places.
+            # Default value is 0.2.
+        &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
+            # strategy.
+        &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
       },
       &quot;dataSplitResult&quot;: { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
           # actually split.
           # data tables that were used to train the model.
         &quot;trainingTable&quot;: { # Table reference of the training data after split.
-          &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
           &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
           &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+          &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
         },
         &quot;evaluationTable&quot;: { # Table reference of the evaluation data after split.
-          &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
           &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
           &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+          &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
         },
       },
     },
   ],
   &quot;modelReference&quot;: { # Required. Unique identifier for this model.
+    &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
     &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this model.
     &quot;modelId&quot;: &quot;A String&quot;, # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
-    &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
   },
   &quot;description&quot;: &quot;A String&quot;, # Optional. A user-friendly description of this model.
   &quot;etag&quot;: &quot;A String&quot;, # Output only. A hash of this resource.
@@ -1309,14 +1309,14 @@
             #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
             #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
             #    ]}}
+          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
           &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
             &quot;fields&quot;: [
               # Object with schema name: StandardSqlField
             ],
           },
-          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
         },
       },
     ],
@@ -1336,14 +1336,14 @@
             #      {name=&quot;x&quot;, type={type_kind=&quot;STRING&quot;}},
             #      {name=&quot;y&quot;, type={type_kind=&quot;ARRAY&quot;, array_element_type=&quot;DATE&quot;}}
             #    ]}}
+          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
+          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
+              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
           &quot;structType&quot;: { # The fields of this struct, in order, if type_kind = &quot;STRUCT&quot;.
             &quot;fields&quot;: [
               # Object with schema name: StandardSqlField
             ],
           },
-          &quot;arrayElementType&quot;: # Object with schema name: StandardSqlDataType # The type of the array&#x27;s elements, if type_kind = &quot;ARRAY&quot;.
-          &quot;typeKind&quot;: &quot;A String&quot;, # Required. The top level type of this field.
-              # Can be any standard SQL data type (e.g., &quot;INT64&quot;, &quot;DATE&quot;, &quot;ARRAY&quot;).
         },
       },
     ],
@@ -1357,6 +1357,14 @@
         &quot;startTime&quot;: &quot;A String&quot;, # The start time of this training run.
         &quot;results&quot;: [ # Output of each iteration run, results.size() &lt;= max_iterations.
           { # Information about a single iteration of the training run.
+            &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
+              { # Information about a single cluster for clustering model.
+                &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
+                    # to each point assigned to the cluster.
+                &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
+                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
+              },
+            ],
             &quot;trainingLoss&quot;: 3.14, # Loss computed on the training data at the end of iteration.
             &quot;evalLoss&quot;: 3.14, # Loss computed on the eval data at the end of iteration.
             &quot;index&quot;: 42, # Index of the iteration, 0 based.
@@ -1367,31 +1375,31 @@
               &quot;arimaModelInfo&quot;: [ # This message is repeated because there are multiple arima models
                   # fitted in auto-arima. For non-auto-arima model, its size is one.
                 { # Arima model information.
-                  &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
-                    &quot;aic&quot;: 3.14, # AIC.
-                    &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
-                    &quot;variance&quot;: 3.14, # Variance.
-                  },
-                  &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
-                  &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
-                    &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
-                      3.14,
-                    ],
-                    &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
-                      3.14,
-                    ],
-                    &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
-                  },
-                  &quot;hasDrift&quot;: True or False, # Whether Arima model fitted with drift or not. It is always false
-                      # when d is not 1.
                   &quot;seasonalPeriods&quot;: [ # Seasonal periods. Repeated because multiple periods are supported
                       # for one time series.
                     &quot;A String&quot;,
                   ],
                   &quot;nonSeasonalOrder&quot;: { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
-                    &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
                     &quot;d&quot;: &quot;A String&quot;, # Order of the differencing part.
                     &quot;p&quot;: &quot;A String&quot;, # Order of the autoregressive part.
+                    &quot;q&quot;: &quot;A String&quot;, # Order of the moving-average part.
+                  },
+                  &quot;arimaFittingMetrics&quot;: { # ARIMA model fitting metrics. # Arima fitting metrics.
+                    &quot;logLikelihood&quot;: 3.14, # Log-likelihood.
+                    &quot;variance&quot;: 3.14, # Variance.
+                    &quot;aic&quot;: 3.14, # AIC.
+                  },
+                  &quot;timeSeriesId&quot;: &quot;A String&quot;, # The id to indicate different time series.
+                  &quot;hasDrift&quot;: True or False, # Whether the Arima model was fitted with drift or not. It is always false
+                      # when d is not 1.
+                  &quot;arimaCoefficients&quot;: { # Arima coefficients. # Arima coefficients.
+                    &quot;autoRegressiveCoefficients&quot;: [ # Auto-regressive coefficients, an array of double.
+                      3.14,
+                    ],
+                    &quot;interceptCoefficient&quot;: 3.14, # Intercept coefficient, just a double not an array.
+                    &quot;movingAverageCoefficients&quot;: [ # Moving-average coefficients, an array of double.
+                      3.14,
+                    ],
                   },
                 },
               ],
@@ -1400,86 +1408,12 @@
                 &quot;A String&quot;,
               ],
             },
-            &quot;clusterInfos&quot;: [ # Information about top clusters for clustering models.
-              { # Information about a single cluster for clustering model.
-                &quot;clusterSize&quot;: &quot;A String&quot;, # Cluster size, the total number of points assigned to the cluster.
-                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
-                &quot;clusterRadius&quot;: 3.14, # Cluster radius, the average distance from centroid
-                    # to each point assigned to the cluster.
-              },
-            ],
           },
         ],
         &quot;evaluationMetrics&quot;: { # Evaluation metrics of a model. These are either computed on all training # The evaluation metrics over training/eval data that were computed at the
             # end of training.
             # data or just the eval data based on whether eval data was used during
             # training. These are not present for imported models.
-          &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
-            &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
-                # models, the metrics are either macro-averaged or micro-averaged. When
-                # macro-averaged, the metrics are calculated for each label and then an
-                # unweighted average is taken of those values. When micro-averaged, the
-                # metric is calculated globally by counting the total number of correctly
-                # predicted rows.
-              &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
-                  # positive prediction. For multiclass this is a macro-averaged metric.
-              &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
-                  # classification models this is the positive class threshold.
-                  # For multi-class classfication models this is the confidence
-                  # threshold.
-              &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
-                  # metric.
-              &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
-              &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
-                  # this is a macro-averaged metric.
-              &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
-                  # positive actual labels. For multiclass this is a macro-averaged
-                  # metric treating each class as a binary classifier.
-              &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
-                  # multiclass this is a micro-averaged metric.
-            },
-            &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
-            &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
-            &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
-              { # Confusion matrix for binary classification models.
-                &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
-                &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
-                &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
-                &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
-                &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
-                    # labels.
-                &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metric.
-                &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
-                &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
-                &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
-                    # prediction.
-              },
-            ],
-          },
-          &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
-              # factorization models.
-              # factorization models.
-            &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
-            &quot;rSquared&quot;: 3.14, # R^2 score.
-            &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
-            &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
-            &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
-          },
-          &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
-              # models.
-              # feedback_type=implicit.
-            &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
-                # then averages all the precisions across all the users.
-            &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
-                # predicted confidence by comparing it to an ideal rank measured by the
-                # original ratings.
-            &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
-                # from the predicted confidence and dividing it by the original rank.
-            &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
-                # recommendation models except instead of computing the rating directly,
-                # the output from evaluate is computed against a preference which is 1 or 0
-                # depending on if the rating exists or not.
-          },
           &quot;multiClassClassificationMetrics&quot;: { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
             &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
                 # models, the metrics are either macro-averaged or micro-averaged. When
@@ -1510,15 +1444,15 @@
                     # confusion matrix.
                 &quot;rows&quot;: [ # One row per actual label.
                   { # A single row in the confusion matrix.
+                    &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                     &quot;entries&quot;: [ # Info describing predicted label distribution.
                       { # A single entry in the confusion matrix.
+                        &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                         &quot;predictedLabel&quot;: &quot;A String&quot;, # The predicted label. For confidence_threshold &gt; 0, we will
                             # also add an entry indicating the number of items under the
                             # confidence threshold.
-                        &quot;itemCount&quot;: &quot;A String&quot;, # Number of items being predicted as this label.
                       },
                     ],
-                    &quot;actualLabel&quot;: &quot;A String&quot;, # The original label of this row.
                   },
                 ],
               },
@@ -1529,11 +1463,10 @@
             &quot;daviesBouldinIndex&quot;: 3.14, # Davies-Bouldin index.
             &quot;clusters&quot;: [ # [Beta] Information for all clusters.
               { # Message containing the information about one cluster.
+                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
                 &quot;count&quot;: &quot;A String&quot;, # Count of training data rows that were assigned to this cluster.
                 &quot;featureValues&quot;: [ # Values of highly variant features for this cluster.
                   { # Representative value of a single feature within the cluster.
-                    &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
-                        # feature.
                     &quot;featureColumn&quot;: &quot;A String&quot;, # The feature column name.
                     &quot;categoricalValue&quot;: { # Representative value of a categorical feature. # The categorical feature value.
                       &quot;categoryCounts&quot;: [ # Counts of all categories for the categorical feature. If there are
@@ -1547,33 +1480,82 @@
                         },
                       ],
                     },
+                    &quot;numericalValue&quot;: 3.14, # The numerical feature value. This is the centroid value for this
+                        # feature.
                   },
                 ],
-                &quot;centroidId&quot;: &quot;A String&quot;, # Centroid id.
               },
             ],
           },
+          &quot;binaryClassificationMetrics&quot;: { # Evaluation metrics for binary classification/classifier models. # Populated for binary classification/classifier models.
+            &quot;positiveLabel&quot;: &quot;A String&quot;, # Label representing the positive class.
+            &quot;binaryConfusionMatrixList&quot;: [ # Binary confusion matrix at multiple thresholds.
+              { # Confusion matrix for binary classification models.
+                &quot;f1Score&quot;: 3.14, # The equally weighted average of recall and precision.
+                &quot;precision&quot;: 3.14, # The fraction of actual positive predictions that had positive actual
+                    # labels.
+                &quot;accuracy&quot;: 3.14, # The fraction of predictions given the correct label.
+                &quot;positiveClassThreshold&quot;: 3.14, # Threshold value used when computing each of the following metrics.
+                &quot;truePositives&quot;: &quot;A String&quot;, # Number of true samples predicted as true.
+                &quot;recall&quot;: 3.14, # The fraction of actual positive labels that were given a positive
+                    # prediction.
+                &quot;falseNegatives&quot;: &quot;A String&quot;, # Number of true samples predicted as false.
+                &quot;trueNegatives&quot;: &quot;A String&quot;, # Number of false samples predicted as false.
+                &quot;falsePositives&quot;: &quot;A String&quot;, # Number of false samples predicted as true.
+              },
+            ],
+            &quot;aggregateClassificationMetrics&quot;: { # Aggregate metrics for classification/classifier models. For multi-class # Aggregate classification metrics.
+                # models, the metrics are either macro-averaged or micro-averaged. When
+                # macro-averaged, the metrics are calculated for each label and then an
+                # unweighted average is taken of those values. When micro-averaged, the
+                # metric is calculated globally by counting the total number of correctly
+                # predicted rows.
+              &quot;recall&quot;: 3.14, # Recall is the fraction of actual positive labels that were given a
+                  # positive prediction. For multiclass this is a macro-averaged metric.
+              &quot;threshold&quot;: 3.14, # Threshold at which the metrics are computed. For binary
+                  # classification models this is the positive class threshold.
+                  # For multi-class classification models this is the confidence
+                  # threshold.
+              &quot;rocAuc&quot;: 3.14, # Area Under a ROC Curve. For multiclass this is a macro-averaged
+                  # metric.
+              &quot;logLoss&quot;: 3.14, # Logarithmic Loss. For multiclass this is a macro-averaged metric.
+              &quot;f1Score&quot;: 3.14, # The F1 score is an average of recall and precision. For multiclass
+                  # this is a macro-averaged metric.
+              &quot;precision&quot;: 3.14, # Precision is the fraction of actual positive predictions that had
+                  # positive actual labels. For multiclass this is a macro-averaged
+                  # metric treating each class as a binary classifier.
+              &quot;accuracy&quot;: 3.14, # Accuracy is the fraction of predictions given the correct label. For
+                  # multiclass this is a micro-averaged metric.
+            },
+            &quot;negativeLabel&quot;: &quot;A String&quot;, # Label representing the negative class.
+          },
+          &quot;regressionMetrics&quot;: { # Evaluation metrics for regression and explicit feedback type matrix # Populated for regression models and explicit feedback type matrix
+              # factorization models.
+              # factorization models.
+            &quot;medianAbsoluteError&quot;: 3.14, # Median absolute error.
+            &quot;meanSquaredLogError&quot;: 3.14, # Mean squared log error.
+            &quot;meanAbsoluteError&quot;: 3.14, # Mean absolute error.
+            &quot;meanSquaredError&quot;: 3.14, # Mean squared error.
+            &quot;rSquared&quot;: 3.14, # R^2 score.
+          },
+          &quot;rankingMetrics&quot;: { # Evaluation metrics used by weighted-ALS models specified by # [Alpha] Populated for implicit feedback type matrix factorization
+              # models.
+              # feedback_type=implicit.
+            &quot;meanAveragePrecision&quot;: 3.14, # Calculates a precision per user for all the items by ranking them and
+                # then averages all the precisions across all the users.
+            &quot;normalizedDiscountedCumulativeGain&quot;: 3.14, # A metric to determine the goodness of a ranking calculated from the
+                # predicted confidence by comparing it to an ideal rank measured by the
+                # original ratings.
+            &quot;averageRank&quot;: 3.14, # Determines the goodness of a ranking by computing the percentile rank
+                # from the predicted confidence and dividing it by the original rank.
+            &quot;meanSquaredError&quot;: 3.14, # Similar to the mean squared error computed in regression and explicit
+                # recommendation models except instead of computing the rating directly,
+                # the output from evaluate is computed against a preference which is 1 or 0
+                # depending on if the rating exists or not.
+          },
         },
         &quot;trainingOptions&quot;: { # Options that were used for this training run, includes
             # user specified and default options that were used.
-          &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
-          &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
-          &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
-              # training data. Only applicable for classification models.
-            &quot;a_key&quot;: 3.14,
-          },
-          &quot;subsample&quot;: 3.14, # Subsample fraction of the training data to grow tree to prevent
-              # overfitting for boosted tree models.
-          &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
-              # any more (compared to min_relative_progress). Used only for iterative
-              # training algorithms.
-          &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
-              # of data will be used as training data. The format should be double.
-              # Accurate to two decimal places.
-              # Default value is 0.2.
-          &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
-              # strategy.
-          &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
           &quot;inputLabelColumns&quot;: [ # Name of input label columns in training data.
             &quot;A String&quot;,
           ],
@@ -1618,27 +1600,45 @@
           &quot;numClusters&quot;: &quot;A String&quot;, # Number of clusters for clustering models.
           &quot;dataSplitMethod&quot;: &quot;A String&quot;, # The data split type for training and evaluation, e.g. RANDOM.
           &quot;minSplitLoss&quot;: 3.14, # Minimum split loss for boosted tree models.
+          &quot;dropout&quot;: 3.14, # Dropout probability for dnn models.
+          &quot;learnRate&quot;: 3.14, # Learning rate in training. Used only for iterative training algorithms.
+          &quot;labelClassWeights&quot;: { # Weights associated with each label class, for rebalancing the
+              # training data. Only applicable for classification models.
+            &quot;a_key&quot;: 3.14,
+          },
+          &quot;subsample&quot;: 3.14, # Subsample fraction of the training data used to grow each tree, to prevent
+              # overfitting in boosted tree models.
+          &quot;earlyStop&quot;: True or False, # Whether to stop early when the loss doesn&#x27;t improve significantly
+              # any more (compared to min_relative_progress). Used only for iterative
+              # training algorithms.
+          &quot;dataSplitEvalFraction&quot;: 3.14, # The fraction of evaluation data over the whole input data. The rest
+              # of the data will be used as training data. The format should be double.
+              # Accurate to two decimal places.
+              # Default value is 0.2.
+          &quot;initialLearnRate&quot;: 3.14, # Specifies the initial learning rate for the line search learn rate
+              # strategy.
+          &quot;itemColumn&quot;: &quot;A String&quot;, # Item column specified for matrix factorization models.
         },
         &quot;dataSplitResult&quot;: { # Data split result. This contains references to the training and evaluation # Data split result of the training run. Only set when the input data is
             # actually split.
             # data tables that were used to train the model.
           &quot;trainingTable&quot;: { # Table reference of the training data after split.
-            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
             &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
             &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
           },
           &quot;evaluationTable&quot;: { # Table reference of the evaluation data after split.
-            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
             &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this table.
             &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this table.
+            &quot;tableId&quot;: &quot;A String&quot;, # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
           },
         },
       },
     ],
     &quot;modelReference&quot;: { # Required. Unique identifier for this model.
+      &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
       &quot;datasetId&quot;: &quot;A String&quot;, # [Required] The ID of the dataset containing this model.
       &quot;modelId&quot;: &quot;A String&quot;, # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
-      &quot;projectId&quot;: &quot;A String&quot;, # [Required] The ID of the project containing this model.
     },
     &quot;description&quot;: &quot;A String&quot;, # Optional. A user-friendly description of this model.
     &quot;etag&quot;: &quot;A String&quot;, # Output only. A hash of this resource.