chore: Update discovery artifacts (#1291)

* chore: update docs/dyn/index.md
* chore(abusiveexperiencereport): update the api
* chore(acceleratedmobilepageurl): update the api
* chore(accessapproval): update the api
* chore(accesscontextmanager): update the api
* chore(adexchangebuyer2): update the api
* chore(adexperiencereport): update the api
* chore(admob): update the api
* chore(analytics): update the api
* chore(analyticsreporting): update the api
* chore(androiddeviceprovisioning): update the api
* chore(androidenterprise): update the api
* chore(androidpublisher): update the api
* chore(apigateway): update the api
* chore(artifactregistry): update the api
* chore(bigqueryconnection): update the api
* chore(bigquerydatatransfer): update the api
* chore(billingbudgets): update the api
* chore(binaryauthorization): update the api
* chore(blogger): update the api
* chore(books): update the api
* chore(calendar): update the api
* chore(chat): update the api
* chore(chromeuxreport): update the api
* chore(civicinfo): update the api
* chore(classroom): update the api
* chore(cloudbilling): update the api
* chore(cloudbuild): update the api
* chore(clouddebugger): update the api
* chore(clouderrorreporting): update the api
* chore(cloudfunctions): update the api
* chore(cloudidentity): update the api
* chore(cloudiot): update the api
* chore(cloudkms): update the api
* chore(cloudprofiler): update the api
* chore(cloudresourcemanager): update the api
* chore(cloudscheduler): update the api
* chore(cloudshell): update the api
* chore(cloudtasks): update the api
* chore(cloudtrace): update the api
* chore(composer): update the api
* chore(containeranalysis): update the api
* chore(content): update the api
* chore(customsearch): update the api
* chore(datacatalog): update the api
* chore(datafusion): update the api
* chore(datamigration): update the api
* chore(datastore): update the api
* chore(deploymentmanager): update the api
* chore(digitalassetlinks): update the api
* chore(displayvideo): update the api
* chore(dlp): update the api
* chore(dns): update the api
* chore(docs): update the api
* chore(domains): update the api
* chore(domainsrdap): update the api
* chore(doubleclickbidmanager): update the api
* chore(doubleclicksearch): update the api
* chore(drive): update the api
* chore(driveactivity): update the api
* chore(eventarc): update the api
* chore(factchecktools): update the api
* chore(fcm): update the api
* chore(file): update the api
* chore(firebase): update the api
* chore(firebasedatabase): update the api
* chore(firebasedynamiclinks): update the api
* chore(firebasehosting): update the api
* chore(firebaseml): update the api
* chore(firebaserules): update the api
* chore(firestore): update the api
* chore(fitness): update the api
* chore(gamesConfiguration): update the api
* chore(gamesManagement): update the api
* chore(gameservices): update the api
* chore(genomics): update the api
* chore(gmail): update the api
* chore(gmailpostmastertools): update the api
* chore(groupsmigration): update the api
* chore(groupssettings): update the api
* chore(healthcare): update the api
* chore(iam): update the api
* chore(iamcredentials): update the api
* chore(iap): update the api
* chore(identitytoolkit): update the api
* chore(indexing): update the api
* chore(jobs): update the api
* chore(kgsearch): update the api
* chore(language): update the api
* chore(libraryagent): update the api
* chore(licensing): update the api
* chore(lifesciences): update the api
* chore(logging): update the api
* chore(managedidentities): update the api
* chore(manufacturers): update the api
* chore(memcache): update the api
* chore(ml): update the api
* chore(monitoring): update the api
* chore(networkmanagement): update the api
* chore(osconfig): update the api
* chore(pagespeedonline): update the api
* chore(playablelocations): update the api
* chore(playcustomapp): update the api
* chore(policytroubleshooter): update the api
* chore(poly): update the api
* chore(privateca): update the api
* chore(prod_tt_sasportal): update the api
* chore(pubsub): update the api
* chore(pubsublite): update the api
* chore(realtimebidding): update the api
* chore(recommendationengine): update the api
* chore(recommender): update the api
* chore(redis): update the api
* chore(remotebuildexecution): update the api
* chore(reseller): update the api
* chore(runtimeconfig): update the api
* chore(safebrowsing): update the api
* chore(sasportal): update the api
* chore(script): update the api
* chore(searchconsole): update the api
* chore(secretmanager): update the api
* chore(servicecontrol): update the api
* chore(servicedirectory): update the api
* chore(siteVerification): update the api
* chore(slides): update the api
* chore(smartdevicemanagement): update the api
* chore(sourcerepo): update the api
* chore(sqladmin): update the api
* chore(storage): update the api
* chore(storagetransfer): update the api
* chore(streetviewpublish): update the api
* chore(sts): update the api
* chore(tagmanager): update the api
* chore(tasks): update the api
* chore(testing): update the api
* chore(texttospeech): update the api
* chore(toolresults): update the api
* chore(trafficdirector): update the api
* chore(transcoder): update the api
* chore(translate): update the api
* chore(vault): update the api
* chore(vectortile): update the api
* chore(verifiedaccess): update the api
* chore(videointelligence): update the api
* chore(vision): update the api
* chore(webfonts): update the api
* chore(webmasters): update the api
* chore(websecurityscanner): update the api
* chore(workflowexecutions): update the api
* chore(workflows): update the api
* chore(youtubeAnalytics): update the api
* chore(youtubereporting): update the api
* chore(docs): Add new discovery artifacts and reference documents
diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html
index 3afc37d..6a2e1b3 100644
--- a/docs/dyn/bigquery_v2.models.html
+++ b/docs/dyn/bigquery_v2.models.html
@@ -201,6 +201,9 @@
                 "variance": 3.14, # Variance.
               },
               "hasDrift": True or False, # Is arima model fitted with drift or not. It is always false when d is not 1.
+              "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+              "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+              "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
               "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                 "d": "A String", # Order of the differencing part.
                 "p": "A String", # Order of the autoregressive part.
@@ -210,6 +213,9 @@
                 "A String",
               ],
               "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+              "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns.
+                "A String",
+              ],
             },
           ],
           "hasDrift": [ # Whether Arima model fitted with drift or not. It is always false when d is not 1.
@@ -279,9 +285,6 @@
           "daviesBouldinIndex": 3.14, # Davies-Bouldin index.
           "meanSquaredDistance": 3.14, # Mean of squared distances between each sample to its cluster centroid.
         },
-        "dimensionalityReductionMetrics": { # Model evaluation metrics for dimensionality reduction models. # Evaluation metrics when the model is a dimensionality reduction model, which currently includes PCA.
-          "totalExplainedVarianceRatio": 3.14, # Total percentage of variance explained by the selected principal components.
-        },
         "multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
           "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class models, the metrics are either macro-averaged or micro-averaged. When macro-averaged, the metrics are calculated for each label and then an unweighted average is taken of those values. When micro-averaged, the metric is calculated globally by counting the total number of correctly predicted rows. # Aggregate classification metrics.
             "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For multiclass this is a micro-averaged metric.
@@ -354,6 +357,9 @@
                   "variance": 3.14, # Variance.
                 },
                 "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false when d is not 1.
+                "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+                "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+                "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
                 "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                   "d": "A String", # Order of the differencing part.
                   "p": "A String", # Order of the autoregressive part.
@@ -363,6 +369,9 @@
                   "A String",
                 ],
                 "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+                "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns.
+                  "A String",
+                ],
               },
             ],
             "seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported for one time series.
@@ -380,26 +389,21 @@
           "evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
           "index": 42, # Index of the iteration, 0 based.
           "learnRate": 3.14, # Learn rate used for this iteration.
-          "principalComponentInfos": [ # The information of the principal components.
-            { # Principal component infos, used only for eigen decomposition based models, e.g., PCA. Ordered by explained_variance in the descending order.
-              "cumulativeExplainedVarianceRatio": 3.14, # The explained_variance is pre-ordered in the descending order to compute the cumulative explained variance ratio.
-              "explainedVariance": 3.14, # Explained variance by this principal component, which is simply the eigenvalue.
-              "explainedVarianceRatio": 3.14, # Explained_variance over the total explained variance.
-              "principalComponentId": "A String", # Id of the principal component.
-            },
-          ],
           "trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
         },
       ],
       "startTime": "A String", # The start time of this training run.
       "trainingOptions": { # Options used in model training. # Options that were used for this training run, includes user specified and default options that were used.
+        "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series.
         "autoArima": True or False, # Whether to enable auto ARIMA or not.
         "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q.
         "batchSize": "A String", # Batch size for dnn models.
+        "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series.
         "dataFrequency": "A String", # The data frequency of a time series.
         "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
         "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
         "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
+        "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results.
         "distanceType": "A String", # Distance type for clustering models.
         "dropout": 3.14, # Dropout probability for dnn models.
         "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms.
@@ -442,6 +446,9 @@
         "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
         "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model.
         "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training.
+        "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training.
+          "A String",
+        ],
         "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model.
         "userColumn": "A String", # User column specified for matrix factorization models.
         "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified.
@@ -547,6 +554,9 @@
                     "variance": 3.14, # Variance.
                   },
                   "hasDrift": True or False, # Is arima model fitted with drift or not. It is always false when d is not 1.
+                  "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+                  "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+                  "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
                   "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                     "d": "A String", # Order of the differencing part.
                     "p": "A String", # Order of the autoregressive part.
@@ -556,6 +566,9 @@
                     "A String",
                   ],
                   "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+                  "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns.
+                    "A String",
+                  ],
                 },
               ],
               "hasDrift": [ # Whether Arima model fitted with drift or not. It is always false when d is not 1.
@@ -625,9 +638,6 @@
               "daviesBouldinIndex": 3.14, # Davies-Bouldin index.
               "meanSquaredDistance": 3.14, # Mean of squared distances between each sample to its cluster centroid.
             },
-            "dimensionalityReductionMetrics": { # Model evaluation metrics for dimensionality reduction models. # Evaluation metrics when the model is a dimensionality reduction model, which currently includes PCA.
-              "totalExplainedVarianceRatio": 3.14, # Total percentage of variance explained by the selected principal components.
-            },
             "multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
               "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class models, the metrics are either macro-averaged or micro-averaged. When macro-averaged, the metrics are calculated for each label and then an unweighted average is taken of those values. When micro-averaged, the metric is calculated globally by counting the total number of correctly predicted rows. # Aggregate classification metrics.
                 "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For multiclass this is a micro-averaged metric.
@@ -700,6 +710,9 @@
                       "variance": 3.14, # Variance.
                     },
                     "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false when d is not 1.
+                    "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+                    "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+                    "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
                     "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                       "d": "A String", # Order of the differencing part.
                       "p": "A String", # Order of the autoregressive part.
@@ -709,6 +722,9 @@
                       "A String",
                     ],
                     "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+                    "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns.
+                      "A String",
+                    ],
                   },
                 ],
                 "seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported for one time series.
@@ -726,26 +742,21 @@
               "evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
               "index": 42, # Index of the iteration, 0 based.
               "learnRate": 3.14, # Learn rate used for this iteration.
-              "principalComponentInfos": [ # The information of the principal components.
-                { # Principal component infos, used only for eigen decomposition based models, e.g., PCA. Ordered by explained_variance in the descending order.
-                  "cumulativeExplainedVarianceRatio": 3.14, # The explained_variance is pre-ordered in the descending order to compute the cumulative explained variance ratio.
-                  "explainedVariance": 3.14, # Explained variance by this principal component, which is simply the eigenvalue.
-                  "explainedVarianceRatio": 3.14, # Explained_variance over the total explained variance.
-                  "principalComponentId": "A String", # Id of the principal component.
-                },
-              ],
               "trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
             },
           ],
           "startTime": "A String", # The start time of this training run.
           "trainingOptions": { # Options used in model training. # Options that were used for this training run, includes user specified and default options that were used.
+            "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series.
             "autoArima": True or False, # Whether to enable auto ARIMA or not.
             "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q.
             "batchSize": "A String", # Batch size for dnn models.
+            "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series.
             "dataFrequency": "A String", # The data frequency of a time series.
             "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
             "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
             "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
+            "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results.
             "distanceType": "A String", # Distance type for clustering models.
             "dropout": 3.14, # Dropout probability for dnn models.
             "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms.
@@ -788,6 +799,9 @@
             "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
             "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model.
             "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training.
+            "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training.
+              "A String",
+            ],
             "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model.
             "userColumn": "A String", # User column specified for matrix factorization models.
             "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified.
@@ -906,6 +920,9 @@
                 "variance": 3.14, # Variance.
               },
               "hasDrift": True or False, # Is arima model fitted with drift or not. It is always false when d is not 1.
+              "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+              "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+              "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
               "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                 "d": "A String", # Order of the differencing part.
                 "p": "A String", # Order of the autoregressive part.
@@ -915,6 +932,9 @@
                 "A String",
               ],
               "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+              "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns.
+                "A String",
+              ],
             },
           ],
           "hasDrift": [ # Whether Arima model fitted with drift or not. It is always false when d is not 1.
@@ -984,9 +1004,6 @@
           "daviesBouldinIndex": 3.14, # Davies-Bouldin index.
           "meanSquaredDistance": 3.14, # Mean of squared distances between each sample to its cluster centroid.
         },
-        "dimensionalityReductionMetrics": { # Model evaluation metrics for dimensionality reduction models. # Evaluation metrics when the model is a dimensionality reduction model, which currently includes PCA.
-          "totalExplainedVarianceRatio": 3.14, # Total percentage of variance explained by the selected principal components.
-        },
         "multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
           "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class models, the metrics are either macro-averaged or micro-averaged. When macro-averaged, the metrics are calculated for each label and then an unweighted average is taken of those values. When micro-averaged, the metric is calculated globally by counting the total number of correctly predicted rows. # Aggregate classification metrics.
             "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For multiclass this is a micro-averaged metric.
@@ -1059,6 +1076,9 @@
                   "variance": 3.14, # Variance.
                 },
                 "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false when d is not 1.
+                "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+                "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+                "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
                 "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                   "d": "A String", # Order of the differencing part.
                   "p": "A String", # Order of the autoregressive part.
@@ -1068,6 +1088,9 @@
                   "A String",
                 ],
                 "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+                "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns.
+                  "A String",
+                ],
               },
             ],
             "seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported for one time series.
@@ -1085,26 +1108,21 @@
           "evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
           "index": 42, # Index of the iteration, 0 based.
           "learnRate": 3.14, # Learn rate used for this iteration.
-          "principalComponentInfos": [ # The information of the principal components.
-            { # Principal component infos, used only for eigen decomposition based models, e.g., PCA. Ordered by explained_variance in the descending order.
-              "cumulativeExplainedVarianceRatio": 3.14, # The explained_variance is pre-ordered in the descending order to compute the cumulative explained variance ratio.
-              "explainedVariance": 3.14, # Explained variance by this principal component, which is simply the eigenvalue.
-              "explainedVarianceRatio": 3.14, # Explained_variance over the total explained variance.
-              "principalComponentId": "A String", # Id of the principal component.
-            },
-          ],
           "trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
         },
       ],
       "startTime": "A String", # The start time of this training run.
       "trainingOptions": { # Options used in model training. # Options that were used for this training run, includes user specified and default options that were used.
+        "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series.
         "autoArima": True or False, # Whether to enable auto ARIMA or not.
         "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q.
         "batchSize": "A String", # Batch size for dnn models.
+        "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series.
         "dataFrequency": "A String", # The data frequency of a time series.
         "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
         "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
         "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
+        "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results.
         "distanceType": "A String", # Distance type for clustering models.
         "dropout": 3.14, # Dropout probability for dnn models.
         "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms.
@@ -1147,6 +1165,9 @@
         "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
         "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model.
         "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training.
+        "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training.
+          "A String",
+        ],
         "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model.
         "userColumn": "A String", # User column specified for matrix factorization models.
        "walsAlpha": 3.14, # Hyperparameter for matrix factorization when implicit feedback type is specified.
@@ -1240,6 +1261,9 @@
                 "variance": 3.14, # Variance.
               },
               "hasDrift": True or False, # Is arima model fitted with drift or not. It is always false when d is not 1.
+              "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+              "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+              "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
               "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                 "d": "A String", # Order of the differencing part.
                 "p": "A String", # Order of the autoregressive part.
@@ -1249,6 +1273,9 @@
                 "A String",
               ],
               "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+              "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here is the same as the order of time_series_id_columns.
+                "A String",
+              ],
             },
           ],
           "hasDrift": [ # Whether Arima model fitted with drift or not. It is always false when d is not 1.
@@ -1318,9 +1345,6 @@
           "daviesBouldinIndex": 3.14, # Davies-Bouldin index.
           "meanSquaredDistance": 3.14, # Mean of squared distances between each sample to its cluster centroid.
         },
-        "dimensionalityReductionMetrics": { # Model evaluation metrics for dimensionality reduction models. # Evaluation metrics when the model is a dimensionality reduction model, which currently includes PCA.
-          "totalExplainedVarianceRatio": 3.14, # Total percentage of variance explained by the selected principal components.
-        },
         "multiClassClassificationMetrics": { # Evaluation metrics for multi-class classification/classifier models. # Populated for multi-class classification/classifier models.
           "aggregateClassificationMetrics": { # Aggregate metrics for classification/classifier models. For multi-class models, the metrics are either macro-averaged or micro-averaged. When macro-averaged, the metrics are calculated for each label and then an unweighted average is taken of those values. When micro-averaged, the metric is calculated globally by counting the total number of correctly predicted rows. # Aggregate classification metrics.
             "accuracy": 3.14, # Accuracy is the fraction of predictions given the correct label. For multiclass this is a micro-averaged metric.
@@ -1393,6 +1417,9 @@
                   "variance": 3.14, # Variance.
                 },
                 "hasDrift": True or False, # Whether Arima model fitted with drift or not. It is always false when d is not 1.
+                "hasHolidayEffect": True or False, # If true, holiday_effect is a part of time series decomposition result.
+                "hasSpikesAndDips": True or False, # If true, spikes_and_dips is a part of time series decomposition result.
+                "hasStepChanges": True or False, # If true, step_changes is a part of time series decomposition result.
                 "nonSeasonalOrder": { # Arima order, can be used for both non-seasonal and seasonal parts. # Non-seasonal order.
                   "d": "A String", # Order of the differencing part.
                   "p": "A String", # Order of the autoregressive part.
@@ -1402,6 +1429,9 @@
                   "A String",
                 ],
                 "timeSeriesId": "A String", # The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used.
+                "timeSeriesIds": [ # The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here is the same as the order of time_series_id_columns.
+                  "A String",
+                ],
               },
             ],
             "seasonalPeriods": [ # Seasonal periods. Repeated because multiple periods are supported for one time series.
@@ -1419,26 +1449,21 @@
           "evalLoss": 3.14, # Loss computed on the eval data at the end of iteration.
           "index": 42, # Index of the iteration, 0 based.
           "learnRate": 3.14, # Learn rate used for this iteration.
-          "principalComponentInfos": [ # The information of the principal components.
-            { # Principal component infos, used only for eigen decomposition based models, e.g., PCA. Ordered by explained_variance in the descending order.
-              "cumulativeExplainedVarianceRatio": 3.14, # The explained_variance is pre-ordered in the descending order to compute the cumulative explained variance ratio.
-              "explainedVariance": 3.14, # Explained variance by this principal component, which is simply the eigenvalue.
-              "explainedVarianceRatio": 3.14, # Explained_variance over the total explained variance.
-              "principalComponentId": "A String", # Id of the principal component.
-            },
-          ],
           "trainingLoss": 3.14, # Loss computed on the training data at the end of iteration.
         },
       ],
       "startTime": "A String", # The start time of this training run.
       "trainingOptions": { # Options used in model training. # Options that were used for this training run, includes user specified and default options that were used.
+        "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series.
         "autoArima": True or False, # Whether to enable auto ARIMA or not.
         "autoArimaMaxOrder": "A String", # The max value of non-seasonal p and q.
         "batchSize": "A String", # Batch size for dnn models.
+        "cleanSpikesAndDips": True or False, # If true, clean spikes and dips in the input time series.
         "dataFrequency": "A String", # The data frequency of a time series.
         "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
         "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
         "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM.
+        "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results.
         "distanceType": "A String", # Distance type for clustering models.
         "dropout": 3.14, # Dropout probability for dnn models.
         "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms.
@@ -1481,6 +1506,9 @@
         "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
         "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model.
         "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training.
+        "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training.
+          "A String",
+        ],
         "timeSeriesTimestampColumn": "A String", # Column to be designated as time series timestamp for ARIMA model.
         "userColumn": "A String", # User column specified for matrix factorization models.
        "walsAlpha": 3.14, # Hyperparameter for matrix factorization when implicit feedback type is specified.