docs: update generated docs (#981)
diff --git a/docs/dyn/fitness_v1.users.dataset.html b/docs/dyn/fitness_v1.users.dataset.html
index ac5a47c..f31089f 100644
--- a/docs/dyn/fitness_v1.users.dataset.html
+++ b/docs/dyn/fitness_v1.users.dataset.html
@@ -91,21 +91,32 @@
The object takes the form of:
{ # Next id: 10
- "endTimeMillis": "A String", # The end of a window of time. Data that intersects with this time
- # window will be aggregated. The time is in milliseconds since epoch,
- # inclusive.
+ "bucketByTime": { # Specifies that data be aggregated by a single time interval. Mutually
+ # exclusive of other bucketing specifications.
+ "period": {
+ "type": "A String",
+ "value": 42,
+ "timeZoneId": "A String", # org.joda.timezone.DateTimeZone
+ },
+ "durationMillis": "A String", # Specifies that result buckets aggregate data by exactly durationMillis time
+ # frames. Time frames that contain no data will be included in the response
+ # with an empty dataset.
+ },
"filteredDataQualityStandard": [ # DO NOT POPULATE THIS FIELD. It is ignored.
"A String",
],
- "bucketBySession": { # Specifies that data be aggregated by user sessions. Data that does not fall
- # within the time range of a session will not be included in the response.
- # Mutually exclusive of other bucketing specifications.
- "minDurationMillis": "A String", # Specifies that only sessions of duration longer than minDurationMillis are
- # considered and used as a container for aggregated data.
+ "bucketByActivityType": { # Specifies that data be aggregated by the type of activity being performed
+ # when the data was recorded. All data that was recorded during a certain
+ # activity type (for the given time range) will be aggregated into the same
+ # bucket. Data that was recorded while the user was not active will not be
+ # included in the response. Mutually exclusive of other bucketing
+ # specifications.
+ "activityDataSourceId": "A String", # The default activity stream will be used if a specific activityDataSourceId
+ # is not specified.
+ "minDurationMillis": "A String", # Specifies that only activity segments of duration longer than
+ # minDurationMillis are considered and used as a container for aggregated
+ # data.
},
- "startTimeMillis": "A String", # The start of a window of time. Data that intersects with this time
- # window will be aggregated. The time is in milliseconds since epoch,
- # inclusive.
"aggregateBy": [ # The specification of data to be aggregated. At least one aggregateBy spec
# must be provided. All data that is specified will be aggregated using the
# same bucketing criteria. There will be one dataset in the response for
@@ -126,29 +137,18 @@
# dataSourceId, not both.
},
],
- "bucketByActivityType": { # Specifies that data be aggregated by the type of activity being performed
- # when the data was recorded. All data that was recorded during a certain
- # activity type (for the given time range) will be aggregated into the same
- # bucket. Data that was recorded while the user was not active will not be
- # included in the response. Mutually exclusive of other bucketing
- # specifications.
- "activityDataSourceId": "A String", # The default activity stream will be used if a specific activityDataSourceId
- # is not specified.
- "minDurationMillis": "A String", # Specifies that only activity segments of duration longer than
- # minDurationMillis are considered and used as a container for aggregated
- # data.
+ "bucketBySession": { # Specifies that data be aggregated by user sessions. Data that does not fall
+ # within the time range of a session will not be included in the response.
+ # Mutually exclusive of other bucketing specifications.
+ "minDurationMillis": "A String", # Specifies that only sessions of duration longer than minDurationMillis are
+ # considered and used as a container for aggregated data.
},
- "bucketByTime": { # Specifies that data be aggregated by a single time interval. Mutually
- # exclusive of other bucketing specifications.
- "durationMillis": "A String", # Specifies that result buckets aggregate data by exactly durationMillis time
- # frames. Time frames that contain no data will be included in the response
- # with an empty dataset.
- "period": {
- "type": "A String",
- "timeZoneId": "A String", # org.joda.timezone.DateTimeZone
- "value": 42,
- },
- },
+ "endTimeMillis": "A String", # The end of a window of time. Data that intersects with this time
+ # window will be aggregated. The time is in milliseconds since epoch,
+ # inclusive.
+ "startTimeMillis": "A String", # The start of a window of time. Data that intersects with this time
+ # window will be aggregated. The time is in milliseconds since epoch,
+ # inclusive.
"bucketByActivitySegment": { # Specifies that data be aggregated each activity segment recored for a user.
# Similar to bucketByActivitySegment, but bucketing is done for each activity
# segment rather than all segments of the same type. Mutually exclusive of
@@ -172,125 +172,125 @@
{
"bucket": [ # A list of buckets containing the aggregated data.
{
- "startTimeMillis": "A String", # The start time for the aggregated data, in milliseconds since epoch,
- # inclusive.
"session": { # Sessions contain metadata, such as a user-friendly name and time interval # Available for Bucket.Type.SESSION
# information.
"id": "A String", # A client-generated identifier that is unique across all sessions owned by
# this particular user.
+ "activityType": 42, # The type of activity this session represents.
"endTimeMillis": "A String", # An end time, in milliseconds since epoch, inclusive.
+ "description": "A String", # A description for this session.
+ "startTimeMillis": "A String", # A start time, in milliseconds since epoch, inclusive.
+ "name": "A String", # A human readable name of the session.
"application": { # The application that created the session.
"packageName": "A String", # Package name for this application. This is used as a unique
# identifier when created by Android applications, but cannot be specified
# by REST clients. REST clients will have their developer project number
# reflected into the Data Source data stream IDs, instead of the packageName.
+ "detailsUrl": "A String", # An optional URI that can be used to link back to the application.
+ "version": "A String", # Version of the application. You should update this field whenever the
+ # application changes in a way that affects the computation of the data.
"name": "A String", # The name of this application. This is required for REST clients, but we
# do not enforce uniqueness of this name. It is provided as a matter of
# convenience for other developers who would like to identify which REST
# created an Application or Data Source.
- "detailsUrl": "A String", # An optional URI that can be used to link back to the application.
- "version": "A String", # Version of the application. You should update this field whenever the
- # application changes in a way that affects the computation of the data.
},
- "description": "A String", # A description for this session.
- "startTimeMillis": "A String", # A start time, in milliseconds since epoch, inclusive.
- "activityType": 42, # The type of activity this session represents.
- "modifiedTimeMillis": "A String", # A timestamp that indicates when the session was last modified.
- "name": "A String", # A human readable name of the session.
"activeTimeMillis": "A String", # Session active time. While start_time_millis and end_time_millis define
# the full session time, the active time can be shorter and specified by
# active_time_millis.
# If the inactive time during the session is known, it should also be
# inserted via a com.google.activity.segment data point with a STILL
# activity value
+ "modifiedTimeMillis": "A String", # A timestamp that indicates when the session was last modified.
},
"activity": 42, # Available for Bucket.Type.ACTIVITY_TYPE, Bucket.Type.ACTIVITY_SEGMENT
- "dataset": [ # There will be one dataset per AggregateBy in the request.
- { # A dataset represents a projection container for data points. They do not
- # carry any info of their own. Datasets represent a set of data points from a
- # particular data source. A data point can be found in more than one dataset.
- "point": [ # A partial list of data points contained in the dataset, ordered by largest
- # endTimeNanos first. This list is considered complete when retrieving a
- # small dataset and partial when patching a dataset or retrieving a dataset
- # that is too large to include in a single response.
- { # Represents a single data point, generated by a particular data source. A
- # data point holds a value for each field, an end timestamp and an optional
- # start time. The exact semantics of each of these attributes are specified in
- # the documentation for the particular data type.
- #
- # A data point can represent an instantaneous measurement, reading or input
- # observation, as well as averages or aggregates over a time interval. Check
- # the data type documentation to determine which is the case for a particular
- # data type.
- #
- # Data points always contain one value for each field of the data type.
- "modifiedTimeMillis": "A String", # Indicates the last time this data point was modified. Useful only in
- # contexts where we are listing the data changes, rather than representing
- # the current state of the data.
- "originDataSourceId": "A String", # If the data point is contained in a dataset for a derived data source,
- # this field will be populated with the data source stream ID that created
- # the data point originally.
- #
- # WARNING: do not rely on this field for anything other than debugging. The
- # value of this field, if it is set at all, is an implementation detail and
- # is not guaranteed to remain consistent.
- "rawTimestampNanos": "A String", # The raw timestamp from the original SensorEvent.
- "dataTypeName": "A String", # The data type defining the format of the values in this data point.
- "startTimeNanos": "A String", # The start time of the interval represented by this data point, in
- # nanoseconds since epoch.
- "value": [ # Values of each data type field for the data point. It is expected that each
- # value corresponding to a data type field will occur in the same order that
- # the field is listed with in the data type specified in a data source.
- #
- # Only one of integer and floating point fields will be populated, depending
- # on the format enum value within data source's type field.
- { # Holder object for the value of a single field in a data point.
- #
- # A field value has a particular format and is only ever set to one of an
- # integer or a floating point value.
- "fpVal": 3.14, # Floating point value. When this is set, other values must not be set.
- "mapVal": [ # Map value. The valid key space and units for the corresponding value
- # of each entry should be documented as part of the data type definition.
- # Keys should be kept small whenever possible. Data streams with large keys
- # and high data frequency may be down sampled.
- {
- "value": { # Holder object for the value of an entry in a map field of a data point.
- #
- # A map value supports a subset of the formats that the regular Value supports.
- "fpVal": 3.14, # Floating point value.
- },
- "key": "A String",
- },
- ],
- "intVal": 42, # Integer value. When this is set, other values must not be set.
- "stringVal": "A String", # String value. When this is set, other values must not be set.
- # Strings should be kept small whenever possible. Data streams with large
- # string values and high data frequency may be down sampled.
- },
- ],
- "computationTimeMillis": "A String", # DO NOT USE THIS FIELD. It is ignored, and not stored.
- "endTimeNanos": "A String", # The end time of the interval represented by this data point, in
- # nanoseconds since epoch.
- },
- ],
- "minStartTimeNs": "A String", # The smallest start time of all data points in this possibly partial
- # representation of the dataset. Time is in nanoseconds from epoch. This
- # should also match the first part of the dataset identifier.
- "dataSourceId": "A String", # The data stream ID of the data source that created the points in this
- # dataset.
- "nextPageToken": "A String", # This token will be set when a dataset is received in response to a GET
- # request and the dataset is too large to be included in a single response.
- # Provide this value in a subsequent GET request to return the next page of
- # data points within this dataset.
- "maxEndTimeNs": "A String", # The largest end time of all data points in this possibly partial
- # representation of the dataset. Time is in nanoseconds from epoch. This
- # should also match the second part of the dataset identifier.
- },
- ],
- "type": "A String", # The type of a bucket signifies how the data aggregation is performed in the
- # bucket.
"endTimeMillis": "A String", # The end time for the aggregated data, in milliseconds since epoch,
# inclusive.
+ "dataset": [ # There will be one dataset per AggregateBy in the request.
+ { # A dataset represents a projection container for data points. They do not
+ # carry any info of their own. Datasets represent a set of data points from a
+ # particular data source. A data point can be found in more than one dataset.
+ "dataSourceId": "A String", # The data stream ID of the data source that created the points in this
+ # dataset.
+ "maxEndTimeNs": "A String", # The largest end time of all data points in this possibly partial
+ # representation of the dataset. Time is in nanoseconds from epoch. This
+ # should also match the second part of the dataset identifier.
+ "minStartTimeNs": "A String", # The smallest start time of all data points in this possibly partial
+ # representation of the dataset. Time is in nanoseconds from epoch. This
+ # should also match the first part of the dataset identifier.
+ "point": [ # A partial list of data points contained in the dataset, ordered by largest
+ # endTimeNanos first. This list is considered complete when retrieving a
+ # small dataset and partial when patching a dataset or retrieving a dataset
+ # that is too large to include in a single response.
+ { # Represents a single data point, generated by a particular data source. A
+ # data point holds a value for each field, an end timestamp and an optional
+ # start time. The exact semantics of each of these attributes are specified in
+ # the documentation for the particular data type.
+ #
+ # A data point can represent an instantaneous measurement, reading or input
+ # observation, as well as averages or aggregates over a time interval. Check
+ # the data type documentation to determine which is the case for a particular
+ # data type.
+ #
+ # Data points always contain one value for each field of the data type.
+ "computationTimeMillis": "A String", # DO NOT USE THIS FIELD. It is ignored, and not stored.
+ "dataTypeName": "A String", # The data type defining the format of the values in this data point.
+ "endTimeNanos": "A String", # The end time of the interval represented by this data point, in
+ # nanoseconds since epoch.
+ "startTimeNanos": "A String", # The start time of the interval represented by this data point, in
+ # nanoseconds since epoch.
+ "originDataSourceId": "A String", # If the data point is contained in a dataset for a derived data source,
+ # this field will be populated with the data source stream ID that created
+ # the data point originally.
+ #
+ # WARNING: do not rely on this field for anything other than debugging. The
+ # value of this field, if it is set at all, is an implementation detail and
+ # is not guaranteed to remain consistent.
+ "modifiedTimeMillis": "A String", # Indicates the last time this data point was modified. Useful only in
+ # contexts where we are listing the data changes, rather than representing
+ # the current state of the data.
+ "rawTimestampNanos": "A String", # The raw timestamp from the original SensorEvent.
+ "value": [ # Values of each data type field for the data point. It is expected that each
+ # value corresponding to a data type field will occur in the same order that
+ # the field is listed with in the data type specified in a data source.
+ #
+ # Only one of integer and floating point fields will be populated, depending
+ # on the format enum value within data source's type field.
+ { # Holder object for the value of a single field in a data point.
+ #
+ # A field value has a particular format and is only ever set to one of an
+ # integer or a floating point value.
+ "stringVal": "A String", # String value. When this is set, other values must not be set.
+ # Strings should be kept small whenever possible. Data streams with large
+ # string values and high data frequency may be down sampled.
+ "mapVal": [ # Map value. The valid key space and units for the corresponding value
+ # of each entry should be documented as part of the data type definition.
+ # Keys should be kept small whenever possible. Data streams with large keys
+ # and high data frequency may be down sampled.
+ {
+ "value": { # Holder object for the value of an entry in a map field of a data point.
+ #
+ # A map value supports a subset of the formats that the regular Value supports.
+ "fpVal": 3.14, # Floating point value.
+ },
+ "key": "A String",
+ },
+ ],
+ "fpVal": 3.14, # Floating point value. When this is set, other values must not be set.
+ "intVal": 42, # Integer value. When this is set, other values must not be set.
+ },
+ ],
+ },
+ ],
+ "nextPageToken": "A String", # This token will be set when a dataset is received in response to a GET
+ # request and the dataset is too large to be included in a single response.
+ # Provide this value in a subsequent GET request to return the next page of
+ # data points within this dataset.
+ },
+ ],
+ "startTimeMillis": "A String", # The start time for the aggregated data, in milliseconds since epoch,
+ # inclusive.
+ "type": "A String", # The type of a bucket signifies how the data aggregation is performed in the
+ # bucket.
},
],
}</pre>