Merge "Marks the bucket as invalid when it reaches the guard rail limit."
diff --git a/cmds/statsd/src/metrics/ValueMetricProducer.cpp b/cmds/statsd/src/metrics/ValueMetricProducer.cpp
index ac6c27a..851ae99 100644
--- a/cmds/statsd/src/metrics/ValueMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/ValueMetricProducer.cpp
@@ -450,6 +450,17 @@
}
mMatchedMetricDimensionKeys.clear();
mHasGlobalBase = true;
+
+ // If we reach the guardrail, we might have dropped some data, which means the bucket is
+ // incomplete.
+ //
+ // The base also needs to be reset. If we do not have the full data, we might
+ // incorrectly compute the diff when mUseZeroDefaultBase is true, since an existing key
+ // might be missing from mCurrentSlicedBucket.
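+ // In that case the diff would be computed against a zero base, i.e. the full pulled
+ // value instead of the true delta.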
+ if (hasReachedGuardRailLimit()) {
+ invalidateCurrentBucket();
+ mCurrentSlicedBucket.clear();
+ }
}

void ValueMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
@@ -471,6 +482,10 @@
}
}

+bool ValueMetricProducer::hasReachedGuardRailLimit() const {
+ return mCurrentSlicedBucket.size() >= mDimensionHardLimit;
+}
+
bool ValueMetricProducer::hitGuardRailLocked(const MetricDimensionKey& newKey) {
// ===========GuardRail==============
// 1. Report the tuple count if the tuple count > soft limit
@@ -481,7 +496,7 @@
size_t newTupleCount = mCurrentSlicedBucket.size() + 1;
StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount);
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
- if (newTupleCount > mDimensionHardLimit) {
+ if (hasReachedGuardRailLimit()) {
ALOGE("ValueMetric %lld dropping data for dimension key %s", (long long)mMetricId,
newKey.toString().c_str());
StatsdStats::getInstance().noteHardDimensionLimitReached(mMetricId);
diff --git a/cmds/statsd/src/metrics/ValueMetricProducer.h b/cmds/statsd/src/metrics/ValueMetricProducer.h
index d1c2315..f26ad85 100644
--- a/cmds/statsd/src/metrics/ValueMetricProducer.h
+++ b/cmds/statsd/src/metrics/ValueMetricProducer.h
@@ -158,6 +158,7 @@
// Util function to check whether the specified dimension hits the guardrail.
bool hitGuardRailLocked(const MetricDimensionKey& newKey);
+ bool hasReachedGuardRailLimit() const;
bool hitFullBucketGuardRailLocked(const MetricDimensionKey& newKey);
@@ -244,6 +245,7 @@
FRIEND_TEST(ValueMetricProducerTest, TestResetBaseOnPullFailAfterConditionChange_EndOfBucket);
FRIEND_TEST(ValueMetricProducerTest, TestResetBaseOnPullTooLate);
FRIEND_TEST(ValueMetricProducerTest, TestInvalidBucketWhenOneConditionFailed);
+ FRIEND_TEST(ValueMetricProducerTest, TestInvalidBucketWhenGuardRailHit);
FRIEND_TEST(ValueMetricProducerTest, TestInvalidBucketWhenInitialPullFailed);
FRIEND_TEST(ValueMetricProducerTest, TestInvalidBucketWhenLastPullFailed);
FRIEND_TEST(ValueMetricProducerTest, TestResetBaseOnPullDelayExceeded);
diff --git a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
index ae3cdbc..572b199 100644
--- a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
@@ -2390,6 +2390,51 @@
EXPECT_EQ(true, valueProducer.mHasGlobalBase);
}

+TEST(ValueMetricProducerTest, TestInvalidBucketWhenGuardRailHit) {
+ ValueMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.mutable_value_field()->set_field(tagId);
+ metric.mutable_value_field()->add_child()->set_field(2);
+ metric.mutable_dimensions_in_what()->set_field(tagId);
+ metric.mutable_dimensions_in_what()->add_child()->set_field(1);
+ metric.set_condition(StringToId("SCREEN_ON"));
+ metric.set_max_pull_delay_sec(INT_MAX);
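+ // Use an effectively unlimited pull delay so the pull below is not rejected as too late.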
+
+ UidMap uidMap;
+ SimpleAtomMatcher atomMatcher;
+ atomMatcher.set_atom_id(tagId);
+ sp<EventMatcherWizard> eventMatcherWizard =
+ new EventMatcherWizard({new SimpleLogMatchingTracker(
+ atomMatcherId, logEventMatcherIndex, atomMatcher, uidMap)});
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+ EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillRepeatedly(Return());
+
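+ // The mocked pull returns 2000 events, each with a distinct value in the dimension field,
+ // which exceeds the dimension hard limit and trips the guardrail.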
+ EXPECT_CALL(*pullerManager, Pull(tagId, _))
+ // First onConditionChanged
+ .WillOnce(Invoke([](int tagId, vector<std::shared_ptr<LogEvent>>* data) {
+ for (int i = 0; i < 2000; i++) {
+ shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucket2StartTimeNs + 1);
+ event->write(i);
+ event->write(i);
+ event->init();
+ data->push_back(event);
+ }
+ return true;
+ }));
+
+ ValueMetricProducer valueProducer(kConfigKey, metric, 1, wizard, logEventMatcherIndex,
+ eventMatcherWizard, tagId, bucketStartTimeNs,
+ bucketStartTimeNs, pullerManager);
+
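+ // Start with the condition false; flipping it to true below triggers the mocked pull.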
+ valueProducer.mCondition = false;
+ valueProducer.onConditionChanged(true, bucket2StartTimeNs + 2);
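+ // The pull hits the guardrail, so the bucket is marked invalid and the partially
+ // populated sliced bucket is cleared.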
+ EXPECT_EQ(true, valueProducer.mCurrentBucketIsInvalid);
+ EXPECT_EQ(0UL, valueProducer.mCurrentSlicedBucket.size());
+}
+
TEST(ValueMetricProducerTest, TestInvalidBucketWhenInitialPullFailed) {
ValueMetric metric;
metric.set_id(metricId);