Merge "Change enum MustExist in CertUtils to IntDef integers"
diff --git a/api/current.txt b/api/current.txt
index 1ffba87..3c3cfcc 100644
--- a/api/current.txt
+++ b/api/current.txt
@@ -16296,7 +16296,7 @@
 
   public final class TotalCaptureResult extends android.hardware.camera2.CaptureResult {
     method public java.util.List<android.hardware.camera2.CaptureResult> getPartialResults();
-    method public <T> T getPhysicalCameraKey(android.hardware.camera2.CaptureResult.Key<T>, java.lang.String);
+    method public java.util.Map<java.lang.String, android.hardware.camera2.CaptureResult> getPhysicalCameraResults();
   }
 
 }
diff --git a/api/system-current.txt b/api/system-current.txt
index 70148d9..2d3b65a 100644
--- a/api/system-current.txt
+++ b/api/system-current.txt
@@ -2516,6 +2516,7 @@
     method public int requestAudioFocus(android.media.AudioManager.OnAudioFocusChangeListener, android.media.AudioAttributes, int, int) throws java.lang.IllegalArgumentException;
     method public deprecated int requestAudioFocus(android.media.AudioManager.OnAudioFocusChangeListener, android.media.AudioAttributes, int, int, android.media.audiopolicy.AudioPolicy) throws java.lang.IllegalArgumentException;
     method public int requestAudioFocus(android.media.AudioFocusRequest, android.media.audiopolicy.AudioPolicy);
+    method public void setFocusRequestResult(android.media.AudioFocusInfo, int, android.media.audiopolicy.AudioPolicy);
     method public void unregisterAudioPolicyAsync(android.media.audiopolicy.AudioPolicy);
     field public static final int AUDIOFOCUS_FLAG_DELAY_OK = 1; // 0x1
     field public static final int AUDIOFOCUS_FLAG_LOCK = 4; // 0x4
diff --git a/cmds/statsd/src/StatsLogProcessor.h b/cmds/statsd/src/StatsLogProcessor.h
index 7642aafa..17741a8 100644
--- a/cmds/statsd/src/StatsLogProcessor.h
+++ b/cmds/statsd/src/StatsLogProcessor.h
@@ -100,7 +100,8 @@
     FRIEND_TEST(StatsLogProcessorTest, TestRateLimitBroadcast);
     FRIEND_TEST(StatsLogProcessorTest, TestDropWhenByteSizeTooLarge);
     FRIEND_TEST(StatsLogProcessorTest, TestDropWhenByteSizeTooLarge);
-    FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensions);
+    FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensionsForSumDuration);
+    FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensionsForMaxDuration);
     FRIEND_TEST(MetricConditionLinkE2eTest, TestMultiplePredicatesAndLinks);
     FRIEND_TEST(AttributionE2eTest, TestAttributionMatchAndSlice);
     FRIEND_TEST(GaugeMetricE2eTest, TestMultipleFieldsForPushedEvent);
diff --git a/cmds/statsd/src/anomaly/AnomalyTracker.cpp b/cmds/statsd/src/anomaly/AnomalyTracker.cpp
index c84a5b4..79b7d9c 100644
--- a/cmds/statsd/src/anomaly/AnomalyTracker.cpp
+++ b/cmds/statsd/src/anomaly/AnomalyTracker.cpp
@@ -35,9 +35,7 @@
 
 // TODO: Get rid of bucketNumbers, and return to the original circular array method.
 AnomalyTracker::AnomalyTracker(const Alert& alert, const ConfigKey& configKey)
-    : mAlert(alert),
-      mConfigKey(configKey),
-      mNumOfPastBuckets(mAlert.num_buckets() - 1) {
+    : mAlert(alert), mConfigKey(configKey), mNumOfPastBuckets(mAlert.num_buckets() - 1) {
     VLOG("AnomalyTracker() called");
     if (mAlert.num_buckets() <= 0) {
         ALOGE("Cannot create AnomalyTracker with %lld buckets",
diff --git a/cmds/statsd/src/metrics/CountMetricProducer.cpp b/cmds/statsd/src/metrics/CountMetricProducer.cpp
index ae4df3e..5b5b57b 100644
--- a/cmds/statsd/src/metrics/CountMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/CountMetricProducer.cpp
@@ -170,7 +170,6 @@
     protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_END_REPORT_NANOS, (long long)dumpTimeNs);
 
     mPastBuckets.clear();
-    mStartTimeNs = mCurrentBucketStartTimeNs;
 
     // TODO: Clear mDimensionKeyMap once the report is dumped.
 }
@@ -214,13 +213,11 @@
     }
 
     auto it = mCurrentSlicedCounter->find(eventKey);
-
     if (it == mCurrentSlicedCounter->end()) {
         // ===========GuardRail==============
         if (hitGuardRailLocked(eventKey)) {
             return;
         }
-
         // create a counter for the new key
         (*mCurrentSlicedCounter)[eventKey] = 1;
     } else {
@@ -228,10 +225,14 @@
         auto& count = it->second;
         count++;
     }
-
     for (auto& tracker : mAnomalyTrackers) {
+        int64_t countWholeBucket = mCurrentSlicedCounter->find(eventKey)->second;
+        auto prev = mCurrentFullCounters->find(eventKey);
+        if (prev != mCurrentFullCounters->end()) {
+            countWholeBucket += prev->second;
+        }
         tracker->detectAndDeclareAnomaly(eventTimeNs, mCurrentBucketNum, eventKey,
-                                         mCurrentSlicedCounter->find(eventKey)->second);
+                                         countWholeBucket);
     }
 
     VLOG("metric %lld %s->%lld", (long long)mMetricId, eventKey.c_str(),
@@ -241,33 +242,65 @@
 // When a new matched event comes in, we check if event falls into the current
 // bucket. If not, flush the old counter to past buckets and initialize the new bucket.
 void CountMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
-    if (eventTimeNs < mCurrentBucketStartTimeNs + mBucketSizeNs) {
+    uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+    if (eventTimeNs < currentBucketEndTimeNs) {
         return;
     }
 
+    flushCurrentBucketLocked(eventTimeNs);
+    // Set up the bucket start time and number.
+    uint64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
+    mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
+    mCurrentBucketNum += numBucketsForward;
+    VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
+         (long long)mCurrentBucketStartTimeNs);
+}
+
+void CountMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
+    uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
     CountBucket info;
     info.mBucketStartNs = mCurrentBucketStartTimeNs;
-    info.mBucketEndNs = mCurrentBucketStartTimeNs + mBucketSizeNs;
+    if (eventTimeNs < fullBucketEndTimeNs) {
+        info.mBucketEndNs = eventTimeNs;
+    } else {
+        info.mBucketEndNs = fullBucketEndTimeNs;
+    }
     info.mBucketNum = mCurrentBucketNum;
     for (const auto& counter : *mCurrentSlicedCounter) {
         info.mCount = counter.second;
         auto& bucketList = mPastBuckets[counter.first];
         bucketList.push_back(info);
-        VLOG("metric %lld, dump key value: %s -> %lld",
-            (long long)mMetricId, counter.first.c_str(), (long long)counter.second);
+        VLOG("metric %lld, dump key value: %s -> %lld", (long long)mMetricId, counter.first.c_str(),
+             (long long)counter.second);
     }
 
-    for (auto& tracker : mAnomalyTrackers) {
-        tracker->addPastBucket(mCurrentSlicedCounter, mCurrentBucketNum);
+    // If we have finished a full bucket, then send it to the anomaly tracker.
+    if (eventTimeNs > fullBucketEndTimeNs) {
+        // Accumulate partial buckets with the current value and then send to the anomaly tracker.
+        if (mCurrentFullCounters->size() > 0) {
+            for (const auto& keyValuePair : *mCurrentSlicedCounter) {
+                (*mCurrentFullCounters)[keyValuePair.first] += keyValuePair.second;
+            }
+            for (auto& tracker : mAnomalyTrackers) {
+                tracker->addPastBucket(mCurrentFullCounters, mCurrentBucketNum);
+            }
+            mCurrentFullCounters = std::make_shared<DimToValMap>();
+        } else {
+            // Skip aggregating the partial buckets since there's no previous partial bucket.
+            for (auto& tracker : mAnomalyTrackers) {
+                tracker->addPastBucket(mCurrentSlicedCounter, mCurrentBucketNum);
+            }
+        }
+    } else {
+        // Accumulate partial bucket.
+        for (const auto& keyValuePair : *mCurrentSlicedCounter) {
+            (*mCurrentFullCounters)[keyValuePair.first] += keyValuePair.second;
+        }
     }
 
-    // Reset counters (do not clear, since the old one is still referenced in mAnomalyTrackers).
+    // Only reset the counters; don't set up the bucket start time or bucket number here.
+    // (Do not clear the map since the old one is still referenced by mAnomalyTrackers.)
     mCurrentSlicedCounter = std::make_shared<DimToValMap>();
-    uint64_t numBucketsForward = (eventTimeNs - mCurrentBucketStartTimeNs) / mBucketSizeNs;
-    mCurrentBucketStartTimeNs = mCurrentBucketStartTimeNs + numBucketsForward * mBucketSizeNs;
-    mCurrentBucketNum += numBucketsForward;
-    VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
-         (long long)mCurrentBucketStartTimeNs);
 }
 
 // Rough estimate of CountMetricProducer buffer stored. This number will be
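
A minimal standalone sketch of the bucket-advance arithmetic used above (the BucketState type and function names are illustrative, not part of the patch): because an app upgrade can leave mCurrentBucketStartTimeNs unaligned, the skip count is derived from the aligned bucket end time rather than from the bucket start.

    #include <cstdint>

    // Simplified stand-ins for the MetricProducer members used above.
    struct BucketState {
        uint64_t startTimeNs;           // mStartTimeNs: when the metric producer was created
        uint64_t bucketSizeNs;          // mBucketSizeNs
        uint64_t currentBucketNum;      // mCurrentBucketNum
        uint64_t currentBucketStartNs;  // mCurrentBucketStartTimeNs (may be unaligned after an upgrade)
    };

    // Bucket end is derived from the aligned start time, not the (possibly unaligned) bucket start.
    uint64_t currentBucketEndNs(const BucketState& s) {
        return s.startTimeNs + (s.currentBucketNum + 1) * s.bucketSizeNs;
    }

    // Mirrors the bookkeeping in flushIfNeededLocked: skip forward to the bucket containing eventTimeNs.
    void advanceBuckets(BucketState* s, uint64_t eventTimeNs) {
        uint64_t endNs = currentBucketEndNs(*s);
        if (eventTimeNs < endNs) {
            return;  // still inside the current bucket
        }
        uint64_t numBucketsForward = 1 + (eventTimeNs - endNs) / s->bucketSizeNs;
        s->currentBucketStartNs = endNs + (numBucketsForward - 1) * s->bucketSizeNs;
        s->currentBucketNum += numBucketsForward;
        // Example: startTimeNs = 0, bucketSizeNs = 60, an upgrade left currentBucketStartNs = 30;
        // eventTimeNs = 130 -> endNs = 60, numBucketsForward = 2, new start = 120, bucket #2.
    }
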
diff --git a/cmds/statsd/src/metrics/CountMetricProducer.h b/cmds/statsd/src/metrics/CountMetricProducer.h
index 8659d47..b06c77b 100644
--- a/cmds/statsd/src/metrics/CountMetricProducer.h
+++ b/cmds/statsd/src/metrics/CountMetricProducer.h
@@ -71,14 +71,20 @@
     void dumpStatesLocked(FILE* out, bool verbose) const override{};
 
     // Util function to flush the old packet.
-    void flushIfNeededLocked(const uint64_t& newEventTime);
+    void flushIfNeededLocked(const uint64_t& newEventTime) override;
+
+    void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
 
     // TODO: Add a lock to mPastBuckets.
     std::unordered_map<MetricDimensionKey, std::vector<CountBucket>> mPastBuckets;
 
-    // The current bucket.
+    // The current bucket (may be a partial bucket).
     std::shared_ptr<DimToValMap> mCurrentSlicedCounter = std::make_shared<DimToValMap>();
 
+    // The sum of the previous partial buckets in the current full bucket (excluding the current
+    // partial bucket). This is only updated while flushing the current bucket.
+    std::shared_ptr<DimToValMap> mCurrentFullCounters = std::make_shared<DimToValMap>();
+
     static const size_t kBucketSize = sizeof(CountBucket{});
 
     bool hitGuardRailLocked(const MetricDimensionKey& newKey);
@@ -87,6 +93,8 @@
     FRIEND_TEST(CountMetricProducerTest, TestEventsWithNonSlicedCondition);
     FRIEND_TEST(CountMetricProducerTest, TestEventsWithSlicedCondition);
     FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced);
+    FRIEND_TEST(CountMetricProducerTest, TestEventWithAppUpgrade);
+    FRIEND_TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket);
 };
 
 }  // namespace statsd
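
A small sketch of how mCurrentFullCounters is meant to combine partial buckets (map types simplified, names illustrative): counts from each closed partial bucket are folded into a per-full-bucket map, and only the merged totals reach the anomaly tracker once a full bucket boundary is crossed.

    #include <cstdint>
    #include <map>
    #include <string>

    using DimToVal = std::map<std::string, int64_t>;

    // Fold one closed partial bucket into the running full-bucket totals.
    void accumulatePartialBucket(const DimToVal& partialBucket, DimToVal* fullBucket) {
        for (const auto& kv : partialBucket) {
            (*fullBucket)[kv.first] += kv.second;
        }
    }

    // Example: a 60s bucket split by an upgrade at t = 30s.
    //   partial bucket [0s, 30s):  {"wakeup": 2}
    //   partial bucket [30s, 60s): {"wakeup": 3}
    // After folding both partials, the anomaly tracker sees one bucket {"wakeup": 5}
    // for that bucket number.
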
diff --git a/cmds/statsd/src/metrics/DurationMetricProducer.cpp b/cmds/statsd/src/metrics/DurationMetricProducer.cpp
index efbdae1..2400eba1 100644
--- a/cmds/statsd/src/metrics/DurationMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/DurationMetricProducer.cpp
@@ -121,13 +121,13 @@
         case DurationMetric_AggregationType_SUM:
             return make_unique<OringDurationTracker>(
                     mConfigKey, mMetricId, eventKey, mWizard, mConditionTrackerIndex,
-                    mDimensionsInCondition, mNested,
-                    mCurrentBucketStartTimeNs, mBucketSizeNs, mConditionSliced, mAnomalyTrackers);
+                    mDimensionsInCondition, mNested, mCurrentBucketStartTimeNs, mCurrentBucketNum,
+                    mStartTimeNs, mBucketSizeNs, mConditionSliced, mAnomalyTrackers);
         case DurationMetric_AggregationType_MAX_SPARSE:
             return make_unique<MaxDurationTracker>(
                     mConfigKey, mMetricId, eventKey, mWizard, mConditionTrackerIndex,
-                    mDimensionsInCondition, mNested,
-                    mCurrentBucketStartTimeNs, mBucketSizeNs, mConditionSliced, mAnomalyTrackers);
+                    mDimensionsInCondition, mNested, mCurrentBucketStartTimeNs, mCurrentBucketNum,
+                    mStartTimeNs, mBucketSizeNs, mConditionSliced, mAnomalyTrackers);
     }
 }
 
@@ -252,17 +252,18 @@
     protoOutput->end(protoToken);
     protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_END_REPORT_NANOS, (long long)dumpTimeNs);
     mPastBuckets.clear();
-    mStartTimeNs = mCurrentBucketStartTimeNs;
 }
 
-void DurationMetricProducer::flushIfNeededLocked(const uint64_t& eventTime) {
-    if (mCurrentBucketStartTimeNs + mBucketSizeNs > eventTime) {
+void DurationMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
+    uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+
+    if (currentBucketEndTimeNs > eventTimeNs) {
         return;
     }
     VLOG("flushing...........");
     for (auto it = mCurrentSlicedDurationTrackerMap.begin();
             it != mCurrentSlicedDurationTrackerMap.end();) {
-        if (it->second->flushIfNeeded(eventTime, &mPastBuckets)) {
+        if (it->second->flushIfNeeded(eventTimeNs, &mPastBuckets)) {
             VLOG("erase bucket for key %s", it->first.c_str());
             it = mCurrentSlicedDurationTrackerMap.erase(it);
         } else {
@@ -270,11 +271,23 @@
         }
     }
 
-    int numBucketsForward = (eventTime - mCurrentBucketStartTimeNs) / mBucketSizeNs;
-    mCurrentBucketStartTimeNs += numBucketsForward * mBucketSizeNs;
+    int numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
+    mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
     mCurrentBucketNum += numBucketsForward;
 }
 
+void DurationMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
+    for (auto it = mCurrentSlicedDurationTrackerMap.begin();
+         it != mCurrentSlicedDurationTrackerMap.end();) {
+        if (it->second->flushCurrentBucket(eventTimeNs, &mPastBuckets)) {
+            VLOG("erase bucket for key %s", it->first.c_str());
+            it = mCurrentSlicedDurationTrackerMap.erase(it);
+        } else {
+            ++it;
+        }
+    }
+}
+
 void DurationMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
     if (mCurrentSlicedDurationTrackerMap.size() == 0) {
         return;
diff --git a/cmds/statsd/src/metrics/DurationMetricProducer.h b/cmds/statsd/src/metrics/DurationMetricProducer.h
index 152e570..a496016 100644
--- a/cmds/statsd/src/metrics/DurationMetricProducer.h
+++ b/cmds/statsd/src/metrics/DurationMetricProducer.h
@@ -73,6 +73,8 @@
     // Util function to flush the old packet.
     void flushIfNeededLocked(const uint64_t& eventTime);
 
+    void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
+
     const DurationMetric_AggregationType mAggregationType;
 
     // Index of the SimpleAtomMatcher which defines the start.
@@ -112,6 +114,10 @@
 
     FRIEND_TEST(DurationMetricTrackerTest, TestNoCondition);
     FRIEND_TEST(DurationMetricTrackerTest, TestNonSlicedCondition);
+    FRIEND_TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade);
+    FRIEND_TEST(DurationMetricTrackerTest, TestSumDurationWithUpgradeInFollowingBucket);
+    FRIEND_TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade);
+    FRIEND_TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket);
     FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicates);
 };
 
diff --git a/cmds/statsd/src/metrics/EventMetricProducer.cpp b/cmds/statsd/src/metrics/EventMetricProducer.cpp
index 820d591..a021e0a 100644
--- a/cmds/statsd/src/metrics/EventMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/EventMetricProducer.cpp
@@ -118,7 +118,6 @@
                        reinterpret_cast<char*>(buffer.get()->data()), buffer.get()->size());
 
     startNewProtoOutputStreamLocked();
-    mStartTimeNs = dumpTimeNs;
 }
 
 void EventMetricProducer::onConditionChangedLocked(const bool conditionMet,
diff --git a/cmds/statsd/src/metrics/GaugeMetricProducer.cpp b/cmds/statsd/src/metrics/GaugeMetricProducer.cpp
index d6cb189..4190f00 100644
--- a/cmds/statsd/src/metrics/GaugeMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/GaugeMetricProducer.cpp
@@ -191,10 +191,20 @@
     protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_END_REPORT_NANOS, (long long)dumpTimeNs);
 
     mPastBuckets.clear();
-    mStartTimeNs = mCurrentBucketStartTimeNs;
     // TODO: Clear mDimensionKeyMap once the report is dumped.
 }
 
+void GaugeMetricProducer::pullLocked() {
+    vector<std::shared_ptr<LogEvent>> allData;
+    if (!mStatsPullerManager->Pull(mPullTagId, &allData)) {
+        ALOGE("Stats puller failed for tag: %d", mPullTagId);
+        return;
+    }
+    for (const auto& data : allData) {
+        onMatchedLogEventLocked(0, *data);
+    }
+}
+
 void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet,
                                                    const uint64_t eventTime) {
     VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
@@ -326,7 +336,6 @@
 }
 
 void GaugeMetricProducer::updateCurrentSlicedBucketForAnomaly() {
-    mCurrentSlicedBucketForAnomaly->clear();
     status_t err = NO_ERROR;
     for (const auto& slice : *mCurrentSlicedBucket) {
         if (slice.second.empty() || slice.second.front().mFields->empty()) {
@@ -349,42 +358,57 @@
 // if data is pushed, onMatchedLogEvent will only be called through onConditionChanged() inside
 // the GaugeMetricProducer while holding the lock.
 void GaugeMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
-    if (eventTimeNs < mCurrentBucketStartTimeNs + mBucketSizeNs) {
+    uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+
+    if (eventTimeNs < currentBucketEndTimeNs) {
         VLOG("eventTime is %lld, less than next bucket start time %lld", (long long)eventTimeNs,
              (long long)(mCurrentBucketStartTimeNs + mBucketSizeNs));
         return;
     }
 
+    flushCurrentBucketLocked(eventTimeNs);
+
+    // Adjusts the bucket start and end times.
+    int64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
+    mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
+    mCurrentBucketNum += numBucketsForward;
+    VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
+         (long long)mCurrentBucketStartTimeNs);
+}
+
+void GaugeMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
+    uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
+
     GaugeBucket info;
     info.mBucketStartNs = mCurrentBucketStartTimeNs;
-    info.mBucketEndNs = mCurrentBucketStartTimeNs + mBucketSizeNs;
+    if (eventTimeNs < fullBucketEndTimeNs) {
+        info.mBucketEndNs = eventTimeNs;
+    } else {
+        info.mBucketEndNs = fullBucketEndTimeNs;
+    }
     info.mBucketNum = mCurrentBucketNum;
 
     for (const auto& slice : *mCurrentSlicedBucket) {
         info.mGaugeAtoms = slice.second;
         auto& bucketList = mPastBuckets[slice.first];
         bucketList.push_back(info);
-        VLOG("gauge metric %lld, dump key value: %s",
-            (long long)mMetricId, slice.first.c_str());
+        VLOG("gauge metric %lld, dump key value: %s", (long long)mMetricId, slice.first.c_str());
     }
 
-    // Reset counters
+    // If we have anomaly trackers, we need to update the partial bucket values.
     if (mAnomalyTrackers.size() > 0) {
         updateCurrentSlicedBucketForAnomaly();
-        for (auto& tracker : mAnomalyTrackers) {
-            tracker->addPastBucket(mCurrentSlicedBucketForAnomaly, mCurrentBucketNum);
+
+        if (eventTimeNs > fullBucketEndTimeNs) {
+            // This is known to be a full bucket, so send this data to the anomaly tracker.
+            for (auto& tracker : mAnomalyTrackers) {
+                tracker->addPastBucket(mCurrentSlicedBucketForAnomaly, mCurrentBucketNum);
+            }
+            mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>();
         }
     }
 
-    mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>();
     mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>();
-
-    // Adjusts the bucket start time
-    int64_t numBucketsForward = (eventTimeNs - mCurrentBucketStartTimeNs) / mBucketSizeNs;
-    mCurrentBucketStartTimeNs = mCurrentBucketStartTimeNs + numBucketsForward * mBucketSizeNs;
-    mCurrentBucketNum += numBucketsForward;
-    VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
-         (long long)mCurrentBucketStartTimeNs);
 }
 
 size_t GaugeMetricProducer::byteSizeLocked() const {
diff --git a/cmds/statsd/src/metrics/GaugeMetricProducer.h b/cmds/statsd/src/metrics/GaugeMetricProducer.h
index 86d0ccd..d5d34be 100644
--- a/cmds/statsd/src/metrics/GaugeMetricProducer.h
+++ b/cmds/statsd/src/metrics/GaugeMetricProducer.h
@@ -62,6 +62,22 @@
     // Handles when the pulled data arrives.
     void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data) override;
 
+    // GaugeMetric needs to immediately trigger another pull when we create the partial bucket.
+    void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+                          const int64_t version) override {
+        std::lock_guard<std::mutex> lock(mMutex);
+
+        if (eventTimeNs > getCurrentBucketEndTimeNs()) {
+            // Flush full buckets on the normal path up to the latest bucket boundary.
+            flushIfNeededLocked(eventTimeNs);
+        }
+        flushCurrentBucketLocked(eventTimeNs);
+        mCurrentBucketStartTimeNs = eventTimeNs;
+        if (mPullTagId != -1) {
+            pullLocked();
+        }
+    };
+
 protected:
     void onMatchedLogEventInternalLocked(
             const size_t matcherIndex, const MetricDimensionKey& eventKey,
@@ -91,7 +107,11 @@
     void dumpStatesLocked(FILE* out, bool verbose) const override{};
 
     // Util function to flush the old packet.
-    void flushIfNeededLocked(const uint64_t& eventTime);
+    void flushIfNeededLocked(const uint64_t& eventTime) override;
+
+    void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
+
+    void pullLocked();
 
     std::shared_ptr<StatsPullerManager> mStatsPullerManager;
     // tagId for pulled data. -1 if this is not pulled
@@ -101,13 +121,15 @@
     // TODO: Add a lock to mPastBuckets.
     std::unordered_map<MetricDimensionKey, std::vector<GaugeBucket>> mPastBuckets;
 
-    // The current bucket.
+    // The current partial bucket.
     std::shared_ptr<DimToGaugeAtomsMap> mCurrentSlicedBucket;
 
-    // The current bucket for anomaly detection.
+    // The current full bucket for anomaly detection. This is updated to the latest value seen for
+    // this slice (i.e., for partial buckets, we use the last partial bucket in this full bucket).
     std::shared_ptr<DimToValMap> mCurrentSlicedBucketForAnomaly;
 
-    // Translate Atom based bucket to single numeric value bucket for anomaly
+    // Translates the atom-based bucket into a single numeric value bucket for anomaly detection
+    // and updates the map for each slice with the latest value.
     void updateCurrentSlicedBucketForAnomaly();
 
     // Whitelist of fields to report. Empty means all are reported.
@@ -125,6 +147,8 @@
 
     FRIEND_TEST(GaugeMetricProducerTest, TestWithCondition);
     FRIEND_TEST(GaugeMetricProducerTest, TestNoCondition);
+    FRIEND_TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade);
+    FRIEND_TEST(GaugeMetricProducerTest, TestPulledWithUpgrade);
     FRIEND_TEST(GaugeMetricProducerTest, TestAnomalyDetection);
 };
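
A minimal sketch (map types simplified) of the gauge anomaly bookkeeping implied by dropping the clear() in updateCurrentSlicedBucketForAnomaly: across partial buckets the anomaly map keeps the most recent value per slice, and it is only reset after the full bucket has been handed to the anomaly trackers.

    #include <cstdint>
    #include <map>
    #include <string>

    using DimToVal = std::map<std::string, int64_t>;

    // Update the anomaly view of the current full bucket: the latest value per slice wins,
    // since a gauge is a snapshot rather than a sum.
    void updateForAnomaly(const DimToVal& currentPartialBucket, DimToVal* anomalyBucket) {
        for (const auto& kv : currentPartialBucket) {
            (*anomalyBucket)[kv.first] = kv.second;  // overwrite, do not accumulate
        }
    }
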
 
diff --git a/cmds/statsd/src/metrics/MetricProducer.h b/cmds/statsd/src/metrics/MetricProducer.h
index 3b1498f..542dd8a 100644
--- a/cmds/statsd/src/metrics/MetricProducer.h
+++ b/cmds/statsd/src/metrics/MetricProducer.h
@@ -53,15 +53,32 @@
 
     virtual ~MetricProducer(){};
 
-    void notifyAppUpgrade(const string& apk, const int uid, const int64_t version) override{
+    /**
+     * Forces this metric to split into a partial bucket right now. If we're past a full bucket, we
+     * first call the standard flushing code to flush up to the latest full bucket. Then we flush
+     * again with the end timestamp forced to be now, and after flushing we update the start
+     * timestamp to now.
+     */
+    void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+                          const int64_t version) override {
+        std::lock_guard<std::mutex> lock(mMutex);
+
+        if (eventTimeNs > getCurrentBucketEndTimeNs()) {
+            // Flush full buckets on the normal path up to the latest bucket boundary.
+            flushIfNeededLocked(eventTimeNs);
+        }
+        // Now flush a partial bucket.
+        flushCurrentBucketLocked(eventTimeNs);
+        mCurrentBucketStartTimeNs = eventTimeNs;
+        // Don't update the current bucket number so that the anomaly tracker knows this bucket
+        // is a partial bucket and can merge it with the previous bucket.
+    };
+
+    void notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk, const int uid) override{
             // TODO: Implement me.
     };
 
-    void notifyAppRemoved(const string& apk, const int uid) override{
-            // TODO: Implement me.
-    };
-
-    void onUidMapReceived() override{
+    void onUidMapReceived(const uint64_t& eventTimeNs) override{
             // TODO: Implement me.
     };
 
@@ -87,11 +104,12 @@
     };
 
     // Output the metrics data to [protoOutput]. All metrics reports end with the same timestamp.
+    // This method clears all the past buckets.
     void onDumpReport(const uint64_t dumpTimeNs, android::util::ProtoOutputStream* protoOutput) {
         std::lock_guard<std::mutex> lock(mMutex);
         return onDumpReportLocked(dumpTimeNs, protoOutput);
     }
-
+    // This method does not clear the past buckets.
     void onDumpReport(const uint64_t dumpTimeNs, StatsLogReport* report) {
         std::lock_guard<std::mutex> lock(mMutex);
         return onDumpReportLocked(dumpTimeNs, report);
@@ -136,15 +154,43 @@
     virtual size_t byteSizeLocked() const = 0;
     virtual void dumpStatesLocked(FILE* out, bool verbose) const = 0;
 
+    /**
+     * Flushes the current bucket if the eventTime is after the current bucket's end time.
+     */
+    virtual void flushIfNeededLocked(const uint64_t& eventTime){};
+
+    /**
+     * For metrics that aggregate (i.e., every metric producer except EventMetricProducer),
+     * we need to be able to flush the current bucket on demand (i.e., end the current bucket and
+     * start a new bucket). If this function is called when eventTimeNs is greater than the current
+     * bucket's end timestamp, then we flush up to the end of the latest full bucket; otherwise,
+     * we assume that we want to flush a partial bucket. The bucket start timestamp and bucket
+     * number are not changed by this function. This method should only be called by
+     * flushIfNeededLocked or the app upgrade handler; the caller MUST update the bucket timestamp
+     * and bucket number as needed.
+     */
+    virtual void flushCurrentBucketLocked(const uint64_t& eventTimeNs){};
+
+    // Convenience to compute the current bucket's end time, which is always aligned with the
+    // start time of the metric.
+    uint64_t getCurrentBucketEndTimeNs() {
+        return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
+    }
+
     const int64_t mMetricId;
 
     const ConfigKey mConfigKey;
 
-    // The start time for the current in memory metrics data.
+    // The time when this metric producer was first created. The end time for the current bucket
+    // can be computed from this based on mCurrentBucketNum.
     uint64_t mStartTimeNs;
 
+    // The start time of the current bucket; it may not be aligned with the start of statsd if
+    // there is an app upgrade in the middle of a bucket.
     uint64_t mCurrentBucketStartTimeNs;
 
+    // Used by the anomaly detector to track which bucket we are in. This is not sent with the
+    // produced report.
     uint64_t mCurrentBucketNum;
 
     int64_t mBucketSizeNs;
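
A compact, self-contained walk-through (simplified local variables, not the real members) of why notifyAppUpgrade above leaves mCurrentBucketNum untouched: the bucket end stays pinned to the original boundary, so the partial bucket created at the upgrade and the remainder of the bucket share one bucket number and can be merged by the anomaly tracker.

    #include <cassert>
    #include <cstdint>

    int main() {
        uint64_t startTimeNs = 0;
        uint64_t bucketSizeNs = 60;
        uint64_t bucketNum = 0;
        uint64_t bucketStartNs = 0;

        // App upgrade at t = 30: flush a partial bucket [0, 30), move the start,
        // and deliberately keep the bucket number unchanged.
        uint64_t upgradeTimeNs = 30;
        bucketStartNs = upgradeTimeNs;

        // The bucket end is still aligned with the metric start time...
        uint64_t bucketEndNs = startTimeNs + (bucketNum + 1) * bucketSizeNs;
        assert(bucketEndNs == 60);

        // ...so the next flush at t >= 60 closes [30, 60) under bucket #0 as well,
        // and the anomaly tracker can merge the two partials into one full bucket.
        (void)bucketStartNs;
        return 0;
    }
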
diff --git a/cmds/statsd/src/metrics/MetricsManager.cpp b/cmds/statsd/src/metrics/MetricsManager.cpp
index 5f0619a..6573a89 100644
--- a/cmds/statsd/src/metrics/MetricsManager.cpp
+++ b/cmds/statsd/src/metrics/MetricsManager.cpp
@@ -121,7 +121,8 @@
     return mConfigValid;
 }
 
-void MetricsManager::notifyAppUpgrade(const string& apk, const int uid, const int64_t version) {
+void MetricsManager::notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+                                      const int64_t version) {
     // check if we care this package
     if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) == mAllowedPkg.end()) {
         return;
@@ -131,7 +132,8 @@
     initLogSourceWhiteList();
 }
 
-void MetricsManager::notifyAppRemoved(const string& apk, const int uid) {
+void MetricsManager::notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk,
+                                      const int uid) {
     // check if we care this package
     if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) == mAllowedPkg.end()) {
         return;
@@ -141,7 +143,7 @@
     initLogSourceWhiteList();
 }
 
-void MetricsManager::onUidMapReceived() {
+void MetricsManager::onUidMapReceived(const uint64_t& eventTimeNs) {
     if (mAllowedPkg.size() == 0) {
         return;
     }
diff --git a/cmds/statsd/src/metrics/MetricsManager.h b/cmds/statsd/src/metrics/MetricsManager.h
index d4b9102..9cae70a 100644
--- a/cmds/statsd/src/metrics/MetricsManager.h
+++ b/cmds/statsd/src/metrics/MetricsManager.h
@@ -51,11 +51,12 @@
 
     void setAnomalyMonitor(const sp<AnomalyMonitor>& anomalyMonitor);
 
-    void notifyAppUpgrade(const string& apk, const int uid, const int64_t version) override;
+    void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+                          const int64_t version) override;
 
-    void notifyAppRemoved(const string& apk, const int uid) override;
+    void notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk, const int uid) override;
 
-    void onUidMapReceived() override;
+    void onUidMapReceived(const uint64_t& eventTimeNs) override;
 
     bool shouldAddUidMapListener() const {
         return !mAllowedPkg.empty();
diff --git a/cmds/statsd/src/metrics/ValueMetricProducer.cpp b/cmds/statsd/src/metrics/ValueMetricProducer.cpp
index c9cc7bb..31d9ff8 100644
--- a/cmds/statsd/src/metrics/ValueMetricProducer.cpp
+++ b/cmds/statsd/src/metrics/ValueMetricProducer.cpp
@@ -190,7 +190,6 @@
 
     VLOG("metric %lld dump report now...", (long long)mMetricId);
     mPastBuckets.clear();
-    mStartTimeNs = mCurrentBucketStartTimeNs;
     // TODO: Clear mDimensionKeyMap once the report is dumped.
 }
 
@@ -320,8 +319,13 @@
         interval.sum += value;
     }
 
+    long wholeBucketVal = interval.sum;
+    auto prev = mCurrentFullBucket.find(eventKey);
+    if (prev != mCurrentFullBucket.end()) {
+        wholeBucketVal += prev->second;
+    }
     for (auto& tracker : mAnomalyTrackers) {
-        tracker->detectAndDeclareAnomaly(eventTimeNs, mCurrentBucketNum, eventKey, interval.sum);
+        tracker->detectAndDeclareAnomaly(eventTimeNs, mCurrentBucketNum, eventKey, wholeBucketVal);
     }
 }
 
@@ -333,16 +337,39 @@
 }
 
 void ValueMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
-    if (mCurrentBucketStartTimeNs + mBucketSizeNs > eventTimeNs) {
+    uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
+
+    if (currentBucketEndTimeNs > eventTimeNs) {
         VLOG("eventTime is %lld, less than next bucket start time %lld", (long long)eventTimeNs,
-             (long long)(mCurrentBucketStartTimeNs + mBucketSizeNs));
+             (long long)(currentBucketEndTimeNs));
         return;
     }
+
+    flushCurrentBucketLocked(eventTimeNs);
+
+    int64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
+    mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
+    mCurrentBucketNum += numBucketsForward;
+
+    if (numBucketsForward > 1) {
+        VLOG("Skipping forward %lld buckets", (long long)numBucketsForward);
+    }
+    VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
+         (long long)mCurrentBucketStartTimeNs);
+}
+
+void ValueMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
     VLOG("finalizing bucket for %ld, dumping %d slices", (long)mCurrentBucketStartTimeNs,
          (int)mCurrentSlicedBucket.size());
+    uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
+
     ValueBucket info;
     info.mBucketStartNs = mCurrentBucketStartTimeNs;
-    info.mBucketEndNs = mCurrentBucketStartTimeNs + mBucketSizeNs;
+    if (eventTimeNs < fullBucketEndTimeNs) {
+        info.mBucketEndNs = eventTimeNs;
+    } else {
+        info.mBucketEndNs = fullBucketEndTimeNs;
+    }
     info.mBucketNum = mCurrentBucketNum;
 
     int tainted = 0;
@@ -352,27 +379,42 @@
         // it will auto create new vector of ValuebucketInfo if the key is not found.
         auto& bucketList = mPastBuckets[slice.first];
         bucketList.push_back(info);
-
-        for (auto& tracker : mAnomalyTrackers) {
-            if (tracker != nullptr) {
-                tracker->addPastBucket(slice.first, info.mValue, info.mBucketNum);
-            }
-        }
     }
     VLOG("%d tainted pairs in the bucket", tainted);
 
+    if (eventTimeNs > fullBucketEndTimeNs) {  // If this is a full bucket, send to the anomaly tracker.
+        // Accumulate partial buckets with the current value and then send to the anomaly tracker.
+        if (mCurrentFullBucket.size() > 0) {
+            for (const auto& slice : mCurrentSlicedBucket) {
+                mCurrentFullBucket[slice.first] += slice.second.sum;
+            }
+            for (const auto& slice : mCurrentFullBucket) {
+                for (auto& tracker : mAnomalyTrackers) {
+                    if (tracker != nullptr) {
+                        tracker->addPastBucket(slice.first, slice.second, mCurrentBucketNum);
+                    }
+                }
+            }
+            mCurrentFullBucket.clear();
+        } else {
+            // Skip aggregating the partial buckets since there's no previous partial bucket.
+            for (const auto& slice : mCurrentSlicedBucket) {
+                for (auto& tracker : mAnomalyTrackers) {
+                    if (tracker != nullptr) {
+                        tracker->addPastBucket(slice.first, slice.second.sum, mCurrentBucketNum);
+                    }
+                }
+            }
+        }
+    } else {
+        // Accumulate partial bucket.
+        for (const auto& slice : mCurrentSlicedBucket) {
+            mCurrentFullBucket[slice.first] += slice.second.sum;
+        }
+    }
+
     // Reset counters
     mCurrentSlicedBucket.clear();
-
-    int64_t numBucketsForward = (eventTimeNs - mCurrentBucketStartTimeNs) / mBucketSizeNs;
-    mCurrentBucketStartTimeNs = mCurrentBucketStartTimeNs + numBucketsForward * mBucketSizeNs;
-    mCurrentBucketNum += numBucketsForward;
-
-    if (numBucketsForward > 1) {
-        VLOG("Skipping forward %lld buckets", (long long)numBucketsForward);
-    }
-    VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
-         (long long)mCurrentBucketStartTimeNs);
 }
 
 size_t ValueMetricProducer::byteSizeLocked() const {
diff --git a/cmds/statsd/src/metrics/ValueMetricProducer.h b/cmds/statsd/src/metrics/ValueMetricProducer.h
index 121ec7d..bf5b7df 100644
--- a/cmds/statsd/src/metrics/ValueMetricProducer.h
+++ b/cmds/statsd/src/metrics/ValueMetricProducer.h
@@ -47,6 +47,39 @@
 
     void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data) override;
 
+    // ValueMetric needs special logic if it's a pulled atom.
+    void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
+                          const int64_t version) override {
+        std::lock_guard<std::mutex> lock(mMutex);
+
+        if (mPullTagId != -1) {
+            vector<shared_ptr<LogEvent>> allData;
+            mStatsPullerManager->Pull(mPullTagId, &allData);
+            if (allData.size() == 0) {
+                // This shouldn't happen since this ValueMetric is not useful now.
+            }
+
+            // Pretend the pulled data occurs right before the app upgrade event.
+            mCondition = false;
+            for (const auto& data : allData) {
+                data->setTimestampNs(eventTimeNs - 1);
+                onMatchedLogEventLocked(0, *data);
+            }
+
+            flushCurrentBucketLocked(eventTimeNs);
+            mCurrentBucketStartTimeNs = eventTimeNs;
+
+            mCondition = true;
+            for (const auto& data : allData) {
+                data->setTimestampNs(eventTimeNs);
+                onMatchedLogEventLocked(0, *data);
+            }
+        } else {  // For pushed value metric, we simply flush and reset the current bucket start.
+            flushCurrentBucketLocked(eventTimeNs);
+            mCurrentBucketStartTimeNs = eventTimeNs;
+        }
+    };
+
 protected:
     void onMatchedLogEventInternalLocked(
             const size_t matcherIndex, const MetricDimensionKey& eventKey,
@@ -70,7 +103,9 @@
     void dumpStatesLocked(FILE* out, bool verbose) const override{};
 
     // Util function to flush the old packet.
-    void flushIfNeededLocked(const uint64_t& eventTime);
+    void flushIfNeededLocked(const uint64_t& eventTime) override;
+
+    void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
 
     const FieldMatcher mValueField;
 
@@ -101,6 +136,8 @@
 
     std::unordered_map<MetricDimensionKey, Interval> mCurrentSlicedBucket;
 
+    std::unordered_map<MetricDimensionKey, long> mCurrentFullBucket;
+
     // Save the past buckets and we can clear when the StatsLogReport is dumped.
     // TODO: Add a lock to mPastBuckets.
     std::unordered_map<MetricDimensionKey, std::vector<ValueBucket>> mPastBuckets;
@@ -114,6 +151,8 @@
 
     FRIEND_TEST(ValueMetricProducerTest, TestNonDimensionalEvents);
     FRIEND_TEST(ValueMetricProducerTest, TestEventsWithNonSlicedCondition);
+    FRIEND_TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade);
+    FRIEND_TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade);
     FRIEND_TEST(ValueMetricProducerTest, TestPushedEventsWithoutCondition);
     FRIEND_TEST(ValueMetricProducerTest, TestAnomalyDetection);
 };
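
A simplified sketch of the double-pull pattern in the ValueMetricProducer override above (the Snapshot type and recordSample callback are illustrative, not the StatsPullerManager API): the same pulled snapshot is replayed once just before the upgrade to close out the old partial bucket, and once at the upgrade time to seed the baseline for the new one.

    #include <cstdint>
    #include <functional>
    #include <vector>

    struct Snapshot {
        uint64_t timestampNs;
        int64_t value;
    };

    void onAppUpgradePulled(uint64_t eventTimeNs, std::vector<Snapshot> pulled,
                            const std::function<void(const Snapshot&)>& recordSample) {
        // 1. Attribute the snapshot to the instant before the upgrade so it lands in,
        //    and finalizes, the old partial bucket.
        for (Snapshot s : pulled) {
            s.timestampNs = eventTimeNs - 1;
            recordSample(s);
        }
        // 2. Here the real code flushes the partial bucket and moves the bucket start:
        //    flushCurrentBucketLocked(eventTimeNs); mCurrentBucketStartTimeNs = eventTimeNs;
        // 3. Replay the same snapshot at the upgrade time so the new partial bucket has
        //    a baseline to diff against.
        for (Snapshot s : pulled) {
            s.timestampNs = eventTimeNs;
            recordSample(s);
        }
    }
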
diff --git a/cmds/statsd/src/metrics/duration_helper/DurationTracker.h b/cmds/statsd/src/metrics/duration_helper/DurationTracker.h
index 45735a8..356a81c 100644
--- a/cmds/statsd/src/metrics/duration_helper/DurationTracker.h
+++ b/cmds/statsd/src/metrics/duration_helper/DurationTracker.h
@@ -63,7 +63,8 @@
     DurationTracker(const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
                     sp<ConditionWizard> wizard, int conditionIndex,
                     const FieldMatcher& dimensionInCondition, bool nesting,
-                    uint64_t currentBucketStartNs, uint64_t bucketSizeNs, bool conditionSliced,
+                    uint64_t currentBucketStartNs, uint64_t currentBucketNum, uint64_t startTimeNs,
+                    uint64_t bucketSizeNs, bool conditionSliced,
                     const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
         : mConfigKey(key),
           mTrackerId(id),
@@ -75,7 +76,9 @@
           mNested(nesting),
           mCurrentBucketStartTimeNs(currentBucketStartNs),
           mDuration(0),
-          mCurrentBucketNum(0),
+          mDurationFullBucket(0),
+          mCurrentBucketNum(currentBucketNum),
+          mStartTimeNs(startTimeNs),
           mConditionSliced(conditionSliced),
           mAnomalyTrackers(anomalyTrackers){};
 
@@ -98,6 +101,12 @@
             uint64_t timestampNs,
             std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) = 0;
 
+    // Should only be called during an app upgrade or from this tracker's flushIfNeeded. If from
+    // an app upgrade, we assume that we're trying to form a partial bucket.
+    virtual bool flushCurrentBucket(
+            const uint64_t& eventTimeNs,
+            std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) = 0;
+
     // Predict the anomaly timestamp given the current status.
     virtual int64_t predictAnomalyTimestampNs(const DurationAnomalyTracker& anomalyTracker,
                                               const uint64_t currentTimestamp) const = 0;
@@ -153,6 +162,13 @@
             }
         }
     }
+
+    // Convenience to compute the current bucket's end time, which is always aligned with the
+    // start time of the metric.
+    uint64_t getCurrentBucketEndTimeNs() {
+        return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
+    }
+
     // A reference to the DurationMetricProducer's config key.
     const ConfigKey& mConfigKey;
 
@@ -172,10 +188,14 @@
 
     uint64_t mCurrentBucketStartTimeNs;
 
-    int64_t mDuration;  // current recorded duration result
+    int64_t mDuration;  // current recorded duration result (for partial bucket)
+
+    int64_t mDurationFullBucket;  // Sum of past partial buckets in current full bucket.
 
     uint64_t mCurrentBucketNum;
 
+    const uint64_t mStartTimeNs;
+
     const bool mConditionSliced;
 
     std::vector<sp<DurationAnomalyTracker>> mAnomalyTrackers;
diff --git a/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp b/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp
index db7dea4..c3bafc6 100644
--- a/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp
+++ b/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp
@@ -28,11 +28,13 @@
                                        const MetricDimensionKey& eventKey,
                                        sp<ConditionWizard> wizard, int conditionIndex,
                                        const FieldMatcher& dimensionInCondition, bool nesting,
-                                       uint64_t currentBucketStartNs, uint64_t bucketSizeNs,
+                                       uint64_t currentBucketStartNs, uint64_t currentBucketNum,
+                                       uint64_t startTimeNs, uint64_t bucketSizeNs,
                                        bool conditionSliced,
                                        const vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
     : DurationTracker(key, id, eventKey, wizard, conditionIndex, dimensionInCondition, nesting,
-                      currentBucketStartNs, bucketSizeNs, conditionSliced, anomalyTrackers) {
+                      currentBucketStartNs, currentBucketNum, startTimeNs, bucketSizeNs,
+                      conditionSliced, anomalyTrackers) {
 }
 
 unique_ptr<DurationTracker> MaxDurationTracker::clone(const uint64_t eventTime) {
@@ -102,7 +104,6 @@
 
 void MaxDurationTracker::noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
                                   bool forceStop) {
-    declareAnomalyIfAlarmExpired(eventTime);
     VLOG("MaxDuration: key %s stop", key.c_str());
     if (mInfos.find(key) == mInfos.end()) {
         // we didn't see a start event before. do nothing.
@@ -122,7 +123,7 @@
                 VLOG("Max, key %s, Stop %lld %lld %lld", key.c_str(),
                      (long long)duration.lastStartTime, (long long)eventTime,
                      (long long)durationTime);
-                duration.lastDuration = duration.lastDuration + durationTime;
+                duration.lastDuration += durationTime;
                 VLOG("  record duration: %lld ", (long long)duration.lastDuration);
             }
             break;
@@ -138,7 +139,6 @@
 
     if (duration.lastDuration > mDuration) {
         mDuration = duration.lastDuration;
-        detectAndDeclareAnomaly(eventTime, mCurrentBucketNum, mDuration);
         VLOG("Max: new max duration: %lld", (long long)mDuration);
     }
     // Once an atom duration ends, we erase it. Next time, if we see another atom event with the
@@ -158,88 +158,67 @@
     }
 }
 
-bool MaxDurationTracker::flushIfNeeded(
-        uint64_t eventTime, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
-    if (mCurrentBucketStartTimeNs + mBucketSizeNs > eventTime) {
-        return false;
-    }
-
+bool MaxDurationTracker::flushCurrentBucket(
+        const uint64_t& eventTimeNs,
+        std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
     VLOG("MaxDurationTracker flushing.....");
 
     // adjust the bucket start time
-    int numBucketsForward = (eventTime - mCurrentBucketStartTimeNs) / mBucketSizeNs;
+    int numBucketsForward = 0;
+    uint64_t fullBucketEnd = getCurrentBucketEndTimeNs();
+    uint64_t currentBucketEndTimeNs;
+    if (eventTimeNs >= fullBucketEnd) {
+        numBucketsForward = 1 + (eventTimeNs - fullBucketEnd) / mBucketSizeNs;
+        currentBucketEndTimeNs = fullBucketEnd;
+    } else {
+        // This must be a partial bucket.
+        currentBucketEndTimeNs = eventTimeNs;
+    }
 
-    uint64_t endTime = mCurrentBucketStartTimeNs + mBucketSizeNs;
-
-    DurationBucket info;
-    info.mBucketStartNs = mCurrentBucketStartTimeNs;
-    info.mBucketEndNs = endTime;
-    info.mBucketNum = mCurrentBucketNum;
-
-    uint64_t oldBucketStartTimeNs = mCurrentBucketStartTimeNs;
-    mCurrentBucketStartTimeNs += (numBucketsForward)*mBucketSizeNs;
-
-    bool hasOnGoingStartedEvent = false;  // a kStarted event last across bucket boundaries.
     bool hasPendingEvent =
             false;  // has either a kStarted or kPaused event across bucket boundaries
-                    // meaning we need to carry them over to the new bucket.
+    // meaning we need to carry them over to the new bucket.
     for (auto it = mInfos.begin(); it != mInfos.end(); ++it) {
         int64_t finalDuration = it->second.lastDuration;
-        if (it->second.state == kStarted) {
-            // the event is still on-going, duration needs to be updated.
-            // |..lastDurationTime_recorded...last_start -----|bucket_end. We need to record the
-            // duration between lastStartTime and bucketEnd.
-            int64_t durationTime = endTime - it->second.lastStartTime;
-
-            finalDuration += durationTime;
-            VLOG("  unrecorded %lld -> %lld", (long long)(durationTime), (long long)finalDuration);
-            // if the event is still on-going, we need to fill the buckets between prev_bucket and
-            // now_bucket. |prev_bucket|...|..|...|now_bucket|
-            hasOnGoingStartedEvent = true;
-        }
-
-        if (finalDuration > mDuration) {
-            mDuration = finalDuration;
-        }
-
         if (it->second.state == DurationState::kStopped) {
             // No need to keep buckets for events that were stopped before.
             mInfos.erase(it);
         } else {
             hasPendingEvent = true;
-            // for kPaused, and kStarted event, we will keep track of them, and reset the start time
-            // and duration.
-            it->second.lastStartTime = mCurrentBucketStartTimeNs;
-            it->second.lastDuration = 0;
         }
     }
 
+    // mDuration is updated in noteStop to the maximum duration that ended in the current bucket.
     if (mDuration != 0) {
+        DurationBucket info;
+        info.mBucketStartNs = mCurrentBucketStartTimeNs;
+        info.mBucketEndNs = currentBucketEndTimeNs;
+        info.mBucketNum = mCurrentBucketNum;
         info.mDuration = mDuration;
         (*output)[mEventKey].push_back(info);
-        addPastBucketToAnomalyTrackers(info.mDuration, info.mBucketNum);
         VLOG("  final duration for last bucket: %lld", (long long)mDuration);
     }
 
-    mDuration = 0;
-    if (hasOnGoingStartedEvent) {
-        for (int i = 1; i < numBucketsForward; i++) {
-            DurationBucket info;
-            info.mBucketStartNs = oldBucketStartTimeNs + mBucketSizeNs * i;
-            info.mBucketEndNs = endTime + mBucketSizeNs * i;
-            info.mBucketNum = mCurrentBucketNum + i;
-            info.mDuration = mBucketSizeNs;
-            (*output)[mEventKey].push_back(info);
-            addPastBucketToAnomalyTrackers(info.mDuration, info.mBucketNum);
-            VLOG("  filling gap bucket with duration %lld", (long long)mBucketSizeNs);
-        }
+    if (numBucketsForward > 0) {
+        mCurrentBucketStartTimeNs = fullBucketEnd + (numBucketsForward - 1) * mBucketSizeNs;
+        mCurrentBucketNum += numBucketsForward;
+    } else {  // We must be forming a partial bucket.
+        mCurrentBucketStartTimeNs = eventTimeNs;
     }
 
-    mCurrentBucketNum += numBucketsForward;
+    mDuration = 0;
     // If this tracker has no pending events, tell owner to remove.
     return !hasPendingEvent;
 }
 
+bool MaxDurationTracker::flushIfNeeded(
+        uint64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
+    if (eventTimeNs < getCurrentBucketEndTimeNs()) {
+        return false;
+    }
+    return flushCurrentBucket(eventTimeNs, output);
+}
+
 void MaxDurationTracker::onSlicedConditionMayChange(const uint64_t timestamp) {
     // Now for each of the on-going event, check if the condition has changed for them.
     for (auto& pair : mInfos) {
@@ -267,7 +246,6 @@
 
 void MaxDurationTracker::noteConditionChanged(const HashableDimensionKey& key, bool conditionMet,
                                               const uint64_t timestamp) {
-    declareAnomalyIfAlarmExpired(timestamp);
     auto it = mInfos.find(key);
     if (it == mInfos.end()) {
         return;
@@ -297,7 +275,6 @@
     }
     if (it->second.lastDuration > mDuration) {
         mDuration = it->second.lastDuration;
-        detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration);
     }
 }
 
diff --git a/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.h b/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.h
index 4d32a06..fba4119 100644
--- a/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.h
+++ b/cmds/statsd/src/metrics/duration_helper/MaxDurationTracker.h
@@ -28,10 +28,11 @@
 // they stop or bucket expires.
 class MaxDurationTracker : public DurationTracker {
 public:
-    MaxDurationTracker(const ConfigKey& key, const int64_t& id,
-                       const MetricDimensionKey& eventKey, sp<ConditionWizard> wizard,
-                       int conditionIndex, const FieldMatcher& dimensionInCondition, bool nesting,
-                       uint64_t currentBucketStartNs, uint64_t bucketSizeNs, bool conditionSliced,
+    MaxDurationTracker(const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
+                       sp<ConditionWizard> wizard, int conditionIndex,
+                       const FieldMatcher& dimensionInCondition, bool nesting,
+                       uint64_t currentBucketStartNs, uint64_t currentBucketNum,
+                       uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced,
                        const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers);
 
     MaxDurationTracker(const MaxDurationTracker& tracker) = default;
@@ -47,6 +48,9 @@
     bool flushIfNeeded(
             uint64_t timestampNs,
             std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
+    bool flushCurrentBucket(
+            const uint64_t& eventTimeNs,
+            std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>*) override;
 
     void onSlicedConditionMayChange(const uint64_t timestamp) override;
     void onConditionChanged(bool condition, const uint64_t timestamp) override;
@@ -68,7 +72,6 @@
     FRIEND_TEST(MaxDurationTrackerTest, TestCrossBucketBoundary);
     FRIEND_TEST(MaxDurationTrackerTest, TestMaxDurationWithCondition);
     FRIEND_TEST(MaxDurationTrackerTest, TestStopAll);
-    FRIEND_TEST(MaxDurationTrackerTest, TestAnomalyDetection);
 };
 
 }  // namespace statsd
diff --git a/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp b/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
index 0feae36..85f7b7c 100644
--- a/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
+++ b/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
@@ -26,12 +26,13 @@
 
 OringDurationTracker::OringDurationTracker(
         const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
-        sp<ConditionWizard> wizard, int conditionIndex,
-        const FieldMatcher& dimensionInCondition, bool nesting, uint64_t currentBucketStartNs,
-        uint64_t bucketSizeNs, bool conditionSliced,
+        sp<ConditionWizard> wizard, int conditionIndex, const FieldMatcher& dimensionInCondition,
+        bool nesting, uint64_t currentBucketStartNs, uint64_t currentBucketNum,
+        uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced,
         const vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
     : DurationTracker(key, id, eventKey, wizard, conditionIndex, dimensionInCondition, nesting,
-                      currentBucketStartNs, bucketSizeNs, conditionSliced, anomalyTrackers),
+                      currentBucketStartNs, currentBucketNum, startTimeNs, bucketSizeNs,
+                      conditionSliced, anomalyTrackers),
       mStarted(),
       mPaused() {
     mLastStartTime = 0;
@@ -100,7 +101,7 @@
         }
         if (mStarted.empty()) {
             mDuration += (timestamp - mLastStartTime);
-            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration);
+            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
             VLOG("record duration %lld, total %lld ", (long long)timestamp - mLastStartTime,
                  (long long)mDuration);
         }
@@ -125,7 +126,7 @@
         mDuration += (timestamp - mLastStartTime);
         VLOG("Oring Stop all: record duration %lld %lld ", (long long)timestamp - mLastStartTime,
              (long long)mDuration);
-        detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration);
+        detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
     }
 
     stopAnomalyAlarm();
@@ -134,51 +135,83 @@
     mConditionKeyMap.clear();
 }
 
-bool OringDurationTracker::flushIfNeeded(
-        uint64_t eventTime, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
-    if (eventTime < mCurrentBucketStartTimeNs + mBucketSizeNs) {
-        return false;
-    }
+bool OringDurationTracker::flushCurrentBucket(
+        const uint64_t& eventTimeNs,
+        std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
     VLOG("OringDurationTracker Flushing.............");
-    // adjust the bucket start time
-    int numBucketsForward = (eventTime - mCurrentBucketStartTimeNs) / mBucketSizeNs;
-    DurationBucket current_info;
-    current_info.mBucketStartNs = mCurrentBucketStartTimeNs;
-    current_info.mBucketEndNs = current_info.mBucketStartNs + mBucketSizeNs;
-    current_info.mBucketNum = mCurrentBucketNum;
+
+    // Note that we have to mimic the bucket time changes we do in
+    // MetricProducer::notifyAppUpgrade.
+
+    int numBucketsForward = 0;
+    uint64_t fullBucketEnd = getCurrentBucketEndTimeNs();
+    uint64_t currentBucketEndTimeNs;
+
+    if (eventTimeNs >= fullBucketEnd) {
+        numBucketsForward = 1 + (eventTimeNs - fullBucketEnd) / mBucketSizeNs;
+        currentBucketEndTimeNs = fullBucketEnd;
+    } else {
+        // This must be a partial bucket.
+        currentBucketEndTimeNs = eventTimeNs;
+    }
+
     // Process the current bucket.
     if (mStarted.size() > 0) {
-        mDuration += (current_info.mBucketEndNs - mLastStartTime);
+        mDuration += (currentBucketEndTimeNs - mLastStartTime);
     }
     if (mDuration > 0) {
+        DurationBucket current_info;
+        current_info.mBucketStartNs = mCurrentBucketStartTimeNs;
+        current_info.mBucketEndNs = currentBucketEndTimeNs;
+        current_info.mBucketNum = mCurrentBucketNum;
         current_info.mDuration = mDuration;
         (*output)[mEventKey].push_back(current_info);
-        addPastBucketToAnomalyTrackers(current_info.mDuration, current_info.mBucketNum);
+        mDurationFullBucket += mDuration;
+        if (eventTimeNs > fullBucketEnd) {
+            // End of full bucket, can send to anomaly tracker now.
+            addPastBucketToAnomalyTrackers(mDurationFullBucket, current_info.mBucketNum);
+            mDurationFullBucket = 0;
+        }
         VLOG("  duration: %lld", (long long)current_info.mDuration);
     }
 
     if (mStarted.size() > 0) {
         for (int i = 1; i < numBucketsForward; i++) {
             DurationBucket info;
-            info.mBucketStartNs = mCurrentBucketStartTimeNs + mBucketSizeNs * i;
+            info.mBucketStartNs = fullBucketEnd + mBucketSizeNs * (i - 1);
             info.mBucketEndNs = info.mBucketStartNs + mBucketSizeNs;
             info.mBucketNum = mCurrentBucketNum + i;
             info.mDuration = mBucketSizeNs;
             (*output)[mEventKey].push_back(info);
+            // Safe to send these buckets to anomaly tracker since they must be full buckets.
+            // If it's a partial bucket, numBucketsForward would be 0.
             addPastBucketToAnomalyTrackers(info.mDuration, info.mBucketNum);
             VLOG("  add filling bucket with duration %lld", (long long)info.mDuration);
         }
     }
-    mCurrentBucketStartTimeNs += numBucketsForward * mBucketSizeNs;
-    mCurrentBucketNum += numBucketsForward;
 
-    mLastStartTime = mCurrentBucketStartTimeNs;
     mDuration = 0;
 
+    if (numBucketsForward > 0) {
+        mCurrentBucketStartTimeNs = fullBucketEnd + (numBucketsForward - 1) * mBucketSizeNs;
+        mCurrentBucketNum += numBucketsForward;
+    } else {  // We must be forming a partial bucket.
+        mCurrentBucketStartTimeNs = eventTimeNs;
+    }
+    mLastStartTime = mCurrentBucketStartTimeNs;
+
     // if all stopped, then tell owner it's safe to remove this tracker.
     return mStarted.empty() && mPaused.empty();
 }
 
+bool OringDurationTracker::flushIfNeeded(
+        uint64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
+    if (eventTimeNs < getCurrentBucketEndTimeNs()) {
+        return false;
+    }
+    return flushCurrentBucket(eventTimeNs, output);
+}
+
 void OringDurationTracker::onSlicedConditionMayChange(const uint64_t timestamp) {
     declareAnomalyIfAlarmExpired(timestamp);
     vector<pair<HashableDimensionKey, int>> startedToPaused;
@@ -211,7 +244,7 @@
             mDuration += (timestamp - mLastStartTime);
             VLOG("Duration add %lld , to %lld ", (long long)(timestamp - mLastStartTime),
                  (long long)mDuration);
-            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration);
+            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
         }
     }
 
@@ -275,7 +308,7 @@
             mDuration += (timestamp - mLastStartTime);
             mPaused.insert(mStarted.begin(), mStarted.end());
             mStarted.clear();
-            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration);
+            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
         }
     }
     if (mStarted.empty()) {
@@ -298,7 +331,7 @@
     // As we move into the future, old buckets get overwritten (so their old data is erased).
 
     // Sum of past durations. Will change as we overwrite old buckets.
-    int64_t pastNs = mDuration;
+    int64_t pastNs = mDuration + mDurationFullBucket;
     pastNs += anomalyTracker.getSumOverPastBuckets(mEventKey);
 
     // How much of the threshold is still unaccounted after considering pastNs.
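The detectAndDeclareAnomaly calls above now pass mDuration + mDurationFullBucket, so duration already flushed into partial buckets of the same full bucket (for example by an app upgrade) still counts toward the anomaly threshold. A minimal standalone sketch of that accumulation follows; the type and member names are illustrative, not the statsd implementation.

// Minimal sketch, assuming a tracker that keeps an in-progress duration plus a running
// total for the current full bucket; the anomaly check always sees the sum of both.
#include <cstdint>
#include <iostream>

struct TrackerSketch {
    uint64_t mDuration = 0;            // in-progress duration of the current partial bucket
    uint64_t mDurationFullBucket = 0;  // duration already flushed within the same full bucket

    // Hypothetical partial-bucket flush (e.g. on an app upgrade): accumulate into the
    // full-bucket total instead of dropping it.
    void flushPartial() {
        mDurationFullBucket += mDuration;
        mDuration = 0;
    }

    // Value handed to the anomaly check: the whole full-bucket total so far.
    uint64_t anomalyValue() const { return mDuration + mDurationFullBucket; }
};

int main() {
    TrackerSketch t;
    t.mDuration = 15;  // 15ns accumulated before the upgrade
    t.flushPartial();
    t.mDuration = 30;  // 30ns more after the upgrade, still in the same full bucket
    std::cout << t.anomalyValue() << "\n";  // prints 45, not 30
    return 0;
}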
diff --git a/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.h b/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.h
index 75b5a81..73e50e0 100644
--- a/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.h
+++ b/cmds/statsd/src/metrics/duration_helper/OringDurationTracker.h
@@ -30,7 +30,8 @@
     OringDurationTracker(const ConfigKey& key, const int64_t& id,
                          const MetricDimensionKey& eventKey, sp<ConditionWizard> wizard,
                          int conditionIndex, const FieldMatcher& dimensionInCondition, bool nesting,
-                         uint64_t currentBucketStartNs, uint64_t bucketSizeNs, bool conditionSliced,
+                         uint64_t currentBucketStartNs, uint64_t currentBucketNum,
+                         uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced,
                          const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers);
 
     OringDurationTracker(const OringDurationTracker& tracker) = default;
@@ -46,6 +47,9 @@
     void onSlicedConditionMayChange(const uint64_t timestamp) override;
     void onConditionChanged(bool condition, const uint64_t timestamp) override;
 
+    bool flushCurrentBucket(
+            const uint64_t& eventTimeNs,
+            std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
     bool flushIfNeeded(
             uint64_t timestampNs,
             std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
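The header now exposes flushCurrentBucket separately from flushIfNeeded, and the constructor takes currentBucketNum and startTimeNs so that full-bucket boundaries stay aligned to the original start time even after a partial bucket is cut. A simplified sketch of that split follows; the class and member names are assumptions, the bucket number starts at zero, and the multi-bucket forwarding logic is omitted.

// Simplified sketch (assumed names): flushIfNeeded() only fires on a real bucket boundary,
// while flushCurrentBucket() can also be called directly to close a partial bucket.
#include <cstdint>
#include <utility>
#include <vector>

class BucketSplitSketch {
public:
    BucketSplitSketch(uint64_t startTimeNs, uint64_t bucketSizeNs)
        : mStartTimeNs(startTimeNs),
          mBucketSizeNs(bucketSizeNs),
          mCurrentBucketStartNs(startTimeNs) {}

    // Full-bucket boundaries stay aligned to the original start time, which is why the
    // header now carries both currentBucketNum and startTimeNs.
    uint64_t currentFullBucketEndNs() const {
        return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
    }

    bool flushIfNeeded(uint64_t eventTimeNs) {
        if (eventTimeNs < currentFullBucketEndNs()) {
            return false;  // still inside the current full bucket
        }
        return flushCurrentBucket(eventTimeNs);
    }

    bool flushCurrentBucket(uint64_t eventTimeNs) {
        const uint64_t fullEnd = currentFullBucketEndNs();
        const bool partial = eventTimeNs < fullEnd;
        const uint64_t endNs = partial ? eventTimeNs : fullEnd;
        mClosedBuckets.push_back({mCurrentBucketStartNs, endNs});
        mCurrentBucketStartNs = endNs;  // the next (possibly partial) bucket starts here
        if (!partial) {
            ++mCurrentBucketNum;  // only full buckets advance the bucket number
        }
        return true;
    }

    std::vector<std::pair<uint64_t, uint64_t>> mClosedBuckets;

private:
    uint64_t mStartTimeNs;
    uint64_t mBucketSizeNs;
    uint64_t mCurrentBucketNum = 0;
    uint64_t mCurrentBucketStartNs;
};

int main() {
    BucketSplitSketch s(/*startTimeNs=*/10, /*bucketSizeNs=*/60);
    s.flushCurrentBucket(25);  // upgrade at 25: partial bucket [10,25)
    s.flushIfNeeded(71);       // crosses the 70 boundary: bucket [25,70)
    return s.mClosedBuckets.size() == 2 ? 0 : 1;
}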
diff --git a/cmds/statsd/src/packages/PackageInfoListener.h b/cmds/statsd/src/packages/PackageInfoListener.h
index df29eb0..03cb364 100644
--- a/cmds/statsd/src/packages/PackageInfoListener.h
+++ b/cmds/statsd/src/packages/PackageInfoListener.h
@@ -28,13 +28,15 @@
 public:
     // Uid map will notify this listener that the app with apk name and uid has been upgraded to
     // the specified version.
-    virtual void notifyAppUpgrade(const std::string& apk, const int uid, const int64_t version) = 0;
+    virtual void notifyAppUpgrade(const uint64_t& eventTimeNs, const std::string& apk,
+                                  const int uid, const int64_t version) = 0;
 
     // Notify interested listeners that the given apk and uid combination no longer exists.
-    virtual void notifyAppRemoved(const std::string& apk, const int uid) = 0;
+    virtual void notifyAppRemoved(const uint64_t& eventTimeNs, const std::string& apk,
+                                  const int uid) = 0;
 
     // Notify the listener that the UidMap snapshot is available.
-    virtual void onUidMapReceived() = 0;
+    virtual void onUidMapReceived(const uint64_t& eventTimeNs) = 0;
 };
 
 }  // namespace statsd
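Every listener callback now carries the event timestamp, which is what allows a metric producer to close its current bucket at the exact moment of the package change. A hypothetical listener implementation is sketched below; the class name and the flushPartialBucket helper are illustrative, not part of statsd.

// Hypothetical listener (assumed names) showing how the added eventTimeNs parameter lets
// an implementation cut a partial bucket at the time of the package change.
#include <cstdint>
#include <string>

class PartialBucketListenerSketch {
public:
    void notifyAppUpgrade(const uint64_t& eventTimeNs, const std::string& apk, const int uid,
                          const int64_t version) {
        (void)apk; (void)uid; (void)version;
        flushPartialBucket(eventTimeNs);  // end the current bucket at the upgrade time
    }

    void notifyAppRemoved(const uint64_t& eventTimeNs, const std::string& apk, const int uid) {
        (void)apk; (void)uid;
        flushPartialBucket(eventTimeNs);
    }

    void onUidMapReceived(const uint64_t& eventTimeNs) {
        // The snapshot time is available here too, even if an implementation ignores it.
        (void)eventTimeNs;
    }

private:
    void flushPartialBucket(uint64_t eventTimeNs) { mLastFlushNs = eventTimeNs; }

    uint64_t mLastFlushNs = 0;
};

int main() {
    PartialBucketListenerSketch listener;
    listener.notifyAppUpgrade(/*eventTimeNs=*/12345, "com.example.app", /*uid=*/1000,
                              /*version=*/2);
    return 0;
}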
diff --git a/cmds/statsd/src/packages/UidMap.cpp b/cmds/statsd/src/packages/UidMap.cpp
index 91279661..691423e 100644
--- a/cmds/statsd/src/packages/UidMap.cpp
+++ b/cmds/statsd/src/packages/UidMap.cpp
@@ -119,7 +119,7 @@
     for (auto weakPtr : broadcastList) {
         auto strongPtr = weakPtr.promote();
         if (strongPtr != NULL) {
-            strongPtr->onUidMapReceived();
+            strongPtr->onUidMapReceived(timestamp);
         }
     }
 }
@@ -166,7 +166,7 @@
     for (auto weakPtr : broadcastList) {
         auto strongPtr = weakPtr.promote();
         if (strongPtr != NULL) {
-            strongPtr->notifyAppUpgrade(appName, uid, versionCode);
+            strongPtr->notifyAppUpgrade(timestamp, appName, uid, versionCode);
         }
     }
 }
@@ -239,7 +239,7 @@
     for (auto weakPtr : broadcastList) {
         auto strongPtr = weakPtr.promote();
         if (strongPtr != NULL) {
-            strongPtr->notifyAppRemoved(app, uid);
+            strongPtr->notifyAppRemoved(timestamp, app, uid);
         }
     }
 }
@@ -316,13 +316,13 @@
     mLastUpdatePerConfigKey[key] = timestamp;
     int64_t newMin = getMinimumTimestampNs();
 
-    if (newMin > prevMin) {
+    if (newMin > prevMin) {  // Delete anything possible now that the minimum has moved forward.
         int64_t cutoff_nanos = newMin;
         auto snapshots = mOutput.mutable_snapshots();
         auto it_snapshots = snapshots->cbegin();
         while (it_snapshots != snapshots->cend()) {
             if (it_snapshots->timestamp_nanos() < cutoff_nanos) {
-                // it_snapshots now points to the following element.
+                // it_snapshots points to the following element after erasing.
                 it_snapshots = snapshots->erase(it_snapshots);
             } else {
                 ++it_snapshots;
@@ -332,7 +332,7 @@
         auto it_deltas = deltas->cbegin();
         while (it_deltas != deltas->cend()) {
             if (it_deltas->timestamp_nanos() < cutoff_nanos) {
-                // it_deltas now points to the following element.
+                // it_deltas points to the following element after erasing.
                 it_deltas = deltas->erase(it_deltas);
             } else {
                 ++it_deltas;
diff --git a/cmds/statsd/tests/e2e/WakelockDuration_e2e_test.cpp b/cmds/statsd/tests/e2e/WakelockDuration_e2e_test.cpp
index 1186a16..a99dbe8 100644
--- a/cmds/statsd/tests/e2e/WakelockDuration_e2e_test.cpp
+++ b/cmds/statsd/tests/e2e/WakelockDuration_e2e_test.cpp
@@ -40,9 +40,11 @@
 
     auto holdingWakelockPredicate = CreateHoldingWakelockPredicate();
     // The predicate dimensions are the first and last attribution nodes, by both uid and tag.
-    *holdingWakelockPredicate.mutable_simple_predicate()->mutable_dimensions() =
-        CreateAttributionUidAndTagDimensions(
+    FieldMatcher dimensions = CreateAttributionUidAndTagDimensions(
             android::util::WAKELOCK_STATE_CHANGED, {Position::FIRST, Position::LAST});
+    // Also slice by the wakelock tag
+    dimensions.add_child()->set_field(3);  // The wakelock tag is set in field 3 of the wakelock.
+    *holdingWakelockPredicate.mutable_simple_predicate()->mutable_dimensions() = dimensions;
     *config.add_predicate() = holdingWakelockPredicate;
 
     auto durationMetric = config.add_duration_metric();
@@ -58,133 +60,198 @@
     return config;
 }
 
-}  // namespace
+std::vector<AttributionNode> attributions1 = {CreateAttribution(111, "App1"),
+                                              CreateAttribution(222, "GMSCoreModule1"),
+                                              CreateAttribution(222, "GMSCoreModule2")};
 
-TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensions) {
-    ConfigKey cfgKey;
-    for (auto aggregationType : { DurationMetric::SUM, DurationMetric::MAX_SPARSE }) {
-        auto config = CreateStatsdConfig(aggregationType);
-        uint64_t bucketStartTimeNs = 10000000000;
-        uint64_t bucketSizeNs =
+std::vector<AttributionNode> attributions2 = {CreateAttribution(111, "App2"),
+                                              CreateAttribution(222, "GMSCoreModule1"),
+                                              CreateAttribution(222, "GMSCoreModule2")};
+
+/*
+Events (all offsets are relative to the bucket start time):
+The screen-off condition is met during (200ns, 1min+500ns].
+Wakelock wl1 is held from 2ns to 1min+2ns.
+Wakelock wl2 is held from 1min-10ns to 2min-15ns.
+*/
+void FeedEvents(StatsdConfig config, sp<StatsLogProcessor> processor) {
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs =
             TimeUnitToBucketSizeInMillis(config.duration_metric(0).bucket()) * 1000000LL;
 
-        auto processor = CreateStatsLogProcessor(bucketStartTimeNs / NS_PER_SEC, config, cfgKey);
-        EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
-        EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
-
-        auto screenTurnedOnEvent =
-            CreateScreenStateChangedEvent(android::view::DisplayStateEnum::DISPLAY_STATE_ON,
-                                          bucketStartTimeNs + 1);
-        auto screenTurnedOffEvent =
-            CreateScreenStateChangedEvent(android::view::DisplayStateEnum::DISPLAY_STATE_OFF,
-                                          bucketStartTimeNs + 200);
-        auto screenTurnedOnEvent2 =
+    auto screenTurnedOnEvent = CreateScreenStateChangedEvent(
+            android::view::DisplayStateEnum::DISPLAY_STATE_ON, bucketStartTimeNs + 1);
+    auto screenTurnedOffEvent = CreateScreenStateChangedEvent(
+            android::view::DisplayStateEnum::DISPLAY_STATE_OFF, bucketStartTimeNs + 200);
+    auto screenTurnedOnEvent2 =
             CreateScreenStateChangedEvent(android::view::DisplayStateEnum::DISPLAY_STATE_ON,
                                           bucketStartTimeNs + bucketSizeNs + 500);
 
-        std::vector<AttributionNode> attributions1 =
-            {CreateAttribution(111, "App1"), CreateAttribution(222, "GMSCoreModule1"),
-             CreateAttribution(222, "GMSCoreModule2")};
+    auto acquireEvent1 = CreateAcquireWakelockEvent(attributions1, "wl1", bucketStartTimeNs + 2);
+    auto releaseEvent1 =
+            CreateReleaseWakelockEvent(attributions1, "wl1", bucketStartTimeNs + bucketSizeNs + 2);
+    auto acquireEvent2 =
+            CreateAcquireWakelockEvent(attributions2, "wl2", bucketStartTimeNs + bucketSizeNs - 10);
+    auto releaseEvent2 = CreateReleaseWakelockEvent(attributions2, "wl2",
+                                                    bucketStartTimeNs + 2 * bucketSizeNs - 15);
 
-        std::vector<AttributionNode> attributions2 =
-            {CreateAttribution(111, "App2"), CreateAttribution(222, "GMSCoreModule1"),
-             CreateAttribution(222, "GMSCoreModule2")};
+    std::vector<std::unique_ptr<LogEvent>> events;
 
-        auto acquireEvent1 = CreateAcquireWakelockEvent(
-            attributions1, "wl1", bucketStartTimeNs + 2);
-        auto acquireEvent2 = CreateAcquireWakelockEvent(
-            attributions2, "wl2", bucketStartTimeNs + bucketSizeNs - 10);
+    events.push_back(std::move(screenTurnedOnEvent));
+    events.push_back(std::move(screenTurnedOffEvent));
+    events.push_back(std::move(screenTurnedOnEvent2));
+    events.push_back(std::move(acquireEvent1));
+    events.push_back(std::move(acquireEvent2));
+    events.push_back(std::move(releaseEvent1));
+    events.push_back(std::move(releaseEvent2));
 
-        auto releaseEvent1 = CreateReleaseWakelockEvent(
-            attributions1, "wl1", bucketStartTimeNs + bucketSizeNs + 2);
-        auto releaseEvent2 = CreateReleaseWakelockEvent(
-            attributions2, "wl2", bucketStartTimeNs + 2 * bucketSizeNs - 15);
+    sortLogEventsByTimestamp(&events);
 
-
-        std::vector<std::unique_ptr<LogEvent>> events;
-
-        events.push_back(std::move(screenTurnedOnEvent));
-        events.push_back(std::move(screenTurnedOffEvent));
-        events.push_back(std::move(screenTurnedOnEvent2));
-        events.push_back(std::move(acquireEvent1));
-        events.push_back(std::move(acquireEvent2));
-        events.push_back(std::move(releaseEvent1));
-        events.push_back(std::move(releaseEvent2));
-
-        sortLogEventsByTimestamp(&events);
-
-        for (const auto& event : events) {
-            processor->OnLogEvent(event.get());
-        }
-
-        ConfigMetricsReportList reports;
-        processor->onDumpReport(cfgKey, bucketStartTimeNs + 2 * bucketSizeNs - 1, &reports);
-        EXPECT_EQ(reports.reports_size(), 1);
-        EXPECT_EQ(reports.reports(0).metrics_size(), 1);
-        // Only 1 dimension output. The tag dimension in the predicate has been aggregated.
-        EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
-
-        auto data = reports.reports(0).metrics(0).duration_metrics().data(0);
-        // Validate dimension value.
-        ValidateAttributionUidDimension(
-            data.dimensions_in_what(),
-            android::util::WAKELOCK_STATE_CHANGED, 111);
-        // Validate bucket info.
-        EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 1);
-        data = reports.reports(0).metrics(0).duration_metrics().data(0);
-        // The wakelock holding interval starts from the screen off event and to the end of the 1st
-        // bucket.
-        EXPECT_EQ((unsigned long long)data.bucket_info(0).duration_nanos(), bucketSizeNs - 200);
-
-        reports.Clear();
-        processor->onDumpReport(cfgKey, bucketStartTimeNs + 2 * bucketSizeNs + 1, &reports);
-        EXPECT_EQ(reports.reports_size(), 1);
-        EXPECT_EQ(reports.reports(0).metrics_size(), 1);
-        EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
-        // Dump the report after the end of 2nd bucket.
-        EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 2);
-        data = reports.reports(0).metrics(0).duration_metrics().data(0);
-        // Validate dimension value.
-        ValidateAttributionUidDimension(
-            data.dimensions_in_what(), android::util::WAKELOCK_STATE_CHANGED, 111);
-        // Two output buckets.
-        // The wakelock holding interval in the 1st bucket starts from the screen off event and to
-        // the end of the 1st bucket.
-        EXPECT_EQ((unsigned long long)data.bucket_info(0).duration_nanos(),
-            bucketStartTimeNs + bucketSizeNs - (bucketStartTimeNs + 200));
-        // The wakelock holding interval in the 2nd bucket starts at the beginning of the bucket and
-        // ends at the second screen on event.
-        EXPECT_EQ((unsigned long long)data.bucket_info(1).duration_nanos(), 500UL);
-
-        events.clear();
-        events.push_back(CreateScreenStateChangedEvent(
-            android::view::DisplayStateEnum::DISPLAY_STATE_OFF,
-            bucketStartTimeNs + 2 * bucketSizeNs + 90));
-        events.push_back(CreateAcquireWakelockEvent(
-            attributions1, "wl3", bucketStartTimeNs + 2 * bucketSizeNs + 100));
-        events.push_back(CreateReleaseWakelockEvent(
-            attributions1, "wl3", bucketStartTimeNs + 5 * bucketSizeNs + 100));
-        sortLogEventsByTimestamp(&events);
-        for (const auto& event : events) {
-            processor->OnLogEvent(event.get());
-        }
-        reports.Clear();
-        processor->onDumpReport(cfgKey, bucketStartTimeNs + 6 * bucketSizeNs + 1, &reports);
-        EXPECT_EQ(reports.reports_size(), 1);
-        EXPECT_EQ(reports.reports(0).metrics_size(), 1);
-        EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
-        EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 6);
-        data = reports.reports(0).metrics(0).duration_metrics().data(0);
-        ValidateAttributionUidDimension(
-            data.dimensions_in_what(), android::util::WAKELOCK_STATE_CHANGED, 111);
-        // The last wakelock holding spans 4 buckets.
-        EXPECT_EQ((unsigned long long)data.bucket_info(2).duration_nanos(), bucketSizeNs - 100);
-        EXPECT_EQ((unsigned long long)data.bucket_info(3).duration_nanos(), bucketSizeNs);
-        EXPECT_EQ((unsigned long long)data.bucket_info(4).duration_nanos(), bucketSizeNs);
-        EXPECT_EQ((unsigned long long)data.bucket_info(5).duration_nanos(), 100UL);
+    for (const auto& event : events) {
+        processor->OnLogEvent(event.get());
     }
 }
 
+}  // namespace
+
+TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensionsForSumDuration) {
+    ConfigKey cfgKey;
+    auto config = CreateStatsdConfig(DurationMetric::SUM);
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs =
+            TimeUnitToBucketSizeInMillis(config.duration_metric(0).bucket()) * 1000000LL;
+    auto processor = CreateStatsLogProcessor(bucketStartTimeNs / NS_PER_SEC, config, cfgKey);
+    EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
+    EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
+    FeedEvents(config, processor);
+    ConfigMetricsReportList reports;
+    processor->onDumpReport(cfgKey, bucketStartTimeNs + 2 * bucketSizeNs - 1, &reports);
+
+    EXPECT_EQ(reports.reports_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics_size(), 1);
+    // Only 1 dimension output. The tag dimension in the predicate has been aggregated.
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
+
+    auto data = reports.reports(0).metrics(0).duration_metrics().data(0);
+    // Validate dimension value.
+    ValidateAttributionUidDimension(data.dimensions_in_what(),
+                                    android::util::WAKELOCK_STATE_CHANGED, 111);
+    // Validate bucket info.
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 1);
+    data = reports.reports(0).metrics(0).duration_metrics().data(0);
+    // The wakelock holding interval runs from the screen off event to the end of the 1st
+    // bucket.
+    EXPECT_EQ((unsigned long long)data.bucket_info(0).duration_nanos(), bucketSizeNs - 200);
+
+    reports.Clear();
+    processor->onDumpReport(cfgKey, bucketStartTimeNs + 2 * bucketSizeNs + 1, &reports);
+    EXPECT_EQ(reports.reports_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
+    // Dump the report after the end of 2nd bucket.
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 2);
+    data = reports.reports(0).metrics(0).duration_metrics().data(0);
+    // Validate dimension value.
+    ValidateAttributionUidDimension(data.dimensions_in_what(),
+                                    android::util::WAKELOCK_STATE_CHANGED, 111);
+    // Two output buckets.
+    // The wakelock holding interval in the 1st bucket runs from the screen off event to
+    // the end of the 1st bucket.
+    EXPECT_EQ((unsigned long long)data.bucket_info(0).duration_nanos(),
+              bucketStartTimeNs + bucketSizeNs - (bucketStartTimeNs + 200));
+    // The wakelock holding interval in the 2nd bucket starts at the beginning of the bucket and
+    // ends at the second screen on event.
+    EXPECT_EQ((unsigned long long)data.bucket_info(1).duration_nanos(), 500UL);
+
+    std::vector<std::unique_ptr<LogEvent>> events;
+    events.push_back(
+            CreateScreenStateChangedEvent(android::view::DisplayStateEnum::DISPLAY_STATE_OFF,
+                                          bucketStartTimeNs + 2 * bucketSizeNs + 90));
+    events.push_back(CreateAcquireWakelockEvent(attributions1, "wl3",
+                                                bucketStartTimeNs + 2 * bucketSizeNs + 100));
+    events.push_back(CreateReleaseWakelockEvent(attributions1, "wl3",
+                                                bucketStartTimeNs + 5 * bucketSizeNs + 100));
+    sortLogEventsByTimestamp(&events);
+    for (const auto& event : events) {
+        processor->OnLogEvent(event.get());
+    }
+    reports.Clear();
+    processor->onDumpReport(cfgKey, bucketStartTimeNs + 6 * bucketSizeNs + 1, &reports);
+    EXPECT_EQ(reports.reports_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 6);
+    data = reports.reports(0).metrics(0).duration_metrics().data(0);
+    ValidateAttributionUidDimension(data.dimensions_in_what(),
+                                    android::util::WAKELOCK_STATE_CHANGED, 111);
+    // The last wakelock holding spans 4 buckets.
+    EXPECT_EQ((unsigned long long)data.bucket_info(2).duration_nanos(), bucketSizeNs - 100);
+    EXPECT_EQ((unsigned long long)data.bucket_info(3).duration_nanos(), bucketSizeNs);
+    EXPECT_EQ((unsigned long long)data.bucket_info(4).duration_nanos(), bucketSizeNs);
+    EXPECT_EQ((unsigned long long)data.bucket_info(5).duration_nanos(), 100UL);
+}
+
+TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensionsForMaxDuration) {
+    ConfigKey cfgKey;
+    auto config = CreateStatsdConfig(DurationMetric::MAX_SPARSE);
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs =
+            TimeUnitToBucketSizeInMillis(config.duration_metric(0).bucket()) * 1000000LL;
+    auto processor = CreateStatsLogProcessor(bucketStartTimeNs / NS_PER_SEC, config, cfgKey);
+    EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
+    EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
+    FeedEvents(config, processor);
+    ConfigMetricsReportList reports;
+    processor->onDumpReport(cfgKey, bucketStartTimeNs + 2 * bucketSizeNs - 1, &reports);
+
+    EXPECT_EQ(reports.reports_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics_size(), 1);
+    // Nothing has ended in the first bucket.
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 0);
+
+    reports.Clear();
+    processor->onDumpReport(cfgKey, bucketStartTimeNs + 2 * bucketSizeNs + 1, &reports);
+    EXPECT_EQ(reports.reports_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
+    // Dump the report after the end of 2nd bucket. One dimension with one bucket.
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 1);
+    auto data = reports.reports(0).metrics(0).duration_metrics().data(0);
+    // Validate dimension value.
+    ValidateAttributionUidDimension(data.dimensions_in_what(),
+                                    android::util::WAKELOCK_STATE_CHANGED, 111);
+    // The max interval runs from the screen off event to the release of wl1.
+    EXPECT_EQ((unsigned long long)data.bucket_info(0).duration_nanos(), bucketSizeNs + 2 - 200);
+
+    std::vector<std::unique_ptr<LogEvent>> events;
+    events.push_back(
+            CreateScreenStateChangedEvent(android::view::DisplayStateEnum::DISPLAY_STATE_OFF,
+                                          bucketStartTimeNs + 2 * bucketSizeNs + 90));
+    events.push_back(CreateAcquireWakelockEvent(attributions1, "wl3",
+                                                bucketStartTimeNs + 2 * bucketSizeNs + 100));
+    events.push_back(CreateReleaseWakelockEvent(attributions1, "wl3",
+                                                bucketStartTimeNs + 5 * bucketSizeNs + 100));
+    sortLogEventsByTimestamp(&events);
+    for (const auto& event : events) {
+        processor->OnLogEvent(event.get());
+    }
+    reports.Clear();
+    processor->onDumpReport(cfgKey, bucketStartTimeNs + 6 * bucketSizeNs + 1, &reports);
+    EXPECT_EQ(reports.reports_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data_size(), 1);
+    EXPECT_EQ(reports.reports(0).metrics(0).duration_metrics().data(0).bucket_info_size(), 2);
+    data = reports.reports(0).metrics(0).duration_metrics().data(0);
+    ValidateAttributionUidDimension(data.dimensions_in_what(),
+                                    android::util::WAKELOCK_STATE_CHANGED, 111);
+    // The last wakelock holding spans 4 buckets.
+    EXPECT_EQ((unsigned long long)data.bucket_info(1).duration_nanos(), 3 * bucketSizeNs);
+    EXPECT_EQ((unsigned long long)data.bucket_info(1).start_bucket_nanos(),
+              bucketStartTimeNs + 5 * bucketSizeNs);
+    EXPECT_EQ((unsigned long long)data.bucket_info(1).end_bucket_nanos(),
+              bucketStartTimeNs + 6 * bucketSizeNs);
+}
+
 #else
 GTEST_LOG_(INFO) << "This test does nothing.\n";
 #endif
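As a worked check of the values asserted in the tests above, assuming one-minute buckets and the timestamps set up in FeedEvents: the SUM metric's first bucket runs from the screen-off event to the bucket end, and the MAX_SPARSE value is the single longest interval, from the screen-off event to the release of wl1. This is only the arithmetic behind the EXPECT_EQ values, not statsd code.

// Worked check of the expected durations, under the stated assumptions.
#include <cstdint>
#include <iostream>

int main() {
    const uint64_t bucketStartNs = 10000000000ULL;        // 10s
    const uint64_t bucketSizeNs = 60ULL * 1000000000ULL;  // ONE_MINUTE

    const uint64_t screenOffNs = bucketStartNs + 200;                // condition starts
    const uint64_t wl1ReleaseNs = bucketStartNs + bucketSizeNs + 2;  // wl1 held until here

    // SUM metric, 1st bucket: from the screen-off event to the bucket end.
    std::cout << (bucketStartNs + bucketSizeNs) - screenOffNs << "\n";  // bucketSizeNs - 200
    // MAX_SPARSE metric: the longest single interval, screen-off to the release of wl1.
    std::cout << wl1ReleaseNs - screenOffNs << "\n";  // bucketSizeNs + 2 - 200
    return 0;
}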
diff --git a/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp b/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
index 50b3532..87a1079 100644
--- a/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
@@ -191,6 +191,117 @@
     EXPECT_EQ(1LL, bucketInfo.mCount);
 }
 
+TEST(CountMetricProducerTest, TestEventWithAppUpgrade) {
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+    uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+
+    int tagId = 1;
+    int conditionTagId = 2;
+
+    CountMetric metric;
+    metric.set_id(1);
+    metric.set_bucket(ONE_MINUTE);
+    Alert alert;
+    alert.set_num_buckets(3);
+    alert.set_trigger_if_sum_gt(2);
+    LogEvent event1(tagId, bucketStartTimeNs + 1);
+    event1.write("111");  // uid
+    event1.init();
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    CountMetricProducer countProducer(kConfigKey, metric, -1 /* no condition */, wizard,
+                                      bucketStartTimeNs);
+    sp<AnomalyTracker> anomalyTracker = countProducer.addAnomalyTracker(alert);
+    EXPECT_TRUE(anomalyTracker != nullptr);
+
+    // Bucket is not flushed yet.
+    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
+    EXPECT_EQ(0UL, countProducer.mPastBuckets.size());
+    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+
+    // App upgrade forces bucket flush.
+    // Check that there's a past bucket and the bucket end is not adjusted.
+    countProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((long long)bucketStartTimeNs,
+              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
+    EXPECT_EQ((long long)eventUpgradeTimeNs,
+              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
+    EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    // Anomaly tracker only contains full buckets.
+    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+
+    uint64_t lastEndTimeNs = countProducer.getCurrentBucketEndTimeNs();
+    // Next event occurs in the same partial bucket that was just created.
+    LogEvent event2(tagId, bucketStartTimeNs + 59 * NS_PER_SEC + 10);
+    event2.write("222");  // uid
+    event2.init();
+    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
+    EXPECT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+
+    // Third event in following bucket.
+    LogEvent event3(tagId, bucketStartTimeNs + 62 * NS_PER_SEC + 10);
+    event3.write("333");  // uid
+    event3.init();
+    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
+    EXPECT_EQ(2UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(lastEndTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+}
+
+TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket) {
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+    uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+
+    int tagId = 1;
+    int conditionTagId = 2;
+
+    CountMetric metric;
+    metric.set_id(1);
+    metric.set_bucket(ONE_MINUTE);
+    LogEvent event1(tagId, bucketStartTimeNs + 1);
+    event1.write("111");  // uid
+    event1.init();
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    CountMetricProducer countProducer(kConfigKey, metric, -1 /* no condition */, wizard,
+                                      bucketStartTimeNs);
+
+    // Bucket is not flushed yet.
+    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
+    EXPECT_EQ(0UL, countProducer.mPastBuckets.size());
+
+    // App upgrade forces bucket flush.
+    // Check that there's a past bucket and the bucket end is not adjusted.
+    countProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((int64_t)bucketStartTimeNs,
+              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs,
+              (uint64_t)countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
+    EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
+
+    // Next event occurs in the same partial bucket that was just created.
+    LogEvent event2(tagId, bucketStartTimeNs + 70 * NS_PER_SEC + 10);
+    event2.write("222");  // uid
+    event2.init();
+    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
+    EXPECT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+
+    // Third event in following bucket.
+    LogEvent event3(tagId, bucketStartTimeNs + 121 * NS_PER_SEC + 10);
+    event3.write("333");  // uid
+    event3.init();
+    countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
+    EXPECT_EQ(2UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((int64_t)eventUpgradeTimeNs,
+              countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs,
+              (uint64_t)countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketEndNs);
+}
+
 TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced) {
     Alert alert;
     alert.set_id(11);
diff --git a/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp b/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
index c9fe252..23e15f7 100644
--- a/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
@@ -116,6 +116,231 @@
     EXPECT_EQ(1ULL, buckets2[0].mDuration);
 }
 
+TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade) {
+    /**
+     * The duration starts in the first bucket, runs through the two partial buckets (10-70sec)
+     * created by the app upgrade, through the next full bucket, and ends shortly after the
+     * start of the following bucket.
+     * Expected buckets:
+     *  - [10,25]: 14 secs
+     *  - [25,70]: All 45 secs
+     *  - [70,130]: All 60 secs
+     *  - [130,190]: Only 6 secs (event ended at 136sec)
+     */
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+    uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+    uint64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
+    uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+
+    int tagId = 1;
+
+    DurationMetric metric;
+    metric.set_id(1);
+    metric.set_bucket(ONE_MINUTE);
+    metric.set_aggregation_type(DurationMetric_AggregationType_SUM);
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    FieldMatcher dimensions;
+    DurationMetricProducer durationProducer(
+            kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
+            3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
+
+    durationProducer.onMatchedLogEvent(1 /* start index*/, LogEvent(tagId, startTimeNs));
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
+    EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(1UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    std::vector<DurationBucket> buckets =
+            durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
+    EXPECT_EQ(bucketStartTimeNs, buckets[0].mBucketStartNs);
+    EXPECT_EQ(eventUpgradeTimeNs, buckets[0].mBucketEndNs);
+    EXPECT_EQ(eventUpgradeTimeNs - startTimeNs, buckets[0].mDuration);
+    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    // We skip ahead one bucket, so we fill in the first two partial buckets and one full bucket.
+    durationProducer.onMatchedLogEvent(2 /* stop index*/, LogEvent(tagId, endTimeNs));
+    buckets = durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
+    EXPECT_EQ(3UL, buckets.size());
+    EXPECT_EQ(eventUpgradeTimeNs, buckets[1].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[1].mBucketEndNs);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - eventUpgradeTimeNs, buckets[1].mDuration);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[2].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[2].mBucketEndNs);
+    EXPECT_EQ(bucketSizeNs, buckets[2].mDuration);
+}
+
+TEST(DurationMetricTrackerTest, TestSumDurationWithUpgradeInFollowingBucket) {
+    /**
+     * Expected buckets (start at 11s, upgrade at 75s, end at 135s):
+     *  - [10,70]: 59 secs
+     *  - [70,75]: 5 sec
+     *  - [75,130]: 55 secs
+     */
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+    uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+    uint64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
+    uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+
+    int tagId = 1;
+
+    DurationMetric metric;
+    metric.set_id(1);
+    metric.set_bucket(ONE_MINUTE);
+    metric.set_aggregation_type(DurationMetric_AggregationType_SUM);
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    FieldMatcher dimensions;
+    DurationMetricProducer durationProducer(
+            kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
+            3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
+
+    durationProducer.onMatchedLogEvent(1 /* start index*/, LogEvent(tagId, startTimeNs));
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
+    EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(2UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    std::vector<DurationBucket> buckets =
+            durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
+    EXPECT_EQ(bucketStartTimeNs, buckets[0].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[0].mBucketEndNs);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - startTimeNs, buckets[0].mDuration);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[1].mBucketStartNs);
+    EXPECT_EQ(eventUpgradeTimeNs, buckets[1].mBucketEndNs);
+    EXPECT_EQ(eventUpgradeTimeNs - (bucketStartTimeNs + bucketSizeNs), buckets[1].mDuration);
+    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    // We skip ahead one bucket, so we fill in the first two partial buckets and one full bucket.
+    durationProducer.onMatchedLogEvent(2 /* stop index*/, LogEvent(tagId, endTimeNs));
+    buckets = durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
+    EXPECT_EQ(3UL, buckets.size());
+    EXPECT_EQ(eventUpgradeTimeNs, buckets[2].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[2].mBucketEndNs);
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs - eventUpgradeTimeNs, buckets[2].mDuration);
+}
+
+TEST(DurationMetricTrackerTest, TestSumDurationAnomalyWithUpgrade) {
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+    uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+    uint64_t startTimeNs = bucketStartTimeNs + 1;
+    uint64_t endTimeNs = startTimeNs + 65 * NS_PER_SEC;
+
+    int tagId = 1;
+
+    // Setup metric with alert.
+    DurationMetric metric;
+    metric.set_id(1);
+    metric.set_bucket(ONE_MINUTE);
+    metric.set_aggregation_type(DurationMetric_AggregationType_SUM);
+    Alert alert;
+    alert.set_num_buckets(3);
+    alert.set_trigger_if_sum_gt(2);
+
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    FieldMatcher dimensions;
+    DurationMetricProducer durationProducer(
+            kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
+            3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
+    sp<AnomalyTracker> anomalyTracker = durationProducer.addAnomalyTracker(alert);
+    EXPECT_TRUE(anomalyTracker != nullptr);
+
+    durationProducer.onMatchedLogEvent(1 /* start index*/, LogEvent(tagId, startTimeNs));
+    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    // We skip ahead one bucket, so we fill in the first two partial buckets and one full bucket.
+    durationProducer.onMatchedLogEvent(2 /* stop index*/, LogEvent(tagId, endTimeNs));
+
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - startTimeNs,
+              (uint64_t)anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+}
+
+TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade) {
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+    uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+    uint64_t startTimeNs = bucketStartTimeNs + 1;
+    uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+
+    int tagId = 1;
+
+    DurationMetric metric;
+    metric.set_id(1);
+    metric.set_bucket(ONE_MINUTE);
+    metric.set_aggregation_type(DurationMetric_AggregationType_MAX_SPARSE);
+    LogEvent event1(tagId, startTimeNs);
+    event1.write("111");  // uid
+    event1.init();
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    FieldMatcher dimensions;
+    DurationMetricProducer durationProducer(
+            kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
+            3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
+
+    durationProducer.onMatchedLogEvent(1 /* start index*/, LogEvent(tagId, startTimeNs));
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
+    EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    // The event stops here; with MAX_SPARSE aggregation nothing is flushed to past buckets yet.
+    durationProducer.onMatchedLogEvent(2 /* stop index*/, LogEvent(tagId, endTimeNs));
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+
+    durationProducer.flushIfNeededLocked(bucketStartTimeNs + 3 * bucketSizeNs + 1);
+    std::vector<DurationBucket> buckets =
+            durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
+    EXPECT_EQ(1UL, buckets.size());
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[0].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + 3 * bucketSizeNs, buckets[0].mBucketEndNs);
+    EXPECT_EQ(endTimeNs - startTimeNs, buckets[0].mDuration);
+}
+
+TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket) {
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
+    uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+    uint64_t startTimeNs = bucketStartTimeNs + 1;
+    uint64_t endTimeNs = startTimeNs + 115 * NS_PER_SEC;
+
+    int tagId = 1;
+
+    DurationMetric metric;
+    metric.set_id(1);
+    metric.set_bucket(ONE_MINUTE);
+    metric.set_aggregation_type(DurationMetric_AggregationType_MAX_SPARSE);
+    LogEvent event1(tagId, startTimeNs);
+    event1.write("111");  // uid
+    event1.init();
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    FieldMatcher dimensions;
+    DurationMetricProducer durationProducer(
+            kConfigKey, metric, -1 /* no condition */, 1 /* start index */, 2 /* stop index */,
+            3 /* stop_all index */, false /*nesting*/, wizard, dimensions, bucketStartTimeNs);
+
+    durationProducer.onMatchedLogEvent(1 /* start index*/, LogEvent(tagId, startTimeNs));
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
+    EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    // Stop occurs in the same partial bucket that was created for the app upgrade.
+    durationProducer.onMatchedLogEvent(2 /* stop index*/, LogEvent(tagId, endTimeNs));
+    EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+
+    durationProducer.flushIfNeededLocked(bucketStartTimeNs + 2 * bucketSizeNs + 1);
+    std::vector<DurationBucket> buckets =
+            durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
+    EXPECT_EQ(1UL, buckets.size());
+    EXPECT_EQ(eventUpgradeTimeNs, buckets[0].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[0].mBucketEndNs);
+    EXPECT_EQ(endTimeNs - startTimeNs, buckets[0].mDuration);
+}
+
 }  // namespace statsd
 }  // namespace os
 }  // namespace android
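For the MAX_SPARSE duration tests above, the app upgrade only moves bucket boundaries; the tracked maximum stays the full start-to-stop interval and is attributed to the bucket in which the event ends. The arithmetic sketch below mirrors TestMaxDurationWithUpgrade; it is a standalone check under those assumptions, not statsd code.

// Arithmetic sketch: the upgrade at 25s creates a partial bucket but does not split the
// tracked maximum, which is end - start and lands in the bucket [130s, 190s) where the
// event stops.
#include <cstdint>
#include <iostream>

int main() {
    const uint64_t nsPerSec = 1000000000ULL;
    const uint64_t bucketStartNs = 10000000000ULL;       // 10s
    const uint64_t bucketSizeNs = 60ULL * nsPerSec;      // ONE_MINUTE
    const uint64_t startNs = bucketStartNs + 1;          // event start
    const uint64_t endNs = startNs + 125ULL * nsPerSec;  // event stop, ~135s

    const uint64_t maxDurationNs = endNs - startNs;                      // 125s, not cut by the upgrade
    const uint64_t endBucketStartNs = bucketStartNs + 2 * bucketSizeNs;  // 130s

    std::cout << maxDurationNs << " " << endBucketStartNs << "\n";
    return 0;
}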
diff --git a/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp b/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
index 58be5b0..470d4d0 100644
--- a/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
@@ -44,6 +44,7 @@
 const int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
 const int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
 const int64_t bucket4StartTimeNs = bucketStartTimeNs + 3 * bucketSizeNs;
+const uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
 
 TEST(GaugeMetricProducerTest, TestNoCondition) {
     GaugeMetric metric;
@@ -119,6 +120,143 @@
     EXPECT_EQ(2UL, gaugeProducer.mPastBuckets.begin()->second.back().mBucketNum);
 }
 
+TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade) {
+    GaugeMetric metric;
+    metric.set_id(metricId);
+    metric.set_bucket(ONE_MINUTE);
+    metric.mutable_gauge_fields_filter()->set_include_all(true);
+
+    Alert alert;
+    alert.set_id(101);
+    alert.set_metric_id(metricId);
+    alert.set_trigger_if_sum_gt(25);
+    alert.set_num_buckets(100);
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    shared_ptr<MockStatsPullerManager> pullerManager =
+            make_shared<StrictMock<MockStatsPullerManager>>();
+    GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
+                                      -1 /* -1 means no pulling */, bucketStartTimeNs,
+                                      pullerManager);
+    sp<AnomalyTracker> anomalyTracker = gaugeProducer.addAnomalyTracker(alert);
+    EXPECT_TRUE(anomalyTracker != nullptr);
+
+    shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+    event1->write(1);
+    event1->write(10);
+    event1->init();
+    gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event1);
+    EXPECT_EQ(1UL, (*gaugeProducer.mCurrentSlicedBucket).count(DEFAULT_METRIC_DIMENSION_KEY));
+
+    gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(0UL, (*gaugeProducer.mCurrentSlicedBucket).count(DEFAULT_METRIC_DIMENSION_KEY));
+    EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
+    EXPECT_EQ(eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+    // Partial buckets are not sent to anomaly tracker.
+    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+
+    // Create an event in the same partial bucket.
+    shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 59 * NS_PER_SEC);
+    event2->write(1);
+    event2->write(10);
+    event2->init();
+    gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
+    EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
+    EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((uint64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+    // Partial buckets are not sent to anomaly tracker.
+    EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+
+    // Next event should trigger creation of new bucket and send previous full bucket to anomaly
+    // tracker.
+    shared_ptr<LogEvent> event3 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 65 * NS_PER_SEC);
+    event3->write(1);
+    event3->write(10);
+    event3->init();
+    gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event3);
+    EXPECT_EQ(1UL, gaugeProducer.mCurrentBucketNum);
+    EXPECT_EQ(2UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((uint64_t)bucketStartTimeNs + bucketSizeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+
+    // Next event should trigger creation of new bucket.
+    shared_ptr<LogEvent> event4 =
+            make_shared<LogEvent>(tagId, bucketStartTimeNs + 125 * NS_PER_SEC);
+    event4->write(1);
+    event4->write(10);
+    event4->init();
+    gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event4);
+    EXPECT_EQ(2UL, gaugeProducer.mCurrentBucketNum);
+    EXPECT_EQ(3UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
+}
+
+TEST(GaugeMetricProducerTest, TestPulledWithUpgrade) {
+    GaugeMetric metric;
+    metric.set_id(metricId);
+    metric.set_bucket(ONE_MINUTE);
+    auto gaugeFieldMatcher = metric.mutable_gauge_fields_filter()->mutable_fields();
+    gaugeFieldMatcher->set_field(tagId);
+    gaugeFieldMatcher->add_child()->set_field(2);
+
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+
+    shared_ptr<MockStatsPullerManager> pullerManager =
+            make_shared<StrictMock<MockStatsPullerManager>>();
+    EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _)).WillOnce(Return());
+    EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+    EXPECT_CALL(*pullerManager, Pull(tagId, _))
+            .WillOnce(Invoke([](int tagId, vector<std::shared_ptr<LogEvent>>* data) {
+                data->clear();
+                shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, eventUpgradeTimeNs);
+                event->write("some value");
+                event->write(2);
+                event->init();
+                data->push_back(event);
+                return true;
+            }));
+
+    GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, wizard,
+                                      tagId, bucketStartTimeNs, pullerManager);
+
+    vector<shared_ptr<LogEvent>> allData;
+    shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 1);
+    event->write("some value");
+    event->write(1);
+    event->init();
+    allData.push_back(event);
+    gaugeProducer.onDataPulled(allData);
+    EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
+    EXPECT_EQ(1, gaugeProducer.mCurrentSlicedBucket->begin()
+                         ->second.front()
+                         .mFields->begin()
+                         ->second.value_int());
+
+    gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
+    EXPECT_EQ((uint64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
+    EXPECT_EQ(2, gaugeProducer.mCurrentSlicedBucket->begin()
+                         ->second.front()
+                         .mFields->begin()
+                         ->second.value_int());
+
+    allData.clear();
+    event = make_shared<LogEvent>(tagId, bucketStartTimeNs + bucketSizeNs + 1);
+    event->write("some value");
+    event->write(3);
+    event->init();
+    allData.push_back(event);
+    gaugeProducer.onDataPulled(allData);
+    EXPECT_EQ(2UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
+    EXPECT_EQ(3, gaugeProducer.mCurrentSlicedBucket->begin()
+                         ->second.front()
+                         .mFields->begin()
+                         ->second.value_int());
+}
+
 TEST(GaugeMetricProducerTest, TestWithCondition) {
     GaugeMetric metric;
     metric.set_id(metricId);
diff --git a/cmds/statsd/tests/metrics/MaxDurationTracker_test.cpp b/cmds/statsd/tests/metrics/MaxDurationTracker_test.cpp
index 203f028..2658e4e 100644
--- a/cmds/statsd/tests/metrics/MaxDurationTracker_test.cpp
+++ b/cmds/statsd/tests/metrics/MaxDurationTracker_test.cpp
@@ -41,6 +41,11 @@
 
 const int TagId = 1;
 
+const HashableDimensionKey eventKey = getMockedDimensionKey(TagId, 0, "1");
+const std::vector<HashableDimensionKey> conditionKey = {getMockedDimensionKey(TagId, 4, "1")};
+const HashableDimensionKey key1 = getMockedDimensionKey(TagId, 1, "1");
+const HashableDimensionKey key2 = getMockedDimensionKey(TagId, 1, "2");
+const uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
 
 TEST(MaxDurationTrackerTest, TestSimpleMaxDuration) {
     const MetricDimensionKey eventKey = getMockedMetricDimensionKey(TagId, 0, "1");
@@ -54,11 +59,13 @@
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
     uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
 
     int64_t metricId = 1;
     MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
-                               false, bucketStartTimeNs, bucketSizeNs, false, {});
+                               false, bucketStartTimeNs, bucketNum, bucketStartTimeNs, bucketSizeNs,
+                               false, {});
 
     tracker.noteStart(key1, true, bucketStartTimeNs, ConditionKey());
     // Event starts again. This would not change anything as it already starts.
@@ -87,12 +94,15 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
 
     int64_t metricId = 1;
     MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
-                               false, bucketStartTimeNs, bucketSizeNs, false, {});
+                               false, bucketStartTimeNs, bucketNum, bucketStartTimeNs, bucketSizeNs,
+                               false, {});
 
     tracker.noteStart(key1, true, bucketStartTimeNs + 1, ConditionKey());
 
@@ -101,15 +111,14 @@
     tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 40, &buckets);
     tracker.noteStopAll(bucketStartTimeNs + bucketSizeNs + 40);
     EXPECT_TRUE(tracker.mInfos.empty());
-
-    EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
-    EXPECT_EQ(1u, buckets[eventKey].size());
-    EXPECT_EQ(bucketSizeNs - 1, buckets[eventKey][0].mDuration);
+    EXPECT_TRUE(buckets.find(eventKey) == buckets.end());
 
     tracker.flushIfNeeded(bucketStartTimeNs + 3 * bucketSizeNs + 40, &buckets);
-    EXPECT_EQ(2u, buckets[eventKey].size());
-    EXPECT_EQ(bucketSizeNs - 1, buckets[eventKey][0].mDuration);
-    EXPECT_EQ(40ULL, buckets[eventKey][1].mDuration);
+    EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
+    EXPECT_EQ(1u, buckets[eventKey].size());
+    EXPECT_EQ(bucketSizeNs + 40 - 1, buckets[eventKey][0].mDuration);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[eventKey][0].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[eventKey][0].mBucketEndNs);
 }
 
 TEST(MaxDurationTrackerTest, TestCrossBucketBoundary) {
@@ -122,12 +131,15 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
 
     int64_t metricId = 1;
     MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
-                               false, bucketStartTimeNs, bucketSizeNs, false, {});
+                               false, bucketStartTimeNs, bucketNum, bucketStartTimeNs, bucketSizeNs,
+                               false, {});
 
     // The event starts.
     tracker.noteStart(DEFAULT_DIMENSION_KEY, true, bucketStartTimeNs + 1, ConditionKey());
@@ -137,14 +149,18 @@
                       ConditionKey());
 
     // The event stops at early 4th bucket.
+    // noteStop is called from DurationMetricProducer's onMatchedLogEvent, which calls
+    // flushIfNeeded.
     tracker.flushIfNeeded(bucketStartTimeNs + (3 * bucketSizeNs) + 20, &buckets);
     tracker.noteStop(DEFAULT_DIMENSION_KEY, bucketStartTimeNs + (3 * bucketSizeNs) + 20,
                      false /*stop all*/);
-    EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
-    EXPECT_EQ(3u, buckets[eventKey].size());
-    EXPECT_EQ((unsigned long long)(bucketSizeNs - 1), buckets[eventKey][0].mDuration);
-    EXPECT_EQ((unsigned long long)bucketSizeNs, buckets[eventKey][1].mDuration);
-    EXPECT_EQ((unsigned long long)bucketSizeNs, buckets[eventKey][2].mDuration);
+    EXPECT_TRUE(buckets.find(eventKey) == buckets.end());
+
+    tracker.flushIfNeeded(bucketStartTimeNs + 4 * bucketSizeNs, &buckets);
+    EXPECT_EQ(1u, buckets[eventKey].size());
+    EXPECT_EQ((3 * bucketSizeNs) + 20 - 1, buckets[eventKey][0].mDuration);
+    EXPECT_EQ(bucketStartTimeNs + 3 * bucketSizeNs, buckets[eventKey][0].mBucketStartNs);
+    EXPECT_EQ(bucketStartTimeNs + 4 * bucketSizeNs, buckets[eventKey][0].mBucketEndNs);
 }
 
 TEST(MaxDurationTrackerTest, TestCrossBucketBoundary_nested) {
@@ -157,12 +173,15 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
 
     int64_t metricId = 1;
     MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
-                               true, bucketStartTimeNs, bucketSizeNs, false, {});
+                               true, bucketStartTimeNs, bucketNum, bucketStartTimeNs, bucketSizeNs,
+                               false, {});
 
     // 2 starts
     tracker.noteStart(DEFAULT_DIMENSION_KEY, true, bucketStartTimeNs + 1, ConditionKey());
@@ -171,21 +190,16 @@
     tracker.noteStop(DEFAULT_DIMENSION_KEY, bucketStartTimeNs + 20, false /*stop all*/);
 
     tracker.flushIfNeeded(bucketStartTimeNs + (2 * bucketSizeNs) + 1, &buckets);
-
-    EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
-    EXPECT_EQ(2u, buckets[eventKey].size());
-    EXPECT_EQ(bucketSizeNs - 1, buckets[eventKey][0].mDuration);
-    EXPECT_EQ(bucketSizeNs, buckets[eventKey][1].mDuration);
+    // Because of nesting, still not stopped.
+    EXPECT_TRUE(buckets.find(eventKey) == buckets.end());
 
     // real stop now.
     tracker.noteStop(DEFAULT_DIMENSION_KEY,
                      bucketStartTimeNs + (2 * bucketSizeNs) + 5, false);
     tracker.flushIfNeeded(bucketStartTimeNs + (3 * bucketSizeNs) + 1, &buckets);
 
-    EXPECT_EQ(3u, buckets[eventKey].size());
-    EXPECT_EQ(bucketSizeNs - 1, buckets[eventKey][0].mDuration);
-    EXPECT_EQ(bucketSizeNs, buckets[eventKey][1].mDuration);
-    EXPECT_EQ(5ULL, buckets[eventKey][2].mDuration);
+    EXPECT_EQ(1u, buckets[eventKey].size());
+    EXPECT_EQ(2 * bucketSizeNs + 5 - 1, buckets[eventKey][0].mDuration);
 }
 
 TEST(MaxDurationTrackerTest, TestMaxDurationWithCondition) {
@@ -204,14 +218,17 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     int64_t durationTimeNs = 2 * 1000;
 
     int64_t metricId = 1;
     MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                               false, bucketStartTimeNs, bucketSizeNs, true, {});
+                               false, bucketStartTimeNs, bucketNum, bucketStartTimeNs, bucketSizeNs,
+                               true, {});
     EXPECT_TRUE(tracker.mAnomalyTrackers.empty());
 
     tracker.noteStart(key1, true, eventStartTimeNs, conditionKey1);
@@ -226,45 +243,6 @@
     EXPECT_EQ(5ULL, buckets[eventKey][0].mDuration);
 }
 
-TEST(MaxDurationTrackerTest, TestAnomalyDetection) {
-    const MetricDimensionKey eventKey = getMockedMetricDimensionKey(TagId, 0, "1");
-    const HashableDimensionKey key1 = getMockedDimensionKey(TagId, 1, "1");
-    const HashableDimensionKey key2 = getMockedDimensionKey(TagId, 1, "2");
-    FieldMatcher dimensionInCondition;
-    int64_t metricId = 1;
-    Alert alert;
-    alert.set_id(101);
-    alert.set_metric_id(metricId);
-    alert.set_trigger_if_sum_gt(32 * NS_PER_SEC);
-    alert.set_num_buckets(2);
-    const int32_t refPeriodSec = 1;
-    alert.set_refractory_period_secs(refPeriodSec);
-
-    unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
-    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
-
-    uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
-    uint64_t bucketSizeNs = 30 * NS_PER_SEC;
-
-    sp<DurationAnomalyTracker> anomalyTracker = new DurationAnomalyTracker(alert, kConfigKey);
-    MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
-                               true, bucketStartTimeNs, bucketSizeNs, false, {anomalyTracker});
-
-    tracker.noteStart(key1, true, eventStartTimeNs, ConditionKey());
-    tracker.noteStop(key1, eventStartTimeNs + 10, false);
-    EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
-    EXPECT_EQ(10LL, tracker.mDuration);
-
-    tracker.noteStart(key2, true, eventStartTimeNs + 20, ConditionKey());
-    tracker.flushIfNeeded(eventStartTimeNs + 2 * bucketSizeNs + 3 * NS_PER_SEC, &buckets);
-    tracker.noteStop(key2, eventStartTimeNs + 2 * bucketSizeNs + 3 * NS_PER_SEC, false);
-    EXPECT_EQ((long long)(4 * NS_PER_SEC + 1LL), tracker.mDuration);
-
-    EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey),
-              (eventStartTimeNs + 2 * bucketSizeNs) / NS_PER_SEC  + 3 + refPeriodSec);
-}
-
 }  // namespace statsd
 }  // namespace os
 }  // namespace android
diff --git a/cmds/statsd/tests/metrics/OringDurationTracker_test.cpp b/cmds/statsd/tests/metrics/OringDurationTracker_test.cpp
index 80e16a1..4b579b1 100644
--- a/cmds/statsd/tests/metrics/OringDurationTracker_test.cpp
+++ b/cmds/statsd/tests/metrics/OringDurationTracker_test.cpp
@@ -38,6 +38,12 @@
 const ConfigKey kConfigKey(0, 12345);
 const int TagId = 1;
 const int64_t metricId = 123;
+const HashableDimensionKey eventKey = getMockedDimensionKey(TagId, 0, "event");
+
+const std::vector<HashableDimensionKey> kConditionKey1 = {getMockedDimensionKey(TagId, 1, "maps")};
+const HashableDimensionKey kEventKey1 = getMockedDimensionKey(TagId, 2, "maps");
+const HashableDimensionKey kEventKey2 = getMockedDimensionKey(TagId, 3, "maps");
+const uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
 
 TEST(OringDurationTrackerTest, TestDurationOverlap) {
     const MetricDimensionKey eventKey = getMockedMetricDimensionKey(TagId, 0, "event");
@@ -51,13 +57,16 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t durationTimeNs = 2 * 1000;
 
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 false, bucketStartTimeNs, bucketSizeNs, false, {});
+                                 false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, false, {});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, ConditionKey());
     EXPECT_EQ((long long)eventStartTimeNs, tracker.mLastStartTime);
@@ -84,12 +93,15 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
 
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 true, bucketStartTimeNs, bucketSizeNs, false, {});
+                                 true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, false, {});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, ConditionKey());
     tracker.noteStart(kEventKey1, true, eventStartTimeNs + 10, ConditionKey());  // overlapping wl
@@ -115,12 +127,15 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
 
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 true, bucketStartTimeNs, bucketSizeNs, false, {});
+                                 true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, false, {});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, ConditionKey());
     tracker.noteStart(kEventKey2, true, eventStartTimeNs + 10, ConditionKey());  // overlapping wl
@@ -145,13 +160,16 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t durationTimeNs = 2 * 1000;
 
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 true, bucketStartTimeNs, bucketSizeNs, false, {});
+                                 true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, false, {});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, ConditionKey());
     EXPECT_EQ((long long)eventStartTimeNs, tracker.mLastStartTime);
@@ -190,13 +208,16 @@
 
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
-    uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketStartTimeNs = 10000000000;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t durationTimeNs = 2 * 1000;
 
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 false, bucketStartTimeNs, bucketSizeNs, true, {});
+                                 false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, true, {});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, key1);
 
@@ -231,12 +252,15 @@
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
     uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t durationTimeNs = 2 * 1000;
 
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 false, bucketStartTimeNs, bucketSizeNs, true, {});
+                                 false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, true, {});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, key1);
     // condition to false; record duration 5n
@@ -271,11 +295,14 @@
     unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
 
     uint64_t bucketStartTimeNs = 10000000000;
-    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
     uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
+    uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
 
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 true, bucketStartTimeNs, bucketSizeNs, true, {});
+                                 true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, true, {});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, key1);
     tracker.noteStart(kEventKey1, true, eventStartTimeNs + 2, key1);
@@ -311,12 +338,14 @@
     sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
 
     uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
     uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
-    uint64_t bucketSizeNs = 30 * NS_PER_SEC;
 
     sp<DurationAnomalyTracker> anomalyTracker = new DurationAnomalyTracker(alert, kConfigKey);
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 true, bucketStartTimeNs, bucketSizeNs, true, {anomalyTracker});
+                                 true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, true, {anomalyTracker});
 
     // Nothing in the past bucket.
     tracker.noteStart(DEFAULT_DIMENSION_KEY, true, eventStartTimeNs, ConditionKey());
@@ -379,12 +408,14 @@
     sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
 
     uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
+    uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
+    uint64_t bucketNum = 0;
     uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
-    uint64_t bucketSizeNs = 30 * NS_PER_SEC;
 
     sp<DurationAnomalyTracker> anomalyTracker = new DurationAnomalyTracker(alert, kConfigKey);
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 true /*nesting*/, bucketStartTimeNs, bucketSizeNs, false, {anomalyTracker});
+                                 true /*nesting*/, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
+                                 bucketSizeNs, false, {anomalyTracker});
 
     tracker.noteStart(kEventKey1, true, eventStartTimeNs, ConditionKey());
     tracker.noteStop(kEventKey1, eventStartTimeNs + 10, false);
@@ -434,8 +465,8 @@
 
     sp<DurationAnomalyTracker> anomalyTracker = new DurationAnomalyTracker(alert, kConfigKey);
     OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
-                                 true /*nesting*/, bucketStartTimeNs, bucketSizeNs, false,
-                                 {anomalyTracker});
+                                 true /*nesting*/, bucketStartTimeNs, 0, bucketStartTimeNs,
+                                 bucketSizeNs, false, {anomalyTracker});
 
     tracker.noteStart(kEventKey1, true, 15 * NS_PER_SEC, conkey); // start key1
     EXPECT_EQ(1u, anomalyTracker->mAlarms.size());
diff --git a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
index 55c078d..325a372 100644
--- a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
@@ -44,6 +44,7 @@
 const int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
 const int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
 const int64_t bucket4StartTimeNs = bucketStartTimeNs + 3 * bucketSizeNs;
+const int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
 
 /*
  * Tests pulled atoms with no conditions
@@ -202,6 +203,101 @@
     EXPECT_EQ(false, curInterval.startUpdated);
 }
 
+TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade) {
+    ValueMetric metric;
+    metric.set_id(metricId);
+    metric.set_bucket(ONE_MINUTE);
+    metric.mutable_value_field()->set_field(tagId);
+    metric.mutable_value_field()->add_child()->set_field(2);
+
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    shared_ptr<MockStatsPullerManager> pullerManager =
+            make_shared<StrictMock<MockStatsPullerManager>>();
+    ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, -1, bucketStartTimeNs,
+                                      pullerManager);
+
+    shared_ptr<LogEvent> event1 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+    event1->write(1);
+    event1->write(10);
+    event1->init();
+    valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event1);
+    EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+
+    valueProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+
+    shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 59 * NS_PER_SEC);
+    event2->write(1);
+    event2->write(10);
+    event2->init();
+    valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
+    EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+
+    // Next value should create a new bucket.
+    shared_ptr<LogEvent> event3 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 65 * NS_PER_SEC);
+    event3->write(1);
+    event3->write(10);
+    event3->init();
+    valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event3);
+    EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((uint64_t)bucketStartTimeNs + bucketSizeNs, valueProducer.mCurrentBucketStartTimeNs);
+}
+
+TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
+    ValueMetric metric;
+    metric.set_id(metricId);
+    metric.set_bucket(ONE_MINUTE);
+    metric.mutable_value_field()->set_field(tagId);
+    metric.mutable_value_field()->add_child()->set_field(2);
+
+    sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+    shared_ptr<MockStatsPullerManager> pullerManager =
+            make_shared<StrictMock<MockStatsPullerManager>>();
+    EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, _, _)).WillOnce(Return());
+    EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, _)).WillOnce(Return());
+    EXPECT_CALL(*pullerManager, Pull(tagId, _))
+            .WillOnce(Invoke([](int tagId, vector<std::shared_ptr<LogEvent>>* data) {
+                data->clear();
+                shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 10);
+                event->write(tagId);
+                event->write(120);
+                event->init();
+                data->push_back(event);
+                return true;
+            }));
+    ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, tagId, bucketStartTimeNs,
+                                      pullerManager);
+
+    vector<shared_ptr<LogEvent>> allData;
+    allData.clear();
+    shared_ptr<LogEvent> event = make_shared<LogEvent>(tagId, bucketStartTimeNs + 1);
+    event->write(tagId);
+    event->write(100);
+    event->init();
+    allData.push_back(event);
+
+    valueProducer.onDataPulled(allData);
+    EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
+
+    valueProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(20L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mValue);
+
+    allData.clear();
+    event = make_shared<LogEvent>(tagId, bucket2StartTimeNs + 1);
+    event->write(tagId);
+    event->write(150);
+    event->init();
+    allData.push_back(event);
+    valueProducer.onDataPulled(allData);
+    EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ((uint64_t)bucket2StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(30L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mValue);
+}
+
 TEST(ValueMetricProducerTest, TestPushedEventsWithoutCondition) {
     ValueMetric metric;
     metric.set_id(metricId);
diff --git a/core/java/android/app/InstantAppResolverService.java b/core/java/android/app/InstantAppResolverService.java
index 76a3682..2ba4c00 100644
--- a/core/java/android/app/InstantAppResolverService.java
+++ b/core/java/android/app/InstantAppResolverService.java
@@ -88,7 +88,7 @@
     public void onGetInstantAppResolveInfo(Intent sanitizedIntent, int[] hostDigestPrefix,
             String token, InstantAppResolutionCallback callback) {
         // if not overridden, forward to old methods and filter out non-web intents
-        if (sanitizedIntent.isBrowsableWebIntent()) {
+        if (sanitizedIntent.isWebIntent()) {
             onGetInstantAppResolveInfo(hostDigestPrefix, token, callback);
         } else {
             callback.onInstantAppResolveInfo(Collections.emptyList());
@@ -107,7 +107,7 @@
             String token, InstantAppResolutionCallback callback) {
         Log.e(TAG, "New onGetInstantAppIntentFilter is not overridden");
         // if not overridden, forward to old methods and filter out non-web intents
-        if (sanitizedIntent.isBrowsableWebIntent()) {
+        if (sanitizedIntent.isWebIntent()) {
             onGetInstantAppIntentFilter(hostDigestPrefix, token, callback);
         } else {
             callback.onInstantAppResolveInfo(Collections.emptyList());
diff --git a/core/java/android/bluetooth/BluetoothSocket.java b/core/java/android/bluetooth/BluetoothSocket.java
index 09f9684..09a5b59 100644
--- a/core/java/android/bluetooth/BluetoothSocket.java
+++ b/core/java/android/bluetooth/BluetoothSocket.java
@@ -676,6 +676,35 @@
         mExcludeSdp = excludeSdp;
     }
 
+    /**
+     * Set the LE Transmit Data Length to be the maximum that the BT Controller is capable of. This
+     * parameter is used by the BT Controller to set the maximum transmission packet size on this
+     * connection. This function is currently used for testing only.
+     * @hide
+     */
+    public void requestMaximumTxDataLength() throws IOException {
+        if (mDevice == null) {
+            throw new IOException("requestMaximumTxDataLength is called on null device");
+        }
+
+        try {
+            if (mSocketState == SocketState.CLOSED) {
+                throw new IOException("socket closed");
+            }
+            IBluetooth bluetoothProxy =
+                    BluetoothAdapter.getDefaultAdapter().getBluetoothService(null);
+            if (bluetoothProxy == null) {
+                throw new IOException("Bluetooth is off");
+            }
+
+            if (DBG) Log.d(TAG, "requestMaximumTxDataLength");
+            bluetoothProxy.getSocketManager().requestMaximumTxDataLength(mDevice);
+        } catch (RemoteException e) {
+            Log.e(TAG, Log.getStackTraceString(new Throwable()));
+            throw new IOException("unable to send RPC: " + e.getMessage());
+        }
+    }
+
     private String convertAddr(final byte[] addr) {
         return String.format(Locale.US, "%02X:%02X:%02X:%02X:%02X:%02X",
                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
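A minimal usage sketch for the hidden test-only helper added above; the connected `socket` and the logging tag are assumptions and not part of this change:

    // Sketch: assumes `socket` is an already-connected BluetoothSocket created elsewhere in the test.
    try {
        socket.requestMaximumTxDataLength();  // ask the controller for its maximum LE TX packet size
    } catch (IOException e) {
        Log.e(TAG, "requestMaximumTxDataLength failed", e);
    }
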
diff --git a/core/java/android/content/Intent.java b/core/java/android/content/Intent.java
index 9b62f19..fa73e3c 100644
--- a/core/java/android/content/Intent.java
+++ b/core/java/android/content/Intent.java
@@ -10089,9 +10089,8 @@
     }
 
     /** @hide */
-    public boolean isBrowsableWebIntent() {
+    public boolean isWebIntent() {
         return ACTION_VIEW.equals(mAction)
-                && hasCategory(CATEGORY_BROWSABLE)
                 && hasWebURI();
     }
 
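A small illustration of what the relaxed check now matches; with CATEGORY_BROWSABLE dropped, an ACTION_VIEW intent carrying an http/https URI qualifies on its own (hidden framework API, shown only as a sketch):

    Intent intent = new Intent(Intent.ACTION_VIEW, Uri.parse("https://example.com"));
    // No CATEGORY_BROWSABLE is required any more; ACTION_VIEW plus a web URI is enough.
    boolean isWeb = intent.isWebIntent();  // true
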
diff --git a/core/java/android/content/om/OverlayInfo.java b/core/java/android/content/om/OverlayInfo.java
index 1a207ba..8464e26 100644
--- a/core/java/android/content/om/OverlayInfo.java
+++ b/core/java/android/content/om/OverlayInfo.java
@@ -16,10 +16,14 @@
 
 package android.content.om;
 
+import android.annotation.IntDef;
 import android.annotation.NonNull;
 import android.os.Parcel;
 import android.os.Parcelable;
 
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
 /**
  * Immutable overlay information about a package. All PackageInfos that
  * represent an overlay package will have a corresponding OverlayInfo.
@@ -27,6 +31,19 @@
  * @hide
  */
 public final class OverlayInfo implements Parcelable {
+
+    @IntDef(prefix = "STATE_", value = {
+            STATE_UNKNOWN,
+            STATE_MISSING_TARGET,
+            STATE_NO_IDMAP,
+            STATE_DISABLED,
+            STATE_ENABLED,
+            STATE_TARGET_UPGRADING,
+            STATE_OVERLAY_UPGRADING,
+    })
+    @Retention(RetentionPolicy.SOURCE)
+    public @interface State {}
+
     /**
      * An internal state used as the initial state of an overlay. OverlayInfo
      * objects exposed outside the {@link
@@ -61,6 +78,18 @@
     public static final int STATE_ENABLED = 3;
 
     /**
+     * The target package is currently being upgraded; the state will change
+     * once the package installation has finished.
+     */
+    public static final int STATE_TARGET_UPGRADING = 4;
+
+    /**
+     * The overlay package is currently being upgraded; the state will change
+     * once the package installation has finished.
+     */
+    public static final int STATE_OVERLAY_UPGRADING = 5;
+
+    /**
      * Package name of the overlay package
      */
     public final String packageName;
@@ -77,13 +106,8 @@
 
     /**
      * The state of this OverlayInfo as defined by the STATE_* constants in this class.
-     *
-     * @see #STATE_MISSING_TARGET
-     * @see #STATE_NO_IDMAP
-     * @see #STATE_DISABLED
-     * @see #STATE_ENABLED
      */
-    public final int state;
+    public final @State int state;
 
     /**
      * User handle for which this overlay applies
@@ -96,13 +120,13 @@
      * @param source the source OverlayInfo to base the new instance on
      * @param state the new state for the source OverlayInfo
      */
-    public OverlayInfo(@NonNull OverlayInfo source, int state) {
+    public OverlayInfo(@NonNull OverlayInfo source, @State int state) {
         this(source.packageName, source.targetPackageName, source.baseCodePath, state,
                 source.userId);
     }
 
     public OverlayInfo(@NonNull String packageName, @NonNull String targetPackageName,
-            @NonNull String baseCodePath, int state, int userId) {
+            @NonNull String baseCodePath, @State int state, int userId) {
         this.packageName = packageName;
         this.targetPackageName = targetPackageName;
         this.baseCodePath = baseCodePath;
@@ -136,6 +160,8 @@
             case STATE_NO_IDMAP:
             case STATE_DISABLED:
             case STATE_ENABLED:
+            case STATE_TARGET_UPGRADING:
+            case STATE_OVERLAY_UPGRADING:
                 break;
             default:
                 throw new IllegalArgumentException("State " + state + " is not a valid state");
@@ -156,7 +182,8 @@
         dest.writeInt(userId);
     }
 
-    public static final Parcelable.Creator<OverlayInfo> CREATOR = new Parcelable.Creator<OverlayInfo>() {
+    public static final Parcelable.Creator<OverlayInfo> CREATOR =
+            new Parcelable.Creator<OverlayInfo>() {
         @Override
         public OverlayInfo createFromParcel(Parcel source) {
             return new OverlayInfo(source);
@@ -189,14 +216,9 @@
      * Translate a state to a human readable string. Only intended for
      * debugging purposes.
      *
-     * @see #STATE_MISSING_TARGET
-     * @see #STATE_NO_IDMAP
-     * @see #STATE_DISABLED
-     * @see #STATE_ENABLED
-     *
      * @return a human readable String representing the state.
      */
-    public static String stateToString(int state) {
+    public static String stateToString(@State int state) {
         switch (state) {
             case STATE_UNKNOWN:
                 return "STATE_UNKNOWN";
@@ -208,6 +230,10 @@
                 return "STATE_DISABLED";
             case STATE_ENABLED:
                 return "STATE_ENABLED";
+            case STATE_TARGET_UPGRADING:
+                return "STATE_TARGET_UPGRADING";
+            case STATE_OVERLAY_UPGRADING:
+                return "STATE_OVERLAY_UPGRADING";
             default:
                 return "<unknown state>";
         }
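A short sketch of what the new @State IntDef buys callers; the handleStateChange method below is hypothetical and only illustrates the annotation:

    // Hypothetical system-server caller: annotating the parameter lets lint reject
    // integer values outside the STATE_* constants.
    private void handleStateChange(@OverlayInfo.State int newState) {
        Slog.d(TAG, "overlay state is now " + OverlayInfo.stateToString(newState));
    }
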
diff --git a/core/java/android/hardware/camera2/TotalCaptureResult.java b/core/java/android/hardware/camera2/TotalCaptureResult.java
index bae2d04..0be45a0 100644
--- a/core/java/android/hardware/camera2/TotalCaptureResult.java
+++ b/core/java/android/hardware/camera2/TotalCaptureResult.java
@@ -17,7 +17,6 @@
 package android.hardware.camera2;
 
 import android.annotation.NonNull;
-import android.annotation.Nullable;
 import android.hardware.camera2.impl.CameraMetadataNative;
 import android.hardware.camera2.impl.CaptureResultExtras;
 import android.hardware.camera2.impl.PhysicalCaptureResultInfo;
@@ -26,6 +25,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * <p>The total assembled results of a single image capture from the image sensor.</p>
@@ -49,9 +49,9 @@
  *
  * <p>For a logical multi-camera device, if the CaptureRequest contains a surface for an underlying
  * physical camera, the corresponding {@link TotalCaptureResult} object will include the metadata
- * for that physical camera. And its keys and values can be accessed by
- * {@link #getPhysicalCameraKey}. If all requested surfaces are for the logical camera, no
- * metadata for physical camera will be included.</p>
+ * for that physical camera. And the mapping between the physical camera id and result metadata
+ * can be accessed via {@link #getPhysicalCameraResults}. If all requested surfaces are for the
+ * logical camera, no metadata for physical camera will be included.</p>
  *
  * <p>{@link TotalCaptureResult} objects are immutable.</p>
  *
@@ -62,7 +62,7 @@
     private final List<CaptureResult> mPartialResults;
     private final int mSessionId;
     // The map between physical camera id and capture result
-    private final HashMap<String, CameraMetadataNative> mPhysicalCaptureResults;
+    private final HashMap<String, CaptureResult> mPhysicalCaptureResults;
 
     /**
      * Takes ownership of the passed-in camera metadata and the partial results
@@ -83,10 +83,12 @@
 
         mSessionId = sessionId;
 
-        mPhysicalCaptureResults = new HashMap<String, CameraMetadataNative>();
+        mPhysicalCaptureResults = new HashMap<String, CaptureResult>();
         for (PhysicalCaptureResultInfo onePhysicalResult : physicalResults) {
+            CaptureResult physicalResult = new CaptureResult(
+                    onePhysicalResult.getCameraMetadata(), parent, extras);
             mPhysicalCaptureResults.put(onePhysicalResult.getCameraId(),
-                    onePhysicalResult.getCameraMetadata());
+                    physicalResult);
         }
     }
 
@@ -101,7 +103,7 @@
 
         mPartialResults = new ArrayList<>();
         mSessionId = CameraCaptureSession.SESSION_ID_NONE;
-        mPhysicalCaptureResults = new HashMap<String, CameraMetadataNative>();
+        mPhysicalCaptureResults = new HashMap<String, CaptureResult>();
     }
 
     /**
@@ -132,36 +134,20 @@
     }
 
     /**
-     * Get a capture result field value for a particular physical camera id.
+     * Get the map between physical camera ids and their capture result metadata.
      *
-     * <p>The field definitions can be found in {@link CaptureResult}.</p>
-     *
-     * <p>This function can be called for logical camera devices, which are devices that have
+     * <p>This function can be called for logical multi-camera devices, which are devices that have
      * REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA capability and calls to {@link
      * CameraCharacteristics#getPhysicalCameraIds} return a non-empty list of physical devices that
-     * are backing the logical camera. The camera id included in physicalCameraId argument
-     * selects an individual physical device, and returns its specific capture result field.</p>
+     * are backing the logical camera.</p>
      *
-     * <p>This function should only be called if one or more streams from the underlying
-     * 'physicalCameraId' was requested by the corresponding capture request.</p>
-     *
-     * @throws IllegalArgumentException if the key was not valid, or the physicalCameraId is not
-     * applicable to the current camera, or a stream from 'physicalCameraId' is not requested by the
-     * corresponding capture request.
-     *
-     * @param key The result field to read.
-     * @param physicalCameraId The physical camera the result originates from.
-     *
-     * @return The value of that key, or {@code null} if the field is not set.
-     */
-    @Nullable
-    public <T> T getPhysicalCameraKey(Key<T> key, @NonNull String physicalCameraId) {
-        if (!mPhysicalCaptureResults.containsKey(physicalCameraId)) {
-            throw new IllegalArgumentException(
-                    "No TotalCaptureResult exists for physical camera " + physicalCameraId);
-        }
+     * <p>If one or more streams from the underlying physical cameras were requested by the
+     * corresponding capture request, this function returns the result metadata for those physical
+     * cameras. Otherwise, an empty map is returned.</p>
 
-        CameraMetadataNative physicalMetadata = mPhysicalCaptureResults.get(physicalCameraId);
-        return physicalMetadata.get(key);
+     * @return unmodifiable map between physical camera ids and their capture result metadata
+     */
+    public Map<String, CaptureResult> getPhysicalCameraResults() {
+        return Collections.unmodifiableMap(mPhysicalCaptureResults);
     }
 }
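A minimal usage sketch for the replacement API above, as seen from an app's capture callback; the exposure-time key and logging tag are illustrative assumptions, not part of this change:

    @Override
    public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request,
            TotalCaptureResult result) {
        // Sketch: iterate per-physical-camera metadata instead of the removed getPhysicalCameraKey().
        for (Map.Entry<String, CaptureResult> entry : result.getPhysicalCameraResults().entrySet()) {
            Long exposure = entry.getValue().get(CaptureResult.SENSOR_EXPOSURE_TIME);
            Log.d(TAG, "physical camera " + entry.getKey() + " exposure: " + exposure);
        }
    }
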
diff --git a/core/java/android/hardware/camera2/params/OutputConfiguration.java b/core/java/android/hardware/camera2/params/OutputConfiguration.java
index eb4bced..b205e2c 100644
--- a/core/java/android/hardware/camera2/params/OutputConfiguration.java
+++ b/core/java/android/hardware/camera2/params/OutputConfiguration.java
@@ -368,7 +368,7 @@
      * desirable for the camera application to request streams from individual physical cameras.
      * This call achieves it by mapping the OutputConfiguration to the physical camera id.</p>
      *
-     * <p>The valid physical camera id can be queried by {@link
+     * <p>The valid physical camera ids can be queried by {@link
      * android.hardware.camera2.CameraCharacteristics#getPhysicalCameraIds}.
      * </p>
      *
diff --git a/core/java/android/os/Binder.java b/core/java/android/os/Binder.java
index 4aadc5b..682fdb7 100644
--- a/core/java/android/os/Binder.java
+++ b/core/java/android/os/Binder.java
@@ -784,7 +784,7 @@
         private static final int MAIN_INDEX_SIZE = 1 <<  LOG_MAIN_INDEX_SIZE;
         private static final int MAIN_INDEX_MASK = MAIN_INDEX_SIZE - 1;
         // Debuggable builds will throw an AssertionError if the number of map entries exceeds:
-        private static final int CRASH_AT_SIZE = 5_000;
+        private static final int CRASH_AT_SIZE = 20_000;
 
         /**
          * We next warn when we exceed this bucket size.
diff --git a/core/java/android/provider/Settings.java b/core/java/android/provider/Settings.java
index 567127a..f179371 100644
--- a/core/java/android/provider/Settings.java
+++ b/core/java/android/provider/Settings.java
@@ -10271,6 +10271,20 @@
         public static final String BATTERY_TIP_CONSTANTS = "battery_tip_constants";
 
         /**
+         * An integer indicating the version of the anomaly config. For example, a value
+         * of 1 means the current config version is 1.
+         * @hide
+         */
+        public static final String ANOMALY_CONFIG_VERSION = "anomaly_config_version";
+
+        /**
+         * A base64-encoded string representing the anomaly stats config, used by
+         * {@link android.app.StatsManager}.
+         * @hide
+         */
+        public static final String ANOMALY_CONFIG = "anomaly_config";
+
+        /**
          * Always on display(AOD) specific settings
          * This is encoded as a key=value list, separated by commas. Ex:
          *
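A small sketch of how the two new hidden settings above might be read together; Base64 decoding follows from the value being described as base64-encoded, while the variable names are assumptions:

    int version = Settings.Global.getInt(resolver, Settings.Global.ANOMALY_CONFIG_VERSION, 0);
    String encoded = Settings.Global.getString(resolver, Settings.Global.ANOMALY_CONFIG);
    // Assumption: the decoded bytes are a serialized stats config for StatsManager.
    byte[] configBytes = (encoded == null) ? null : Base64.decode(encoded, Base64.DEFAULT);
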
diff --git a/core/java/com/android/internal/app/ResolverListController.java b/core/java/com/android/internal/app/ResolverListController.java
index 1dfff5e..6bd6930 100644
--- a/core/java/com/android/internal/app/ResolverListController.java
+++ b/core/java/com/android/internal/app/ResolverListController.java
@@ -106,7 +106,7 @@
             int flags = PackageManager.MATCH_DEFAULT_ONLY
                     | (shouldGetResolvedFilter ? PackageManager.GET_RESOLVED_FILTER : 0)
                     | (shouldGetActivityMetadata ? PackageManager.GET_META_DATA : 0);
-            if (intent.isBrowsableWebIntent()
+            if (intent.isWebIntent()
                         || (intent.getFlags() & Intent.FLAG_ACTIVITY_MATCH_EXTERNAL) != 0) {
                 flags |= PackageManager.MATCH_INSTANT;
             }
diff --git a/core/java/com/android/internal/view/InputBindResult.java b/core/java/com/android/internal/view/InputBindResult.java
index 74dbaba..f05bd32 100644
--- a/core/java/com/android/internal/view/InputBindResult.java
+++ b/core/java/com/android/internal/view/InputBindResult.java
@@ -139,6 +139,10 @@
          * @see com.android.server.wm.WindowManagerService#inputMethodClientHasFocus(IInputMethodClient)
          */
         int ERROR_NOT_IME_TARGET_WINDOW = 11;
+        /**
+         * Indicates that the focused view in the current window is not an editor.
+         */
+        int ERROR_NO_EDITOR = 12;
     }
 
     @ResultCode
@@ -258,6 +262,8 @@
                 return "ERROR_NULL";
             case ResultCode.ERROR_NO_IME:
                 return "ERROR_NO_IME";
+            case ResultCode.ERROR_NO_EDITOR:
+                return "ERROR_NO_EDITOR";
             case ResultCode.ERROR_INVALID_PACKAGE_NAME:
                 return "ERROR_INVALID_PACKAGE_NAME";
             case ResultCode.ERROR_SYSTEM_NOT_READY:
@@ -288,6 +294,10 @@
      */
     public static final InputBindResult NO_IME = error(ResultCode.ERROR_NO_IME);
     /**
+     * Predefined error object for {@link ResultCode#ERROR_NO_EDITOR}.
+     */
+    public static final InputBindResult NO_EDITOR = error(ResultCode.ERROR_NO_EDITOR);
+    /**
      * Predefined error object for {@link ResultCode#ERROR_INVALID_PACKAGE_NAME}.
      */
     public static final InputBindResult INVALID_PACKAGE_NAME =
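A hedged sketch of how the new predefined NO_EDITOR result might be returned by a server-side focus check; the surrounding method and the focusedView variable are hypothetical:

    // Hypothetical check inside the input-method service: bail out when the focused view
    // is not editable instead of binding an IME.
    if (focusedView == null || !focusedView.onCheckIsTextEditor()) {
        return InputBindResult.NO_EDITOR;
    }
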
diff --git a/core/res/AndroidManifest.xml b/core/res/AndroidManifest.xml
index f627aaa..deefddb 100644
--- a/core/res/AndroidManifest.xml
+++ b/core/res/AndroidManifest.xml
@@ -3861,6 +3861,7 @@
                 android:excludeFromRecents="true"
                 android:label="@string/user_owner_label"
                 android:exported="true"
+                android:visibleToInstantApps="true"
                 >
         </activity>
         <activity-alias android:name="com.android.internal.app.ForwardIntentToParent"
diff --git a/core/tests/coretests/src/android/provider/SettingsBackupTest.java b/core/tests/coretests/src/android/provider/SettingsBackupTest.java
index 3e62183..e1f95a3 100644
--- a/core/tests/coretests/src/android/provider/SettingsBackupTest.java
+++ b/core/tests/coretests/src/android/provider/SettingsBackupTest.java
@@ -109,6 +109,8 @@
                     Settings.Global.ALWAYS_ON_DISPLAY_CONSTANTS,
                     Settings.Global.ANIMATOR_DURATION_SCALE,
                     Settings.Global.ANOMALY_DETECTION_CONSTANTS,
+                    Settings.Global.ANOMALY_CONFIG,
+                    Settings.Global.ANOMALY_CONFIG_VERSION,
                     Settings.Global.APN_DB_UPDATE_CONTENT_URL,
                     Settings.Global.APN_DB_UPDATE_METADATA_URL,
                     Settings.Global.APP_IDLE_CONSTANTS,
diff --git a/core/tests/overlaytests/host/Android.mk b/core/tests/overlaytests/host/Android.mk
index d8e1fc1..b48a46b 100644
--- a/core/tests/overlaytests/host/Android.mk
+++ b/core/tests/overlaytests/host/Android.mk
@@ -24,7 +24,11 @@
     OverlayHostTests_BadSignatureOverlay \
     OverlayHostTests_PlatformSignatureStaticOverlay \
     OverlayHostTests_PlatformSignatureOverlay \
-    OverlayHostTests_PlatformSignatureOverlayV2
+    OverlayHostTests_UpdateOverlay \
+    OverlayHostTests_FrameworkOverlayV1 \
+    OverlayHostTests_FrameworkOverlayV2 \
+    OverlayHostTests_AppOverlayV1 \
+    OverlayHostTests_AppOverlayV2
 include $(BUILD_HOST_JAVA_LIBRARY)
 
 # Include to build test-apps.
diff --git a/core/tests/overlaytests/host/AndroidTest.xml b/core/tests/overlaytests/host/AndroidTest.xml
index 6884623..1f750a8 100644
--- a/core/tests/overlaytests/host/AndroidTest.xml
+++ b/core/tests/overlaytests/host/AndroidTest.xml
@@ -18,6 +18,11 @@
     <option name="test-tag" value="OverlayHostTests" />
     <option name="test-suite-tag" value="apct" />
 
+    <!-- Install the device tests that will be used to verify things on device. -->
+    <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
+        <option name="test-file-name" value="OverlayHostTests_UpdateOverlay.apk" />
+    </target_preparer>
+
     <test class="com.android.tradefed.testtype.HostTest">
         <option name="class" value="com.android.server.om.hosttest.InstallOverlayTests" />
     </test>
diff --git a/core/tests/overlaytests/host/src/com/android/server/om/hosttest/InstallOverlayTests.java b/core/tests/overlaytests/host/src/com/android/server/om/hosttest/InstallOverlayTests.java
index 5093710..bf91a16 100644
--- a/core/tests/overlaytests/host/src/com/android/server/om/hosttest/InstallOverlayTests.java
+++ b/core/tests/overlaytests/host/src/com/android/server/om/hosttest/InstallOverlayTests.java
@@ -23,14 +23,48 @@
 import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
 import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
 
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
 @RunWith(DeviceJUnit4ClassRunner.class)
 public class InstallOverlayTests extends BaseHostJUnit4Test {
-
-    private static final String OVERLAY_PACKAGE_NAME =
+    private static final String SIG_OVERLAY_PACKAGE_NAME =
             "com.android.server.om.hosttest.signature_overlay";
+    private static final String APP_OVERLAY_PACKAGE_NAME =
+            "com.android.server.om.hosttest.app_overlay";
+    private static final String FRAMEWORK_OVERLAY_PACKAGE_NAME =
+            "com.android.server.om.hosttest.framework_overlay";
+    private static final String[] ALL_PACKAGES = new String[] {
+            SIG_OVERLAY_PACKAGE_NAME, APP_OVERLAY_PACKAGE_NAME, FRAMEWORK_OVERLAY_PACKAGE_NAME
+    };
+
+    private static final String DEVICE_TEST_PKG =
+            "com.android.server.om.hosttest.update_overlay_test";
+    private static final String DEVICE_TEST_CLS = DEVICE_TEST_PKG + ".UpdateOverlayTest";
+
+    @Before
+    public void ensureNoOverlays() throws Exception {
+        // Make sure we're starting with a clean slate.
+        for (String pkg : ALL_PACKAGES) {
+            assertFalse(pkg + " should not be installed", isPackageInstalled(pkg));
+            assertFalse(pkg + " should not be registered with overlay manager service",
+                    overlayManagerContainsPackage(pkg));
+        }
+    }
+
+    /*
+    For some reason, SuiteApkInstaller is *not* uninstalling overlays, even though #installPackage()
+    claims it will auto-clean.
+    TODO(b/72877546): Remove when auto-clean is fixed.
+     */
+    @After
+    public void uninstallOverlays() throws Exception {
+        for (String pkg : ALL_PACKAGES) {
+            uninstallPackage(pkg);
+        }
+    }
 
     @Test
     public void failToInstallNonPlatformSignedOverlay() throws Exception {
@@ -40,7 +74,7 @@
         } catch (Exception e) {
             // Expected.
         }
-        assertFalse(overlayManagerContainsPackage());
+        assertFalse(overlayManagerContainsPackage(SIG_OVERLAY_PACKAGE_NAME));
     }
 
     @Test
@@ -51,28 +85,64 @@
         } catch (Exception e) {
             // Expected.
         }
-        assertFalse(overlayManagerContainsPackage());
+        assertFalse(overlayManagerContainsPackage(SIG_OVERLAY_PACKAGE_NAME));
     }
 
     @Test
-    public void succeedToInstallPlatformSignedOverlay() throws Exception {
+    public void installPlatformSignedOverlay() throws Exception {
         installPackage("OverlayHostTests_PlatformSignatureOverlay.apk");
-        assertTrue(overlayManagerContainsPackage());
+        assertTrue(overlayManagerContainsPackage(SIG_OVERLAY_PACKAGE_NAME));
     }
 
     @Test
-    public void succeedToInstallPlatformSignedOverlayAndUpdate() throws Exception {
-        installPackage("OverlayHostTests_PlatformSignatureOverlay.apk");
-        assertTrue(overlayManagerContainsPackage());
-        assertEquals("v1", getDevice().getAppPackageInfo(OVERLAY_PACKAGE_NAME).getVersionName());
+    public void installPlatformSignedAppOverlayAndUpdate() throws Exception {
+        assertTrue(runDeviceTests(DEVICE_TEST_PKG, DEVICE_TEST_CLS, "expectAppResource"));
 
-        installPackage("OverlayHostTests_PlatformSignatureOverlayV2.apk");
-        assertTrue(overlayManagerContainsPackage());
-        assertEquals("v2", getDevice().getAppPackageInfo(OVERLAY_PACKAGE_NAME).getVersionName());
+        installPackage("OverlayHostTests_AppOverlayV1.apk");
+        setOverlayEnabled(APP_OVERLAY_PACKAGE_NAME, true);
+        assertTrue(overlayManagerContainsPackage(APP_OVERLAY_PACKAGE_NAME));
+        assertEquals("v1", getDevice()
+                .getAppPackageInfo(APP_OVERLAY_PACKAGE_NAME)
+                .getVersionName());
+        assertTrue(runDeviceTests(DEVICE_TEST_PKG, DEVICE_TEST_CLS,
+                "expectAppOverlayV1Resource"));
+
+        installPackage("OverlayHostTests_AppOverlayV2.apk");
+        assertTrue(overlayManagerContainsPackage(APP_OVERLAY_PACKAGE_NAME));
+        assertEquals("v2", getDevice()
+                .getAppPackageInfo(APP_OVERLAY_PACKAGE_NAME)
+                .getVersionName());
+        assertTrue(runDeviceTests(DEVICE_TEST_PKG, DEVICE_TEST_CLS,
+                "expectAppOverlayV2Resource"));
     }
 
-    private boolean overlayManagerContainsPackage() throws Exception {
-        return getDevice().executeShellCommand("cmd overlay list")
-                .contains(OVERLAY_PACKAGE_NAME);
+    @Test
+    public void installPlatformSignedFrameworkOverlayAndUpdate() throws Exception {
+        assertTrue(runDeviceTests(DEVICE_TEST_PKG, DEVICE_TEST_CLS, "expectAppResource"));
+
+        installPackage("OverlayHostTests_FrameworkOverlayV1.apk");
+        setOverlayEnabled(FRAMEWORK_OVERLAY_PACKAGE_NAME, true);
+        assertTrue(overlayManagerContainsPackage(FRAMEWORK_OVERLAY_PACKAGE_NAME));
+        assertEquals("v1", getDevice()
+                .getAppPackageInfo(FRAMEWORK_OVERLAY_PACKAGE_NAME)
+                .getVersionName());
+        assertTrue(runDeviceTests(DEVICE_TEST_PKG, DEVICE_TEST_CLS,
+                "expectFrameworkOverlayV1Resource"));
+
+        installPackage("OverlayHostTests_FrameworkOverlayV2.apk");
+        assertTrue(overlayManagerContainsPackage(FRAMEWORK_OVERLAY_PACKAGE_NAME));
+        assertEquals("v2", getDevice()
+                .getAppPackageInfo(FRAMEWORK_OVERLAY_PACKAGE_NAME)
+                .getVersionName());
+        assertTrue(runDeviceTests(DEVICE_TEST_PKG, DEVICE_TEST_CLS,
+                "expectFrameworkOverlayV2Resource"));
+    }
+
+    private void setOverlayEnabled(String pkg, boolean enabled) throws Exception {
+        getDevice().executeShellCommand("cmd overlay " + (enabled ? "enable " : "disable ") + pkg);
+    }
+
+    private boolean overlayManagerContainsPackage(String pkg) throws Exception {
+        return getDevice().executeShellCommand("cmd overlay list").contains(pkg);
     }
 }
diff --git a/core/tests/overlaytests/host/test-apps/SignatureOverlay/Android.mk b/core/tests/overlaytests/host/test-apps/SignatureOverlay/Android.mk
index b051a82..4249549 100644
--- a/core/tests/overlaytests/host/test-apps/SignatureOverlay/Android.mk
+++ b/core/tests/overlaytests/host/test-apps/SignatureOverlay/Android.mk
@@ -40,13 +40,4 @@
 LOCAL_AAPT_FLAGS += --version-code 1 --version-name v1
 include $(BUILD_PACKAGE)
 
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_PACKAGE_NAME := OverlayHostTests_PlatformSignatureOverlayV2
-LOCAL_COMPATIBILITY_SUITE := general-tests
-LOCAL_CERTIFICATE := platform
-LOCAL_AAPT_FLAGS := --custom-package $(my_package_prefix)_v2
-LOCAL_AAPT_FLAGS += --version-code 2 --version-name v2
-include $(BUILD_PACKAGE)
-
 my_package_prefix :=
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/Android.mk b/core/tests/overlaytests/host/test-apps/UpdateOverlay/Android.mk
new file mode 100644
index 0000000..bd6d73d
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/Android.mk
@@ -0,0 +1,73 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_SRC_FILES := $(call all-java-files-under,src)
+LOCAL_PACKAGE_NAME := OverlayHostTests_UpdateOverlay
+LOCAL_COMPATIBILITY_SUITE := general-tests
+LOCAL_STATIC_JAVA_LIBRARIES := android-support-test
+include $(BUILD_PACKAGE)
+
+my_package_prefix := com.android.server.om.hosttest.framework_overlay
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_PACKAGE_NAME := OverlayHostTests_FrameworkOverlayV1
+LOCAL_COMPATIBILITY_SUITE := general-tests
+LOCAL_CERTIFICATE := platform
+LOCAL_AAPT_FLAGS := --custom-package $(my_package_prefix)_v1
+LOCAL_AAPT_FLAGS += --version-code 1 --version-name v1
+LOCAL_RESOURCE_DIR := $(LOCAL_PATH)/framework/v1/res
+LOCAL_MANIFEST_FILE := framework/AndroidManifest.xml
+include $(BUILD_PACKAGE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_PACKAGE_NAME := OverlayHostTests_FrameworkOverlayV2
+LOCAL_COMPATIBILITY_SUITE := general-tests
+LOCAL_CERTIFICATE := platform
+LOCAL_AAPT_FLAGS := --custom-package $(my_package_prefix)_v2
+LOCAL_AAPT_FLAGS += --version-code 2 --version-name v2
+LOCAL_RESOURCE_DIR := $(LOCAL_PATH)/framework/v2/res
+LOCAL_MANIFEST_FILE := framework/AndroidManifest.xml
+include $(BUILD_PACKAGE)
+
+my_package_prefix := com.android.server.om.hosttest.app_overlay
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_PACKAGE_NAME := OverlayHostTests_AppOverlayV1
+LOCAL_COMPATIBILITY_SUITE := general-tests
+LOCAL_CERTIFICATE := platform
+LOCAL_AAPT_FLAGS := --custom-package $(my_package_prefix)_v1
+LOCAL_AAPT_FLAGS += --version-code 1 --version-name v1
+LOCAL_RESOURCE_DIR := $(LOCAL_PATH)/app/v1/res
+LOCAL_MANIFEST_FILE := app/AndroidManifest.xml
+include $(BUILD_PACKAGE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_PACKAGE_NAME := OverlayHostTests_AppOverlayV2
+LOCAL_COMPATIBILITY_SUITE := general-tests
+LOCAL_CERTIFICATE := platform
+LOCAL_AAPT_FLAGS := --custom-package $(my_package_prefix)_v2
+LOCAL_AAPT_FLAGS += --version-code 2 --version-name v2
+LOCAL_RESOURCE_DIR := $(LOCAL_PATH)/app/v2/res
+LOCAL_MANIFEST_FILE := app/AndroidManifest.xml
+include $(BUILD_PACKAGE)
+
+my_package_prefix :=
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/AndroidManifest.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/AndroidManifest.xml
new file mode 100644
index 0000000..06077a7
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/AndroidManifest.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="com.android.server.om.hosttest.update_overlay_test">
+
+    <application>
+        <uses-library android:name="android.test.runner" />
+    </application>
+
+    <instrumentation android:name="android.support.test.runner.AndroidJUnitRunner"
+        android:targetPackage="com.android.server.om.hosttest.update_overlay_test"
+        android:label="Update Overlay Test"/>
+</manifest>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/AndroidManifest.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/AndroidManifest.xml
new file mode 100644
index 0000000..73804eb
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/AndroidManifest.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="com.android.server.om.hosttest.app_overlay">
+    <overlay android:targetPackage="com.android.server.om.hosttest.update_overlay_test" />
+</manifest>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/v1/res/values/values.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/v1/res/values/values.xml
new file mode 100644
index 0000000..63f85c2
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/v1/res/values/values.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<resources>
+    <string name="app_resource">App Resource Overlay V1</string>
+</resources>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/v2/res/values/values.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/v2/res/values/values.xml
new file mode 100644
index 0000000..fa4a697
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/app/v2/res/values/values.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<resources>
+    <string name="app_resource">App Resource Overlay V2</string>
+</resources>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/AndroidManifest.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/AndroidManifest.xml
new file mode 100644
index 0000000..8c8fe94
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/AndroidManifest.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="com.android.server.om.hosttest.framework_overlay">
+    <overlay android:targetPackage="android" />
+</manifest>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/v1/res/values/values.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/v1/res/values/values.xml
new file mode 100644
index 0000000..fedb2c6
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/v1/res/values/values.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<resources>
+    <string name="ok">Framework Overlay V1</string>
+</resources>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/v2/res/values/values.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/v2/res/values/values.xml
new file mode 100644
index 0000000..8aebf483
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/framework/v2/res/values/values.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<resources>
+    <string name="ok">Framework Overlay V2</string>
+</resources>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/res/values/values.xml b/core/tests/overlaytests/host/test-apps/UpdateOverlay/res/values/values.xml
new file mode 100644
index 0000000..7393166
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/res/values/values.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<resources>
+    <string name="app_resource">App Resource</string>
+</resources>
diff --git a/core/tests/overlaytests/host/test-apps/UpdateOverlay/src/com/android/server/om/hosttest/update_overlay_test/UpdateOverlayTest.java b/core/tests/overlaytests/host/test-apps/UpdateOverlay/src/com/android/server/om/hosttest/update_overlay_test/UpdateOverlayTest.java
new file mode 100644
index 0000000..d46bb37
--- /dev/null
+++ b/core/tests/overlaytests/host/test-apps/UpdateOverlay/src/com/android/server/om/hosttest/update_overlay_test/UpdateOverlayTest.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package com.android.server.om.hosttest.update_overlay_test;
+
+import static org.junit.Assert.assertEquals;
+
+import android.content.res.Configuration;
+import android.content.res.Resources;
+import android.support.test.InstrumentationRegistry;
+import android.support.test.filters.SmallTest;
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.util.Locale;
+
+@RunWith(AndroidJUnit4.class)
+@SmallTest
+public class UpdateOverlayTest {
+    private Resources mResources;
+
+    @Before
+    public void setUp() throws Exception {
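+        // Pin the locale so the expected English resource values match regardless of the
+        // device's default locale.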
+        final Configuration defaultLocaleConfiguration = new Configuration();
+        defaultLocaleConfiguration.setLocale(Locale.US);
+        mResources = InstrumentationRegistry
+                .getInstrumentation()
+                .getContext()
+                .createConfigurationContext(defaultLocaleConfiguration)
+                .getResources();
+    }
+
+    @Test
+    public void expectAppResource() throws Exception {
+        assertEquals("App Resource", mResources.getString(R.string.app_resource));
+    }
+
+    @Test
+    public void expectAppOverlayV1Resource() throws Exception {
+        assertEquals("App Resource Overlay V1", mResources.getString(R.string.app_resource));
+    }
+
+    @Test
+    public void expectAppOverlayV2Resource() throws Exception {
+        assertEquals("App Resource Overlay V2", mResources.getString(R.string.app_resource));
+    }
+
+    @Test
+    public void expectFrameworkOverlayResource() throws Exception {
+        assertEquals("OK", mResources.getString(android.R.string.ok));
+    }
+
+    @Test
+    public void expectFrameworkOverlayV1Resource() throws Exception {
+        assertEquals("Framework Overlay V1", mResources.getString(android.R.string.ok));
+    }
+
+    @Test
+    public void expectFrameworkOverlayV2Resource() throws Exception {
+        assertEquals("Framework Overlay V2", mResources.getString(android.R.string.ok));
+    }
+}
diff --git a/graphics/java/android/graphics/Typeface.java b/graphics/java/android/graphics/Typeface.java
index 04c5295..f41267e 100644
--- a/graphics/java/android/graphics/Typeface.java
+++ b/graphics/java/android/graphics/Typeface.java
@@ -60,6 +60,7 @@
 import java.io.IOException;
+import java.io.InputStream;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.Arrays;
@@ -801,36 +802,18 @@
      * @return The new typeface.
      */
     public static Typeface createFromAsset(AssetManager mgr, String path) {
-        if (path == null) {
-            throw new NullPointerException();  // for backward compatibility
-        }
-        synchronized (sDynamicCacheLock) {
-            Typeface typeface = new Builder(mgr, path).build();
-            if (typeface != null) return typeface;
+        Preconditions.checkNotNull(path); // for backward compatibility
+        Preconditions.checkNotNull(mgr);
 
-            final String key = Builder.createAssetUid(mgr, path, 0 /* ttcIndex */,
-                    null /* axes */, RESOLVE_BY_FONT_TABLE, RESOLVE_BY_FONT_TABLE,
-                    DEFAULT_FAMILY);
-            typeface = sDynamicTypefaceCache.get(key);
-            if (typeface != null) return typeface;
-
-            final FontFamily fontFamily = new FontFamily();
-            if (fontFamily.addFontFromAssetManager(mgr, path, 0, true /* isAsset */,
-                    0 /* ttc index */, RESOLVE_BY_FONT_TABLE, RESOLVE_BY_FONT_TABLE,
-                    null /* axes */)) {
-                if (!fontFamily.freeze()) {
-                    return Typeface.DEFAULT;
-                }
-                final FontFamily[] families = { fontFamily };
-                typeface = createFromFamiliesWithDefault(families, DEFAULT_FAMILY,
-                        RESOLVE_BY_FONT_TABLE, RESOLVE_BY_FONT_TABLE);
-                sDynamicTypefaceCache.put(key, typeface);
-                return typeface;
-            } else {
-                fontFamily.abortCreation();
-            }
+        Typeface typeface = new Builder(mgr, path).build();
+        if (typeface != null) return typeface;
+        // check if the file exists, and throw an exception for backward compatibility
+        try (InputStream inputStream = mgr.open(path)) {
+        } catch (IOException e) {
+            throw new RuntimeException("Font asset not found " + path);
         }
-        throw new RuntimeException("Font asset not found " + path);
+
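+        // The asset exists but could not be parsed as a font, so fall back to the default
+        // typeface.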
+        return Typeface.DEFAULT;
     }
 
     /**
@@ -848,13 +831,22 @@
     /**
      * Create a new typeface from the specified font file.
      *
-     * @param path The path to the font data.
+     * @param file The path to the font data.
      * @return The new typeface.
      */
-    public static Typeface createFromFile(@Nullable File path) {
+    public static Typeface createFromFile(@Nullable File file) {
         // For the compatibility reasons, leaving possible NPE here.
         // See android.graphics.cts.TypefaceTest#testCreateFromFileByFileReferenceNull
-        return createFromFile(path.getAbsolutePath());
+
+        Typeface typeface = new Builder(file).build();
+        if (typeface != null) return typeface;
+
+        // check if the file exists, and throw an exception for backward compatibility
+        if (!file.exists()) {
+            throw new RuntimeException("Font asset not found " + file.getAbsolutePath());
+        }
+
+        return Typeface.DEFAULT;
     }
 
     /**
@@ -864,19 +856,8 @@
      * @return The new typeface.
      */
     public static Typeface createFromFile(@Nullable String path) {
-        final FontFamily fontFamily = new FontFamily();
-        if (fontFamily.addFont(path, 0 /* ttcIndex */, null /* axes */,
-                  RESOLVE_BY_FONT_TABLE, RESOLVE_BY_FONT_TABLE)) {
-            if (!fontFamily.freeze()) {
-                return Typeface.DEFAULT;
-            }
-            FontFamily[] families = { fontFamily };
-            return createFromFamiliesWithDefault(families, DEFAULT_FAMILY,
-                    RESOLVE_BY_FONT_TABLE, RESOLVE_BY_FONT_TABLE);
-        } else {
-            fontFamily.abortCreation();
-        }
-        throw new RuntimeException("Font not found " + path);
+        Preconditions.checkNotNull(path); // for backward compatibility
+        return createFromFile(new File(path));
     }
 
     /**
diff --git a/location/java/android/location/LocationManager.java b/location/java/android/location/LocationManager.java
index c33dce1..d3c6edd 100644
--- a/location/java/android/location/LocationManager.java
+++ b/location/java/android/location/LocationManager.java
@@ -236,6 +236,62 @@
      */
     public static final String GNSS_HARDWARE_MODEL_NAME_UNKNOWN = "Model Name Unknown";
 
+    /**
+     * Broadcast intent action for Settings app to inject a footer at the bottom of location
+     * settings.
+     *
+     * <p>This broadcast is used for two things:
+     * <ol>
+     *     <li>For receivers to inject a footer with provided text. This is for use only by apps
+     *         that are included in the system image. </li>
+     *     <li>For receivers to know their footer is injected under location settings.</li>
+     * </ol>
+     *
+     * <p>To inject a footer into location settings, you must declare a broadcast receiver for
+     * {@link LocationManager#SETTINGS_FOOTER_DISPLAYED_ACTION} in the manifest as follows:
+     * <pre>
+     *     &lt;receiver android:name="com.example.android.footer.MyFooterInjector"&gt;
+     *         &lt;intent-filter&gt;
+     *             &lt;action android:name="com.android.settings.location.INJECT_FOOTER" /&gt;
+     *         &lt;/intent-filter&gt;
+     *         &lt;meta-data
+     *             android:name="com.android.settings.location.FOOTER_STRING"
+     *             android:resource="@string/my_injected_footer_string" /&gt;
+     *     &lt;/receiver&gt;
+     * </pre>
+     *
+     * <p>On entering location settings, the Settings app will send a
+     * {@link #SETTINGS_FOOTER_DISPLAYED_ACTION} broadcast to receivers whose footer is
+     * successfully injected. On leaving location settings, the footer is no longer visible to
+     * users, and the Settings app will send a {@link #SETTINGS_FOOTER_REMOVED_ACTION} broadcast
+     * to those receivers.
+     *
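+     * <p>For example (illustrative only), a receiver matching the manifest entry above,
+     * here the hypothetical {@code com.example.android.footer.MyFooterInjector} class,
+     * might simply track whether its footer is currently shown:
+     * <pre>
+     *     public class MyFooterInjector extends BroadcastReceiver {
+     *         public void onReceive(Context context, Intent intent) {
+     *             if (SETTINGS_FOOTER_DISPLAYED_ACTION.equals(intent.getAction())) {
+     *                 // The injected footer is now visible under location settings.
+     *             } else if (SETTINGS_FOOTER_REMOVED_ACTION.equals(intent.getAction())) {
+     *                 // The injected footer is no longer visible.
+     *             }
+     *         }
+     *     }
+     * </pre>
+     *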
+     * @hide
+     */
+    public static final String SETTINGS_FOOTER_DISPLAYED_ACTION =
+            "com.android.settings.location.DISPLAYED_FOOTER";
+
+    /**
+     * Broadcast intent action sent when the location settings footer is no longer visible to
+     * users.
+     *
+     * <p>See {@link #SETTINGS_FOOTER_DISPLAYED_ACTION} for details on how to use it.
+     *
+     * @hide
+     */
+    public static final String SETTINGS_FOOTER_REMOVED_ACTION =
+            "com.android.settings.location.REMOVED_FOOTER";
+
+    /**
+     * Metadata name for {@link LocationManager#SETTINGS_FOOTER_DISPLAYED_ACTION} broadcast
+     * receivers to specify a string resource id as location settings footer text. This is for use
+     * only by apps that are included in the system image.
+     *
+     * <p>See {@link #SETTINGS_FOOTER_DISPLAYED_ACTION} for details on how to use it.
+     *
+     * @hide
+     */
+    public static final String METADATA_SETTINGS_FOOTER_STRING =
+            "com.android.settings.location.FOOTER_STRING";
+
     // Map from LocationListeners to their associated ListenerTransport objects
     private HashMap<LocationListener,ListenerTransport> mListeners =
         new HashMap<LocationListener,ListenerTransport>();
diff --git a/media/java/android/media/AudioFocusInfo.java b/media/java/android/media/AudioFocusInfo.java
index 5d0c8e2..5467a69 100644
--- a/media/java/android/media/AudioFocusInfo.java
+++ b/media/java/android/media/AudioFocusInfo.java
@@ -38,6 +38,10 @@
     private int mLossReceived;
     private int mFlags;
 
+    // generation count for the validity of a request/response async exchange between
+    // external focus policy and MediaFocusControl
+    private long mGenCount = -1;
+
 
     /**
      * Class constructor
@@ -61,6 +65,16 @@
         mSdkTarget = sdk;
     }
 
+    /** @hide */
+    public void setGen(long g) {
+        mGenCount = g;
+    }
+
+    /** @hide */
+    public long getGen() {
+        return mGenCount;
+    }
+
 
     /**
      * The audio attributes for the audio focus request.
@@ -128,6 +142,7 @@
         dest.writeInt(mLossReceived);
         dest.writeInt(mFlags);
         dest.writeInt(mSdkTarget);
+        dest.writeLong(mGenCount);
     }
 
     @Override
@@ -168,6 +183,8 @@
         if (mSdkTarget != other.mSdkTarget) {
             return false;
         }
+        // mGenCount is not used to verify equality between two focus holds as multiple requests
+        // (hence of different generations) could correspond to the same hold
         return true;
     }
 
@@ -175,7 +192,7 @@
             = new Parcelable.Creator<AudioFocusInfo>() {
 
         public AudioFocusInfo createFromParcel(Parcel in) {
-            return new AudioFocusInfo(
+            final AudioFocusInfo afi = new AudioFocusInfo(
                     AudioAttributes.CREATOR.createFromParcel(in), //AudioAttributes aa
                     in.readInt(), // int clientUid
                     in.readString(), //String clientId
@@ -185,6 +202,8 @@
                     in.readInt(), //int flags
                     in.readInt()  //int sdkTarget
                     );
+            afi.setGen(in.readLong());
+            return afi;
         }
 
         public AudioFocusInfo[] newArray(int size) {
diff --git a/media/java/android/media/AudioManager.java b/media/java/android/media/AudioManager.java
index bf51d97..0be54ec 100644
--- a/media/java/android/media/AudioManager.java
+++ b/media/java/android/media/AudioManager.java
@@ -32,6 +32,7 @@
 import android.content.Context;
 import android.content.Intent;
 import android.media.audiopolicy.AudioPolicy;
+import android.media.audiopolicy.AudioPolicy.AudioPolicyFocusListener;
 import android.media.session.MediaController;
 import android.media.session.MediaSession;
 import android.media.session.MediaSessionLegacyHelper;
@@ -54,10 +55,13 @@
 import android.util.Slog;
 import android.view.KeyEvent;
 
+import com.android.internal.annotations.GuardedBy;
+
 import java.io.IOException;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
@@ -2338,6 +2342,20 @@
                 }
             }
         }
+
+        @Override
+        public void dispatchFocusResultFromExtPolicy(int requestResult, String clientId) {
+            synchronized (mFocusRequestsLock) {
+                // TODO use generation counter as the key instead
+                final BlockingFocusResultReceiver focusReceiver =
+                        mFocusRequestsAwaitingResult.remove(clientId);
+                if (focusReceiver != null) {
+                    focusReceiver.notifyResult(requestResult);
+                } else {
+                    Log.e(TAG, "dispatchFocusResultFromExtPolicy found no result receiver");
+                }
+            }
+        }
     };
 
     private String getIdForAudioFocusListener(OnAudioFocusChangeListener l) {
@@ -2390,6 +2408,40 @@
       */
     public static final int AUDIOFOCUS_REQUEST_DELAYED = 2;
 
+    /** @hide */
+    @IntDef(flag = false, prefix = "AUDIOFOCUS_REQUEST", value = {
+            AUDIOFOCUS_REQUEST_FAILED,
+            AUDIOFOCUS_REQUEST_GRANTED,
+            AUDIOFOCUS_REQUEST_DELAYED }
+    )
+    @Retention(RetentionPolicy.SOURCE)
+    public @interface FocusRequestResult {}
+
+    /**
+     * @hide
+     * Return code used when a synchronous focus request on the client side must block
+     * until the external audio focus policy decides on the response for the client.
+     */
+    public static final int AUDIOFOCUS_REQUEST_WAITING_FOR_EXT_POLICY = 100;
+
+    /**
+     * Timeout duration in ms when waiting on the external focus policy for the result of a
+     * focus request.
+     */
+    private static final int EXT_FOCUS_POLICY_TIMEOUT_MS = 200;
+
+    private static final String FOCUS_CLIENT_ID_STRING = "android_audio_focus_client_id";
+
+    private final Object mFocusRequestsLock = new Object();
+    /**
+     * Map of all receivers of focus request results, one per unresolved focus request.
+     * Receivers are added before sending the request to the external focus policy,
+     * and are removed either after receiving the result, or after the timeout.
+     * This variable is lazily initialized.
+     */
+    @GuardedBy("mFocusRequestsLock")
+    private HashMap<String, BlockingFocusResultReceiver> mFocusRequestsAwaitingResult;
+
 
     /**
      *  Request audio focus.
@@ -2656,18 +2708,100 @@
             // some tests don't have a Context
             sdk = Build.VERSION.SDK_INT;
         }
-        try {
-            status = service.requestAudioFocus(afr.getAudioAttributes(),
-                    afr.getFocusGain(), mICallBack,
-                    mAudioFocusDispatcher,
-                    getIdForAudioFocusListener(afr.getOnAudioFocusChangeListener()),
-                    getContext().getOpPackageName() /* package name */, afr.getFlags(),
-                    ap != null ? ap.cb() : null,
-                    sdk);
-        } catch (RemoteException e) {
-            throw e.rethrowFromSystemServer();
+
+        final String clientId = getIdForAudioFocusListener(afr.getOnAudioFocusChangeListener());
+        final BlockingFocusResultReceiver focusReceiver;
+        synchronized (mFocusRequestsLock) {
+            try {
+                // TODO status contains result and generation counter for ext policy
+                status = service.requestAudioFocus(afr.getAudioAttributes(),
+                        afr.getFocusGain(), mICallBack,
+                        mAudioFocusDispatcher,
+                        clientId,
+                        getContext().getOpPackageName() /* package name */, afr.getFlags(),
+                        ap != null ? ap.cb() : null,
+                        sdk);
+            } catch (RemoteException e) {
+                throw e.rethrowFromSystemServer();
+            }
+            if (status != AudioManager.AUDIOFOCUS_REQUEST_WAITING_FOR_EXT_POLICY) {
+                // default path with no external focus policy
+                return status;
+            }
+            if (mFocusRequestsAwaitingResult == null) {
+                mFocusRequestsAwaitingResult =
+                        new HashMap<String, BlockingFocusResultReceiver>(1);
+            }
+            focusReceiver = new BlockingFocusResultReceiver(clientId);
+            mFocusRequestsAwaitingResult.put(clientId, focusReceiver);
         }
-        return status;
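+        // An external focus policy is handling this request: block here until it posts a
+        // result via dispatchFocusResultFromExtPolicy() or the timeout expires.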
+        focusReceiver.waitForResult(EXT_FOCUS_POLICY_TIMEOUT_MS);
+        if (DEBUG && !focusReceiver.receivedResult()) {
+            Log.e(TAG, "requestAudio response from ext policy timed out, denying request");
+        }
+        synchronized (mFocusRequestsLock) {
+            mFocusRequestsAwaitingResult.remove(clientId);
+        }
+        return focusReceiver.requestResult();
+    }
+
+    // helper class that abstracts out the handling of spurious wakeups in Object.wait()
+    private static final class SafeWaitObject {
+        private boolean mQuit = false;
+
+        public void safeNotify() {
+            synchronized (this) {
+                mQuit = true;
+                this.notify();
+            }
+        }
+
+        public void safeWait(long millis) throws InterruptedException {
+            final long timeOutTime = java.lang.System.currentTimeMillis() + millis;
+            synchronized (this) {
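+                // Loop to tolerate spurious wakeups: only exit once safeNotify() has been
+                // called or the deadline has passed.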
+                while (!mQuit) {
+                    final long timeToWait = timeOutTime - java.lang.System.currentTimeMillis();
+                    if (timeToWait < 0) { break; }
+                    this.wait(timeToWait);
+                }
+            }
+        }
+    }
+
+    private static final class BlockingFocusResultReceiver {
+        private final SafeWaitObject mLock = new SafeWaitObject();
+        @GuardedBy("mLock")
+        private boolean mResultReceived = false;
+        // request denied by default (e.g. timeout)
+        private int mFocusRequestResult = AudioManager.AUDIOFOCUS_REQUEST_FAILED;
+        private final String mFocusClientId;
+
+        BlockingFocusResultReceiver(String clientId) {
+            mFocusClientId = clientId;
+        }
+
+        boolean receivedResult() { return mResultReceived; }
+        int requestResult() { return mFocusRequestResult; }
+
+        void notifyResult(int requestResult) {
+            synchronized (mLock) {
+                mResultReceived = true;
+                mFocusRequestResult = requestResult;
+                mLock.safeNotify();
+            }
+        }
+
+        public void waitForResult(long timeOutMs) {
+            synchronized (mLock) {
+                if (mResultReceived) {
+                    // the result was received before waiting
+                    return;
+                }
+                try {
+                    mLock.safeWait(timeOutMs);
+                } catch (InterruptedException e) { }
+            }
+        }
     }
 
     /**
@@ -2714,6 +2848,32 @@
 
     /**
      * @hide
+     * Set the result for the audio focus request received through
+     * {@link AudioPolicyFocusListener#onAudioFocusRequest(AudioFocusInfo, int)}.
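+     *
+     * <p>For example (illustrative only), a focus policy that simply grants every request
+     * could respond from its listener callback as follows, where {@code mAudioManager} and
+     * {@code mAudioPolicy} are assumed to be the registered manager and policy:
+     * <pre>
+     *     public void onAudioFocusRequest(AudioFocusInfo afi, int requestResult) {
+     *         mAudioManager.setFocusRequestResult(
+     *                 afi, AudioManager.AUDIOFOCUS_REQUEST_GRANTED, mAudioPolicy);
+     *     }
+     * </pre>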
+     * @param afi the information about the focus requester
+     * @param requestResult the result to the focus request to be passed to the requester
+     * @param ap a valid registered {@link AudioPolicy} configured as a focus policy.
+     */
+    @SystemApi
+    @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
+    public void setFocusRequestResult(@NonNull AudioFocusInfo afi,
+            @FocusRequestResult int requestResult, @NonNull AudioPolicy ap) {
+        if (afi == null) {
+            throw new IllegalArgumentException("Illegal null AudioFocusInfo");
+        }
+        if (ap == null) {
+            throw new IllegalArgumentException("Illegal null AudioPolicy");
+        }
+        final IAudioService service = getService();
+        try {
+            service.setFocusRequestResultFromExtPolicy(afi, requestResult, ap.cb());
+        } catch (RemoteException e) {
+            throw e.rethrowFromSystemServer();
+        }
+    }
+
+    /**
+     * @hide
      * Notifies an application with a focus listener of gain or loss of audio focus.
      * This method can only be used by owners of an {@link AudioPolicy} configured with
      * {@link AudioPolicy.Builder#setIsAudioFocusPolicy(boolean)} set to true.
diff --git a/media/java/android/media/IAudioFocusDispatcher.aidl b/media/java/android/media/IAudioFocusDispatcher.aidl
index 09575f7..3b33c5b 100644
--- a/media/java/android/media/IAudioFocusDispatcher.aidl
+++ b/media/java/android/media/IAudioFocusDispatcher.aidl
@@ -25,4 +25,6 @@
 
     void dispatchAudioFocusChange(int focusChange, String clientId);
 
+    void dispatchFocusResultFromExtPolicy(int requestResult, String clientId);
+
 }
diff --git a/media/java/android/media/IAudioService.aidl b/media/java/android/media/IAudioService.aidl
index 88d0a60..cd4143c 100644
--- a/media/java/android/media/IAudioService.aidl
+++ b/media/java/android/media/IAudioService.aidl
@@ -207,5 +207,8 @@
     int setBluetoothA2dpDeviceConnectionStateSuppressNoisyIntent(in BluetoothDevice device,
             int state, int profile, boolean suppressNoisyIntent);
 
+    oneway void setFocusRequestResultFromExtPolicy(in AudioFocusInfo afi, int requestResult,
+            in IAudioPolicyCallback pcb);
+
     // WARNING: read warning at top of file, it is recommended to add new methods at the end
 }
diff --git a/media/java/android/media/audiopolicy/AudioPolicy.java b/media/java/android/media/audiopolicy/AudioPolicy.java
index 4de731a..2190635 100644
--- a/media/java/android/media/audiopolicy/AudioPolicy.java
+++ b/media/java/android/media/audiopolicy/AudioPolicy.java
@@ -463,9 +463,9 @@
          * Only ever called if the {@link AudioPolicy} was built with
          * {@link AudioPolicy.Builder#setIsAudioFocusPolicy(boolean)} set to {@code true}.
          * @param afi information about the focus request and the requester
-         * @param requestResult the result that was returned synchronously by the framework to the
-         *     application, {@link #AUDIOFOCUS_REQUEST_FAILED},or
-         *     {@link #AUDIOFOCUS_REQUEST_DELAYED}.
+         * @param requestResult deprecated after the addition of
+         *     {@link AudioManager#setFocusRequestResult(AudioFocusInfo, int, AudioPolicy)}
+         *     in Android P, always equal to {@link #AUDIOFOCUS_REQUEST_GRANTED}.
          */
         public void onAudioFocusRequest(AudioFocusInfo afi, int requestResult) {}
         /**
@@ -534,7 +534,7 @@
             sendMsg(MSG_FOCUS_REQUEST, afi, requestResult);
             if (DEBUG) {
                 Log.v(TAG, "notifyAudioFocusRequest: pack=" + afi.getPackageName() + " client="
-                        + afi.getClientId() + "reqRes=" + requestResult);
+                        + afi.getClientId() + " gen=" + afi.getGen());
             }
         }
 
diff --git a/packages/SettingsLib/common.mk b/packages/SettingsLib/common.mk
index 741db34..3c2ca2d 100644
--- a/packages/SettingsLib/common.mk
+++ b/packages/SettingsLib/common.mk
@@ -16,11 +16,11 @@
 ifeq ($(LOCAL_USE_AAPT2),true)
 LOCAL_STATIC_JAVA_LIBRARIES += \
     android-support-annotations \
-    android-arch-lifecycle-common
+    apptoolkit-lifecycle-common
 
 LOCAL_STATIC_ANDROID_LIBRARIES += \
     android-support-v4 \
-    android-arch-lifecycle-runtime \
+    apptoolkit-lifecycle-runtime \
     android-support-v7-recyclerview \
     android-support-v7-preference \
     android-support-v7-appcompat \
diff --git a/services/core/java/com/android/server/AlarmManagerService.java b/services/core/java/com/android/server/AlarmManagerService.java
index f49cd67..b4af432 100644
--- a/services/core/java/com/android/server/AlarmManagerService.java
+++ b/services/core/java/com/android/server/AlarmManagerService.java
@@ -58,6 +58,7 @@
 import android.text.TextUtils;
 import android.text.format.DateFormat;
 import android.util.ArrayMap;
+import android.util.ArraySet;
 import android.util.KeyValueListParser;
 import android.util.Log;
 import android.util.Pair;
@@ -79,7 +80,6 @@
 import java.util.Comparator;
 import java.util.Date;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.Locale;
 import java.util.Random;
@@ -224,10 +224,12 @@
 
     interface Stats {
         int REBATCH_ALL_ALARMS = 0;
+        int REORDER_ALARMS_FOR_STANDBY = 1;
     }
 
     private final StatLogger mStatLogger = new StatLogger(new String[] {
             "REBATCH_ALL_ALARMS",
+            "REORDER_ALARMS_FOR_STANDBY",
     });
 
     /**
@@ -522,6 +524,10 @@
             return newStart;
         }
 
+        boolean remove(Alarm alarm) {
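+            // Remove this exact instance by identity, reusing the predicate-based variant.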
+            return remove(a -> (a == alarm));
+        }
+
         boolean remove(Predicate<Alarm> predicate) {
             boolean didRemove = false;
             long newStart = 0;  // recalculate endpoints as we go
@@ -741,6 +747,23 @@
         return (index == 0);
     }
 
+    private void insertAndBatchAlarmLocked(Alarm alarm) {
+        final int whichBatch = ((alarm.flags & AlarmManager.FLAG_STANDALONE) != 0) ? -1
+                : attemptCoalesceLocked(alarm.whenElapsed, alarm.maxWhenElapsed);
+
+        if (whichBatch < 0) {
+            addBatchLocked(mAlarmBatches, new Batch(alarm));
+        } else {
+            final Batch batch = mAlarmBatches.get(whichBatch);
+            if (batch.add(alarm)) {
+                // The start time of this batch advanced, so batch ordering may
+                // have just been broken.  Move it to where it now belongs.
+                mAlarmBatches.remove(whichBatch);
+                addBatchLocked(mAlarmBatches, batch);
+            }
+        }
+    }
+
     // Return the index of the matching batch, or -1 if none found.
     int attemptCoalesceLocked(long whenElapsed, long maxWhen) {
         final int N = mAlarmBatches.size();
@@ -794,7 +817,7 @@
     }
 
     void rebatchAllAlarmsLocked(boolean doValidate) {
-        long start = mStatLogger.getTime();
+        final long start = mStatLogger.getTime();
         final int oldCount =
                 getAlarmCount(mAlarmBatches) + ArrayUtils.size(mPendingWhileIdleAlarms);
         final boolean oldHasTick = haveBatchesTimeTickAlarm(mAlarmBatches)
@@ -837,6 +860,44 @@
         mStatLogger.logDurationStat(Stats.REBATCH_ALL_ALARMS, start);
     }
 
+    /**
+     * Re-orders the alarm batches based on send times re-evaluated against the current
+     * app-standby buckets.
+     * @param targetPackages [Package, User] pairs for which alarms need to be re-evaluated,
+     *                       null indicates all.
+     * @return True if there was any reordering done to the current list.
+     */
+    boolean reorderAlarmsBasedOnStandbyBuckets(ArraySet<Pair<String, Integer>> targetPackages) {
+        final long start = mStatLogger.getTime();
+        final ArrayList<Alarm> rescheduledAlarms = new ArrayList<>();
+
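+        // Iterate in reverse so alarms and batches can be removed by index without
+        // invalidating the positions still to be visited.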
+        for (int batchIndex = mAlarmBatches.size() - 1; batchIndex >= 0; batchIndex--) {
+            final Batch batch = mAlarmBatches.get(batchIndex);
+            for (int alarmIndex = batch.size() - 1; alarmIndex >= 0; alarmIndex--) {
+                final Alarm alarm = batch.get(alarmIndex);
+                final Pair<String, Integer> packageUser =
+                        Pair.create(alarm.sourcePackage, UserHandle.getUserId(alarm.creatorUid));
+                if (targetPackages != null && !targetPackages.contains(packageUser)) {
+                    continue;
+                }
+                if (adjustDeliveryTimeBasedOnStandbyBucketLocked(alarm)) {
+                    batch.remove(alarm);
+                    rescheduledAlarms.add(alarm);
+                }
+            }
+            if (batch.size() == 0) {
+                mAlarmBatches.remove(batchIndex);
+            }
+        }
+        for (int i = 0; i < rescheduledAlarms.size(); i++) {
+            final Alarm a = rescheduledAlarms.get(i);
+            insertAndBatchAlarmLocked(a);
+        }
+
+        mStatLogger.logDurationStat(Stats.REORDER_ALARMS_FOR_STANDBY, start);
+        return rescheduledAlarms.size() > 0;
+    }
+
     void reAddAlarmLocked(Alarm a, long nowElapsed, boolean doValidate) {
         a.when = a.origWhen;
         long whenElapsed = convertToElapsed(a.when, a.type);
@@ -1442,18 +1503,32 @@
         else return mConstants.APP_STANDBY_MIN_DELAYS[0];
     }
 
-    private void adjustDeliveryTimeBasedOnStandbyBucketLocked(Alarm alarm) {
+    /**
+     * Adjusts the alarm delivery time based on the current app standby bucket.
+     * @param alarm The alarm to adjust
+     * @return true if the alarm delivery time was updated.
+     * TODO: Reduce the number of calls to getAppStandbyBucket by batching the calls per
+     * {package, user} pair.
+     */
+    private boolean adjustDeliveryTimeBasedOnStandbyBucketLocked(Alarm alarm) {
         if (alarm.alarmClock != null || UserHandle.isCore(alarm.creatorUid)) {
-            return;
+            return false;
+        }
+        // TODO: short term fix for b/72816079, remove after a proper fix is in place
+        if ((alarm.flags & AlarmManager.FLAG_ALLOW_WHILE_IDLE) != 0) {
+            return false;
         }
         if (mAppStandbyParole) {
             if (alarm.whenElapsed > alarm.requestedWhenElapsed) {
-                // We did throttle this alarm earlier, restore original requirements
+                // We did defer this alarm earlier, restore original requirements
                 alarm.whenElapsed = alarm.requestedWhenElapsed;
                 alarm.maxWhenElapsed = alarm.requestedMaxWhenElapsed;
             }
-            return;
+            return true;
         }
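+        // Remember the current delivery window so the caller can tell whether it changed.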
+        final long oldWhenElapsed = alarm.whenElapsed;
+        final long oldMaxWhenElapsed = alarm.maxWhenElapsed;
+
         final String sourcePackage = alarm.sourcePackage;
         final int sourceUserId = UserHandle.getUserId(alarm.creatorUid);
         final int standbyBucket = mUsageStatsManagerInternal.getAppStandbyBucket(
@@ -1465,8 +1540,14 @@
             final long minElapsed = lastElapsed + getMinDelayForBucketLocked(standbyBucket);
             if (alarm.requestedWhenElapsed < minElapsed) {
                 alarm.whenElapsed = alarm.maxWhenElapsed = minElapsed;
+            } else {
+                // app is now eligible to run alarms at the originally requested window.
+                // Restore original requirements in case they were changed earlier.
+                alarm.whenElapsed = alarm.requestedWhenElapsed;
+                alarm.maxWhenElapsed = alarm.requestedMaxWhenElapsed;
             }
         }
+        return (oldWhenElapsed != alarm.whenElapsed || oldMaxWhenElapsed != alarm.maxWhenElapsed);
     }
 
     private void setImplLocked(Alarm a, boolean rebatching, boolean doValidate) {
@@ -1521,21 +1602,7 @@
             }
         }
         adjustDeliveryTimeBasedOnStandbyBucketLocked(a);
-
-        int whichBatch = ((a.flags&AlarmManager.FLAG_STANDALONE) != 0)
-                ? -1 : attemptCoalesceLocked(a.whenElapsed, a.maxWhenElapsed);
-        if (whichBatch < 0) {
-            Batch batch = new Batch(a);
-            addBatchLocked(mAlarmBatches, batch);
-        } else {
-            Batch batch = mAlarmBatches.get(whichBatch);
-            if (batch.add(a)) {
-                // The start time of this batch advanced, so batch ordering may
-                // have just been broken.  Move it to where it now belongs.
-                mAlarmBatches.remove(whichBatch);
-                addBatchLocked(mAlarmBatches, batch);
-            }
-        }
+        insertAndBatchAlarmLocked(a);
 
         if (a.alarmClock != null) {
             mNextAlarmClockMayChange = true;
@@ -3138,7 +3205,7 @@
             final boolean isRtc = (type == RTC || type == RTC_WAKEUP);
             pw.print(prefix); pw.print("tag="); pw.println(statsTag);
             pw.print(prefix); pw.print("type="); pw.print(type);
-                    pw.print(" requestedWhenELapsed="); TimeUtils.formatDuration(
+                    pw.print(" requestedWhenElapsed="); TimeUtils.formatDuration(
                             requestedWhenElapsed, nowELAPSED, pw);
                     pw.print(" whenElapsed="); TimeUtils.formatDuration(whenElapsed,
                             nowELAPSED, pw);
@@ -3391,28 +3458,19 @@
                                 }
                                 mPendingNonWakeupAlarms.clear();
                             }
-                            boolean needRebatch = false;
-                            final HashSet<String> triggerPackages = new HashSet<>();
-                            for (int i = triggerList.size() - 1; i >= 0; i--) {
-                                triggerPackages.add(triggerList.get(i).sourcePackage);
-                            }
-                            outer:
-                            for (int i = 0; i < mAlarmBatches.size(); i++) {
-                                final Batch batch = mAlarmBatches.get(i);
-                                for (int j = 0; j < batch.size(); j++) {
-                                    if (triggerPackages.contains(batch.get(j))) {
-                                        needRebatch = true;
-                                        break outer;
-                                    }
+                            final ArraySet<Pair<String, Integer>> triggerPackages =
+                                    new ArraySet<>();
+                            for (int i = 0; i < triggerList.size(); i++) {
+                                final Alarm a = triggerList.get(i);
+                                if (!UserHandle.isCore(a.creatorUid)) {
+                                    triggerPackages.add(Pair.create(
+                                            a.sourcePackage, UserHandle.getUserId(a.creatorUid)));
                                 }
                             }
-                            if (needRebatch) {
-                                rebatchAllAlarmsLocked(false);
-                            } else {
-                                rescheduleKernelAlarmsLocked();
-                                updateNextAlarmClockLocked();
-                            }
                             deliverAlarmsLocked(triggerList, nowELAPSED);
+                            reorderAlarmsBasedOnStandbyBuckets(triggerPackages);
+                            rescheduleKernelAlarmsLocked();
+                            updateNextAlarmClockLocked();
                         }
                     }
 
@@ -3518,13 +3576,21 @@
                 case APP_STANDBY_PAROLE_CHANGED:
                     synchronized (mLock) {
                         mAppStandbyParole = (Boolean) msg.obj;
-                        rebatchAllAlarmsLocked(false);
+                        if (reorderAlarmsBasedOnStandbyBuckets(null)) {
+                            rescheduleKernelAlarmsLocked();
+                            updateNextAlarmClockLocked();
+                        }
                     }
                     break;
 
                 case APP_STANDBY_BUCKET_CHANGED:
                     synchronized (mLock) {
-                        rebatchAllAlarmsLocked(false);
+                        final ArraySet<Pair<String, Integer>> filterPackages = new ArraySet<>();
+                        filterPackages.add(Pair.create((String) msg.obj, msg.arg1));
+                        if (reorderAlarmsBasedOnStandbyBuckets(filterPackages)) {
+                            rescheduleKernelAlarmsLocked();
+                            updateNextAlarmClockLocked();
+                        }
                     }
                     break;
 
@@ -3751,7 +3817,8 @@
                         bucket);
             }
             mHandler.removeMessages(AlarmHandler.APP_STANDBY_BUCKET_CHANGED);
-            mHandler.sendEmptyMessage(AlarmHandler.APP_STANDBY_BUCKET_CHANGED);
+            mHandler.obtainMessage(AlarmHandler.APP_STANDBY_BUCKET_CHANGED, userId, -1, packageName)
+                    .sendToTarget();
         }
 
         @Override
diff --git a/services/core/java/com/android/server/InputMethodManagerService.java b/services/core/java/com/android/server/InputMethodManagerService.java
index 2f42585..93f7f1d 100644
--- a/services/core/java/com/android/server/InputMethodManagerService.java
+++ b/services/core/java/com/android/server/InputMethodManagerService.java
@@ -2970,12 +2970,19 @@
                         break;
                 }
 
-                if (!didStart && attribute != null) {
-                    if (!DebugFlags.FLAG_OPTIMIZE_START_INPUT.value()
-                            || (controlFlags
-                                    & InputMethodManager.CONTROL_WINDOW_IS_TEXT_EDITOR) != 0) {
-                        res = startInputUncheckedLocked(cs, inputContext, missingMethods, attribute,
-                                controlFlags, startInputReason);
+                if (!didStart) {
+                    if (attribute != null) {
+                        if (!DebugFlags.FLAG_OPTIMIZE_START_INPUT.value()
+                                || (controlFlags
+                                & InputMethodManager.CONTROL_WINDOW_IS_TEXT_EDITOR) != 0) {
+                            res = startInputUncheckedLocked(cs, inputContext, missingMethods,
+                                    attribute,
+                                    controlFlags, startInputReason);
+                        } else {
+                            res = InputBindResult.NO_EDITOR;
+                        }
+                    } else {
+                        res = InputBindResult.NULL_EDITOR_INFO;
                     }
                 }
             }
diff --git a/services/core/java/com/android/server/am/ActivityStackSupervisor.java b/services/core/java/com/android/server/am/ActivityStackSupervisor.java
index 77f6d44..a0f31cd 100644
--- a/services/core/java/com/android/server/am/ActivityStackSupervisor.java
+++ b/services/core/java/com/android/server/am/ActivityStackSupervisor.java
@@ -1266,7 +1266,7 @@
                 Trace.traceBegin(TRACE_TAG_ACTIVITY_MANAGER, "resolveIntent");
                 int modifiedFlags = flags
                         | PackageManager.MATCH_DEFAULT_ONLY | ActivityManagerService.STOCK_PM_FLAGS;
-                if (intent.isBrowsableWebIntent()
+                if (intent.isWebIntent()
                             || (intent.getFlags() & Intent.FLAG_ACTIVITY_MATCH_EXTERNAL) != 0) {
                     modifiedFlags |= PackageManager.MATCH_INSTANT;
                 }
diff --git a/services/core/java/com/android/server/audio/AudioService.java b/services/core/java/com/android/server/audio/AudioService.java
index 1825db8..56d66de 100644
--- a/services/core/java/com/android/server/audio/AudioService.java
+++ b/services/core/java/com/android/server/audio/AudioService.java
@@ -7296,6 +7296,12 @@
     //======================
     /**  */
     public int dispatchFocusChange(AudioFocusInfo afi, int focusChange, IAudioPolicyCallback pcb) {
+        if (afi == null) {
+            throw new IllegalArgumentException("Illegal null AudioFocusInfo");
+        }
+        if (pcb == null) {
+            throw new IllegalArgumentException("Illegal null AudioPolicy callback");
+        }
         synchronized (mAudioPolicies) {
             if (!mAudioPolicies.containsKey(pcb.asBinder())) {
                 throw new IllegalStateException("Unregistered AudioPolicy for focus dispatch");
@@ -7304,6 +7310,23 @@
         }
     }
 
+    public void setFocusRequestResultFromExtPolicy(AudioFocusInfo afi, int requestResult,
+            IAudioPolicyCallback pcb) {
+        if (afi == null) {
+            throw new IllegalArgumentException("Illegal null AudioFocusInfo");
+        }
+        if (pcb == null) {
+            throw new IllegalArgumentException("Illegal null AudioPolicy callback");
+        }
+        synchronized (mAudioPolicies) {
+            if (!mAudioPolicies.containsKey(pcb.asBinder())) {
+                throw new IllegalStateException("Unregistered AudioPolicy for external focus");
+            }
+            mMediaFocusControl.setFocusRequestResultFromExtPolicy(afi, requestResult);
+        }
+    }
+
+
     //======================
     // misc
     //======================
diff --git a/services/core/java/com/android/server/audio/FocusRequester.java b/services/core/java/com/android/server/audio/FocusRequester.java
index f2ef02f..99f0840 100644
--- a/services/core/java/com/android/server/audio/FocusRequester.java
+++ b/services/core/java/com/android/server/audio/FocusRequester.java
@@ -241,15 +241,15 @@
 
 
     void release() {
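+        // Work on local snapshots of the source binder and death handler before the fields
+        // are cleared below.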
+        final IBinder srcRef = mSourceRef;
+        final AudioFocusDeathHandler deathHdlr = mDeathHandler;
         try {
-            if (mSourceRef != null && mDeathHandler != null) {
-                mSourceRef.unlinkToDeath(mDeathHandler, 0);
-                mDeathHandler = null;
-                mFocusDispatcher = null;
+            if (srcRef != null && deathHdlr != null) {
+                srcRef.unlinkToDeath(deathHdlr, 0);
             }
-        } catch (java.util.NoSuchElementException e) {
-            Log.e(TAG, "FocusRequester.release() hit ", e);
-        }
+        } catch (java.util.NoSuchElementException e) { }
+        mDeathHandler = null;
+        mFocusDispatcher = null;
     }
 
     @Override
@@ -424,7 +424,7 @@
 
     int dispatchFocusChange(int focusChange) {
         if (mFocusDispatcher == null) {
-            if (MediaFocusControl.DEBUG) { Log.v(TAG, "dispatchFocusChange: no focus dispatcher"); }
+            if (MediaFocusControl.DEBUG) { Log.e(TAG, "dispatchFocusChange: no focus dispatcher"); }
             return AudioManager.AUDIOFOCUS_REQUEST_FAILED;
         }
         if (focusChange == AudioManager.AUDIOFOCUS_NONE) {
@@ -445,12 +445,29 @@
         try {
             mFocusDispatcher.dispatchAudioFocusChange(focusChange, mClientId);
         } catch (android.os.RemoteException e) {
-            Log.v(TAG, "dispatchFocusChange: error talking to focus listener", e);
+            Log.e(TAG, "dispatchFocusChange: error talking to focus listener " + mClientId, e);
             return AudioManager.AUDIOFOCUS_REQUEST_FAILED;
         }
         return AudioManager.AUDIOFOCUS_REQUEST_GRANTED;
     }
 
+    void dispatchFocusResultFromExtPolicy(int requestResult) {
+        if (mFocusDispatcher == null) {
+            if (MediaFocusControl.DEBUG) {
+                Log.e(TAG, "dispatchFocusResultFromExtPolicy: no focus dispatcher");
+            }
+            // No dispatcher to deliver the result to, bail out before dereferencing it below.
+            return;
+        }
+        if (DEBUG) {
+            Log.v(TAG, "dispatching result " + requestResult + " to " + mClientId);
+        }
+        try {
+            mFocusDispatcher.dispatchFocusResultFromExtPolicy(requestResult, mClientId);
+        } catch (android.os.RemoteException e) {
+            Log.e(TAG, "dispatchFocusResultFromExtPolicy: error talking to focus listener"
+                    + mClientId, e);
+        }
+    }
+
     AudioFocusInfo toAudioFocusInfo() {
         return new AudioFocusInfo(mAttributes, mCallingUid, mClientId, mPackageName,
                 mFocusGainRequest, mFocusLossReceived, mGrantFlags, mSdkTarget);
diff --git a/services/core/java/com/android/server/audio/MediaFocusControl.java b/services/core/java/com/android/server/audio/MediaFocusControl.java
index 9ddc52a..d023bd7 100644
--- a/services/core/java/com/android/server/audio/MediaFocusControl.java
+++ b/services/core/java/com/android/server/audio/MediaFocusControl.java
@@ -83,6 +83,10 @@
 
     private boolean mRingOrCallActive = false;
 
+    private final Object mExtFocusChangeLock = new Object();
+    @GuardedBy("mExtFocusChangeLock")
+    private long mExtFocusChangeCounter;
+
     protected MediaFocusControl(Context cntxt, PlayerFocusEnforcer pfe) {
         mContext = cntxt;
         mAppOps = (AppOpsManager)mContext.getSystemService(Context.APP_OPS_SERVICE);
@@ -521,7 +525,7 @@
      * @param requestResult
      * @return true if the external audio focus policy (if any) is handling the focus request
      */
-    boolean notifyExtFocusPolicyFocusRequest_syncAf(AudioFocusInfo afi, int requestResult,
+    boolean notifyExtFocusPolicyFocusRequest_syncAf(AudioFocusInfo afi,
             IAudioFocusDispatcher fd, IBinder cb) {
         if (mFocusPolicy == null) {
             return false;
@@ -530,6 +534,9 @@
             Log.v(TAG, "notifyExtFocusPolicyFocusRequest client="+afi.getClientId()
             + " dispatcher=" + fd);
         }
+        synchronized (mExtFocusChangeLock) {
+            afi.setGen(mExtFocusChangeCounter++);
+        }
         final FocusRequester existingFr = mFocusOwnersForFocusPolicy.get(afi.getClientId());
         if (existingFr != null) {
             if (!existingFr.hasSameDispatcher(fd)) {
@@ -538,8 +545,7 @@
                 mFocusOwnersForFocusPolicy.put(afi.getClientId(),
                         new FocusRequester(afi, fd, cb, hdlr, this));
             }
-        } else if (requestResult == AudioManager.AUDIOFOCUS_REQUEST_GRANTED
-                 || requestResult == AudioManager.AUDIOFOCUS_REQUEST_DELAYED) {
+        } else {
             // new focus (future) focus owner to keep track of
             final AudioFocusDeathHandler hdlr = new AudioFocusDeathHandler(cb);
             mFocusOwnersForFocusPolicy.put(afi.getClientId(),
@@ -547,12 +553,25 @@
         }
         try {
             //oneway
-            mFocusPolicy.notifyAudioFocusRequest(afi, requestResult);
+            mFocusPolicy.notifyAudioFocusRequest(afi, AudioManager.AUDIOFOCUS_REQUEST_GRANTED);
+            return true;
         } catch (RemoteException e) {
             Log.e(TAG, "Can't call notifyAudioFocusRequest() on IAudioPolicyCallback "
                     + mFocusPolicy.asBinder(), e);
         }
-        return true;
+        return false;
+    }
+
+    void setFocusRequestResultFromExtPolicy(AudioFocusInfo afi, int requestResult) {
+        synchronized (mExtFocusChangeLock) {
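+            // Drop results that reference a generation this policy has not issued yet.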
+            if (afi.getGen() > mExtFocusChangeCounter) {
+                return;
+            }
+        }
+        final FocusRequester fr = mFocusOwnersForFocusPolicy.get(afi.getClientId());
+        if (fr != null) {
+            fr.dispatchFocusResultFromExtPolicy(requestResult);
+        }
     }
 
     /**
@@ -590,7 +609,12 @@
                 if (DEBUG) { Log.v(TAG, "> failed: no focus policy" ); }
                 return AudioManager.AUDIOFOCUS_REQUEST_FAILED;
             }
-            final FocusRequester fr = mFocusOwnersForFocusPolicy.get(afi.getClientId());
+            final FocusRequester fr;
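+            // A permanent loss removes the requester from the owner map, as it no longer
+            // holds focus; other focus changes keep it tracked.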
+            if (focusChange == AudioManager.AUDIOFOCUS_LOSS) {
+                fr = mFocusOwnersForFocusPolicy.remove(afi.getClientId());
+            } else {
+                fr = mFocusOwnersForFocusPolicy.get(afi.getClientId());
+            }
             if (fr == null) {
                 if (DEBUG) { Log.v(TAG, "> failed: no such focus requester known" ); }
                 return AudioManager.AUDIOFOCUS_REQUEST_FAILED;
@@ -710,9 +734,7 @@
             boolean focusGrantDelayed = false;
             if (!canReassignAudioFocus()) {
                 if ((flags & AudioManager.AUDIOFOCUS_FLAG_DELAY_OK) == 0) {
-                    final int result = AudioManager.AUDIOFOCUS_REQUEST_FAILED;
-                    notifyExtFocusPolicyFocusRequest_syncAf(afiForExtPolicy, result, fd, cb);
-                    return result;
+                    return AudioManager.AUDIOFOCUS_REQUEST_FAILED;
                 } else {
                     // request has AUDIOFOCUS_FLAG_DELAY_OK: focus can't be
                     // granted right now, so the requester will be inserted in the focus stack
@@ -721,12 +743,11 @@
                 }
             }
 
-            // external focus policy: delay request for focus gain?
-            final int resultWithExtPolicy = AudioManager.AUDIOFOCUS_REQUEST_DELAYED;
+            // external focus policy?
             if (notifyExtFocusPolicyFocusRequest_syncAf(
-                    afiForExtPolicy, resultWithExtPolicy, fd, cb)) {
+                    afiForExtPolicy, fd, cb)) {
                 // stop handling focus request here as it is handled by external audio focus policy
-                return resultWithExtPolicy;
+                return AudioManager.AUDIOFOCUS_REQUEST_WAITING_FOR_EXT_POLICY;
             }
 
             // handle the potential premature death of the new holder of the focus
diff --git a/services/core/java/com/android/server/om/OverlayManagerServiceImpl.java b/services/core/java/com/android/server/om/OverlayManagerServiceImpl.java
index 7600e81..6e02db7 100644
--- a/services/core/java/com/android/server/om/OverlayManagerServiceImpl.java
+++ b/services/core/java/com/android/server/om/OverlayManagerServiceImpl.java
@@ -20,6 +20,8 @@
 import static android.content.om.OverlayInfo.STATE_ENABLED;
 import static android.content.om.OverlayInfo.STATE_MISSING_TARGET;
 import static android.content.om.OverlayInfo.STATE_NO_IDMAP;
+import static android.content.om.OverlayInfo.STATE_OVERLAY_UPGRADING;
+import static android.content.om.OverlayInfo.STATE_TARGET_UPGRADING;
 
 import static com.android.server.om.OverlayManagerService.DEBUG;
 import static com.android.server.om.OverlayManagerService.TAG;
@@ -50,6 +52,10 @@
  * @see OverlayManagerService
  */
 final class OverlayManagerServiceImpl {
+    // Flags passed to updateState() to mark that the target or overlay package is mid-upgrade.
+    private static final int FLAG_TARGET_IS_UPGRADING = 1<<0;
+    private static final int FLAG_OVERLAY_IS_UPGRADING = 1<<1;
+
     private final PackageManagerHelper mPackageManager;
     private final IdmapManager mIdmapManager;
     private final OverlayManagerSettings mSettings;
@@ -123,9 +129,7 @@
             }
 
             try {
-                final PackageInfo targetPackage =
-                        mPackageManager.getPackageInfo(overlayPackage.overlayTarget, newUserId);
-                updateState(targetPackage, overlayPackage, newUserId);
+                updateState(overlayPackage.overlayTarget, overlayPackage.packageName, newUserId, 0);
             } catch (OverlayManagerSettings.BadKeyException e) {
                 Slog.e(TAG, "failed to update settings", e);
                 mSettings.remove(overlayPackage.packageName, newUserId);
@@ -168,8 +172,7 @@
             Slog.d(TAG, "onTargetPackageAdded packageName=" + packageName + " userId=" + userId);
         }
 
-        final PackageInfo targetPackage = mPackageManager.getPackageInfo(packageName, userId);
-        if (updateAllOverlaysForTarget(packageName, userId, targetPackage)) {
+        if (updateAllOverlaysForTarget(packageName, userId, 0)) {
             mListener.onOverlaysChanged(packageName, userId);
         }
     }
@@ -179,18 +182,18 @@
             Slog.d(TAG, "onTargetPackageChanged packageName=" + packageName + " userId=" + userId);
         }
 
-        final PackageInfo targetPackage = mPackageManager.getPackageInfo(packageName, userId);
-        if (updateAllOverlaysForTarget(packageName, userId, targetPackage)) {
+        if (updateAllOverlaysForTarget(packageName, userId, 0)) {
             mListener.onOverlaysChanged(packageName, userId);
         }
     }
 
     void onTargetPackageUpgrading(@NonNull final String packageName, final int userId) {
         if (DEBUG) {
-            Slog.d(TAG, "onTargetPackageUpgrading packageName=" + packageName + " userId=" + userId);
+            Slog.d(TAG, "onTargetPackageUpgrading packageName=" + packageName + " userId="
+                    + userId);
         }
 
-        if (updateAllOverlaysForTarget(packageName, userId, null)) {
+        if (updateAllOverlaysForTarget(packageName, userId, FLAG_TARGET_IS_UPGRADING)) {
             mListener.onOverlaysChanged(packageName, userId);
         }
     }
@@ -200,8 +203,7 @@
             Slog.d(TAG, "onTargetPackageUpgraded packageName=" + packageName + " userId=" + userId);
         }
 
-        final PackageInfo targetPackage = mPackageManager.getPackageInfo(packageName, userId);
-        if (updateAllOverlaysForTarget(packageName, userId, targetPackage)) {
+        if (updateAllOverlaysForTarget(packageName, userId, 0)) {
             mListener.onOverlaysChanged(packageName, userId);
         }
     }
@@ -211,7 +213,7 @@
             Slog.d(TAG, "onTargetPackageRemoved packageName=" + packageName + " userId=" + userId);
         }
 
-        if (updateAllOverlaysForTarget(packageName, userId, null)) {
+        if (updateAllOverlaysForTarget(packageName, userId, 0)) {
             mListener.onOverlaysChanged(packageName, userId);
         }
     }
@@ -219,20 +221,21 @@
     /**
      * Returns true if the settings were modified for this target.
      */
-    private boolean updateAllOverlaysForTarget(@NonNull final String packageName, final int userId,
-            @Nullable final PackageInfo targetPackage) {
+    private boolean updateAllOverlaysForTarget(@NonNull final String targetPackageName,
+            final int userId, final int flags) {
         boolean modified = false;
-        final List<OverlayInfo> ois = mSettings.getOverlaysForTarget(packageName, userId);
+        final List<OverlayInfo> ois = mSettings.getOverlaysForTarget(targetPackageName, userId);
         final int N = ois.size();
         for (int i = 0; i < N; i++) {
             final OverlayInfo oi = ois.get(i);
-            final PackageInfo overlayPackage = mPackageManager.getPackageInfo(oi.packageName, userId);
+            final PackageInfo overlayPackage = mPackageManager.getPackageInfo(oi.packageName,
+                    userId);
             if (overlayPackage == null) {
                 modified |= mSettings.remove(oi.packageName, oi.userId);
                 removeIdmapIfPossible(oi);
             } else {
                 try {
-                    modified |= updateState(targetPackage, overlayPackage, userId);
+                    modified |= updateState(targetPackageName, oi.packageName, userId, flags);
                 } catch (OverlayManagerSettings.BadKeyException e) {
                     Slog.e(TAG, "failed to update settings", e);
                     modified |= mSettings.remove(oi.packageName, userId);
@@ -254,14 +257,11 @@
             return;
         }
 
-        final PackageInfo targetPackage =
-                mPackageManager.getPackageInfo(overlayPackage.overlayTarget, userId);
-
         mSettings.init(packageName, userId, overlayPackage.overlayTarget,
                 overlayPackage.applicationInfo.getBaseCodePath(),
                 overlayPackage.isStaticOverlayPackage(), overlayPackage.overlayPriority);
         try {
-            if (updateState(targetPackage, overlayPackage, userId)) {
+            if (updateState(overlayPackage.overlayTarget, packageName, userId, 0)) {
                 mListener.onOverlaysChanged(overlayPackage.overlayTarget, userId);
             }
         } catch (OverlayManagerSettings.BadKeyException e) {
@@ -271,15 +271,64 @@
     }
 
     void onOverlayPackageChanged(@NonNull final String packageName, final int userId) {
-        Slog.wtf(TAG, "onOverlayPackageChanged called, but only pre-installed overlays supported");
+        if (DEBUG) {
+            Slog.d(TAG, "onOverlayPackageChanged packageName=" + packageName + " userId=" + userId);
+        }
+
+        try {
+            final OverlayInfo oi = mSettings.getOverlayInfo(packageName, userId);
+            if (updateState(oi.targetPackageName, packageName, userId, 0)) {
+                mListener.onOverlaysChanged(oi.targetPackageName, userId);
+            }
+        } catch (OverlayManagerSettings.BadKeyException e) {
+            Slog.e(TAG, "failed to update settings", e);
+        }
     }
 
     void onOverlayPackageUpgrading(@NonNull final String packageName, final int userId) {
-        Slog.wtf(TAG, "onOverlayPackageUpgrading called, but only pre-installed overlays supported");
+        if (DEBUG) {
+            Slog.d(TAG, "onOverlayPackageUpgrading packageName=" + packageName + " userId="
+                    + userId);
+        }
+
+        try {
+            final OverlayInfo oi = mSettings.getOverlayInfo(packageName, userId);
+            if (updateState(oi.targetPackageName, packageName, userId, FLAG_OVERLAY_IS_UPGRADING)) {
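+                // The existing idmap refers to the overlay apk that is being replaced;
+                // drop it while the upgrade is in flight.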
+                removeIdmapIfPossible(oi);
+                mListener.onOverlaysChanged(oi.targetPackageName, userId);
+            }
+        } catch (OverlayManagerSettings.BadKeyException e) {
+            Slog.e(TAG, "failed to update settings", e);
+        }
     }
 
     void onOverlayPackageUpgraded(@NonNull final String packageName, final int userId) {
-        Slog.wtf(TAG, "onOverlayPackageUpgraded called, but only pre-installed overlays supported");
+        if (DEBUG) {
+            Slog.d(TAG, "onOverlayPackageUpgraded packageName=" + packageName + " userId="
+                    + userId);
+        }
+
+        final PackageInfo pkg = mPackageManager.getPackageInfo(packageName, userId);
+        if (pkg == null) {
+            Slog.w(TAG, "overlay package " + packageName + " was upgraded, but couldn't be found");
+            onOverlayPackageRemoved(packageName, userId);
+            return;
+        }
+
+        try {
+            final OverlayInfo oldOi = mSettings.getOverlayInfo(packageName, userId);
+            if (!oldOi.targetPackageName.equals(pkg.overlayTarget)) {
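+                // The upgrade changed which package this overlay targets:
+                // re-initialize the settings entry for the new target.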
+                mSettings.init(packageName, userId, pkg.overlayTarget,
+                        pkg.applicationInfo.getBaseCodePath(), pkg.isStaticOverlayPackage(),
+                        pkg.overlayPriority);
+            }
+
+            if (updateState(pkg.overlayTarget, packageName, userId, 0)) {
+                mListener.onOverlaysChanged(pkg.overlayTarget, userId);
+            }
+        } catch (OverlayManagerSettings.BadKeyException e) {
+            Slog.e(TAG, "failed to update settings", e);
+        }
     }
 
     void onOverlayPackageRemoved(@NonNull final String packageName, final int userId) {
@@ -333,10 +382,8 @@
 
         try {
             final OverlayInfo oi = mSettings.getOverlayInfo(packageName, userId);
-            final PackageInfo targetPackage =
-                    mPackageManager.getPackageInfo(oi.targetPackageName, userId);
             boolean modified = mSettings.setEnabled(packageName, userId, enable);
-            modified |= updateState(targetPackage, overlayPackage, userId);
+            modified |= updateState(oi.targetPackageName, oi.packageName, userId, 0);
 
             if (modified) {
                 mListener.onOverlaysChanged(oi.targetPackageName, userId);
@@ -349,7 +396,8 @@
 
     boolean setEnabledExclusive(@NonNull final String packageName, final int userId) {
         if (DEBUG) {
-            Slog.d(TAG, String.format("setEnabledExclusive packageName=%s userId=%d", packageName, userId));
+            Slog.d(TAG, String.format("setEnabledExclusive packageName=%s userId=%d", packageName,
+                    userId));
         }
 
         final PackageInfo overlayPackage = mPackageManager.getPackageInfo(packageName, userId);
@@ -359,10 +407,9 @@
 
         try {
             final OverlayInfo oi = mSettings.getOverlayInfo(packageName, userId);
-            final PackageInfo targetPackage =
-                    mPackageManager.getPackageInfo(oi.targetPackageName, userId);
+            final String targetPackageName = oi.targetPackageName;
 
-            List<OverlayInfo> allOverlays = getOverlayInfosForTarget(oi.targetPackageName, userId);
+            List<OverlayInfo> allOverlays = getOverlayInfosForTarget(targetPackageName, userId);
 
             boolean modified = false;
 
@@ -384,15 +431,15 @@
 
                 // Disable the overlay.
                 modified |= mSettings.setEnabled(disabledOverlayPackageName, userId, false);
-                modified |= updateState(targetPackage, disabledOverlayPackageInfo, userId);
+                modified |= updateState(targetPackageName, disabledOverlayPackageName, userId, 0);
             }
 
             // Enable the selected overlay.
             modified |= mSettings.setEnabled(packageName, userId, true);
-            modified |= updateState(targetPackage, overlayPackage, userId);
+            modified |= updateState(targetPackageName, packageName, userId, 0);
 
             if (modified) {
-                mListener.onOverlaysChanged(oi.targetPackageName, userId);
+                mListener.onOverlaysChanged(targetPackageName, userId);
             }
             return true;
         } catch (OverlayManagerSettings.BadKeyException e) {
@@ -477,7 +524,8 @@
 
     List<String> getEnabledOverlayPackageNames(@NonNull final String targetPackageName,
             final int userId) {
-        final List<OverlayInfo> overlays = mSettings.getOverlaysForTarget(targetPackageName, userId);
+        final List<OverlayInfo> overlays = mSettings.getOverlaysForTarget(targetPackageName,
+                userId);
         final List<String> paths = new ArrayList<>(overlays.size());
         final int N = overlays.size();
         for (int i = 0; i < N; i++) {
@@ -492,36 +540,59 @@
     /**
      * Returns true if the settings/state was modified, false otherwise.
      */
-    private boolean updateState(@Nullable final PackageInfo targetPackage,
-            @NonNull final PackageInfo overlayPackage, final int userId)
+    private boolean updateState(@NonNull final String targetPackageName,
+            @NonNull final String overlayPackageName, final int userId, final int flags)
             throws OverlayManagerSettings.BadKeyException {
+
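+        // Resolve both packages by name; either may be null while it is being replaced,
+        // which calculateNewState handles below.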
+        final PackageInfo targetPackage = mPackageManager.getPackageInfo(targetPackageName, userId);
+        final PackageInfo overlayPackage = mPackageManager.getPackageInfo(overlayPackageName,
+                userId);
+
         // Static RROs targeting "android", i.e. framework-res.apk, are handled by native layers.
-        if (targetPackage != null &&
-                !("android".equals(targetPackage.packageName)
+        if (targetPackage != null && overlayPackage != null &&
+                !("android".equals(targetPackageName)
                         && overlayPackage.isStaticOverlayPackage())) {
             mIdmapManager.createIdmap(targetPackage, overlayPackage, userId);
         }
 
-        boolean modified = mSettings.setBaseCodePath(overlayPackage.packageName, userId,
-                overlayPackage.applicationInfo.getBaseCodePath());
+        boolean modified = false;
+        if (overlayPackage != null) {
+            modified |= mSettings.setBaseCodePath(overlayPackageName, userId,
+                    overlayPackage.applicationInfo.getBaseCodePath());
+        }
 
-        final int currentState = mSettings.getState(overlayPackage.packageName, userId);
-        final int newState = calculateNewState(targetPackage, overlayPackage, userId);
+        final @OverlayInfo.State int currentState = mSettings.getState(overlayPackageName, userId);
+        final @OverlayInfo.State int newState = calculateNewState(targetPackage, overlayPackage,
+                userId, flags);
         if (currentState != newState) {
             if (DEBUG) {
                 Slog.d(TAG, String.format("%s:%d: %s -> %s",
-                            overlayPackage.packageName, userId,
+                            overlayPackageName, userId,
                             OverlayInfo.stateToString(currentState),
                             OverlayInfo.stateToString(newState)));
             }
-            modified |= mSettings.setState(overlayPackage.packageName, userId, newState);
+            modified |= mSettings.setState(overlayPackageName, userId, newState);
         }
         return modified;
     }
 
-    private int calculateNewState(@Nullable final PackageInfo targetPackage,
-            @NonNull final PackageInfo overlayPackage, final int userId)
+    private @OverlayInfo.State int calculateNewState(@Nullable final PackageInfo targetPackage,
+            @Nullable final PackageInfo overlayPackage, final int userId, final int flags)
         throws OverlayManagerSettings.BadKeyException {
+
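+        // An in-progress upgrade of either package overrides every other check
+        // with a transient state.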
+        if ((flags & FLAG_TARGET_IS_UPGRADING) != 0) {
+            return STATE_TARGET_UPGRADING;
+        }
+
+        if ((flags & FLAG_OVERLAY_IS_UPGRADING) != 0) {
+            return STATE_OVERLAY_UPGRADING;
+        }
+
+        // Sanity check: the overlay package may only be null when one of the upgrading flags
+        // is set, and those cases are handled above.
+        if (DEBUG && overlayPackage == null) {
+            throw new IllegalArgumentException("null overlay package not compatible with no flags");
+        }
+
         if (targetPackage == null) {
             return STATE_MISSING_TARGET;
         }
diff --git a/services/core/java/com/android/server/om/OverlayManagerSettings.java b/services/core/java/com/android/server/om/OverlayManagerSettings.java
index 17b38de..a80cae4 100644
--- a/services/core/java/com/android/server/om/OverlayManagerSettings.java
+++ b/services/core/java/com/android/server/om/OverlayManagerSettings.java
@@ -145,7 +145,8 @@
         return mItems.get(idx).setEnabled(enable);
     }
 
-    int getState(@NonNull final String packageName, final int userId) throws BadKeyException {
+    @OverlayInfo.State int getState(@NonNull final String packageName, final int userId)
+            throws BadKeyException {
         final int idx = select(packageName, userId);
         if (idx < 0) {
             throw new BadKeyException(packageName, userId);
@@ -156,8 +157,8 @@
     /**
      * Returns true if the settings were modified, false if they remain the same.
      */
-    boolean setState(@NonNull final String packageName, final int userId, final int state)
-            throws BadKeyException {
+    boolean setState(@NonNull final String packageName, final int userId,
+            final @OverlayInfo.State int state) throws BadKeyException {
         final int idx = select(packageName, userId);
         if (idx < 0) {
             throw new BadKeyException(packageName, userId);
@@ -413,7 +414,7 @@
         private final String mPackageName;
         private final String mTargetPackageName;
         private String mBaseCodePath;
-        private int mState;
+        private @OverlayInfo.State int mState;
         private boolean mIsEnabled;
         private OverlayInfo mCache;
         private boolean mIsStatic;
@@ -421,7 +422,7 @@
 
         SettingsItem(@NonNull final String packageName, final int userId,
                 @NonNull final String targetPackageName, @NonNull final String baseCodePath,
-                final int state, final boolean isEnabled, final boolean isStatic,
+                final @OverlayInfo.State int state, final boolean isEnabled, final boolean isStatic,
                 final int priority) {
             mPackageName = packageName;
             mUserId = userId;
@@ -462,11 +463,11 @@
             return false;
         }
 
-        private int getState() {
+        private @OverlayInfo.State int getState() {
             return mState;
         }
 
-        private boolean setState(final int state) {
+        private boolean setState(final @OverlayInfo.State int state) {
             if (mState != state) {
                 mState = state;
                 invalidateCache();
diff --git a/services/core/java/com/android/server/pm/InstantAppResolver.java b/services/core/java/com/android/server/pm/InstantAppResolver.java
index af446ba..6e898bb 100644
--- a/services/core/java/com/android/server/pm/InstantAppResolver.java
+++ b/services/core/java/com/android/server/pm/InstantAppResolver.java
@@ -377,7 +377,7 @@
         failureIntent.setFlags(failureIntent.getFlags() | Intent.FLAG_IGNORE_EPHEMERAL);
         failureIntent.setLaunchToken(token);
         ArrayList<AuxiliaryResolveInfo.AuxiliaryFilter> filters = null;
-        boolean isWebIntent = origIntent.isBrowsableWebIntent();
+        boolean isWebIntent = origIntent.isWebIntent();
         for (InstantAppResolveInfo instantAppResolveInfo : instantAppResolveInfoList) {
             if (shaPrefix.length > 0 && instantAppResolveInfo.shouldLetInstallerDecide()) {
                 Slog.e(TAG, "InstantAppResolveInfo with mShouldLetInstallerDecide=true when digest"
@@ -448,7 +448,7 @@
                 instantAppInfo.getIntentFilters();
         if (instantAppFilters == null || instantAppFilters.isEmpty()) {
             // No filters on web intent; no matches, 2nd phase unnecessary.
-            if (origIntent.isBrowsableWebIntent()) {
+            if (origIntent.isWebIntent()) {
                 return null;
             }
             // No filters; we need to start phase two
diff --git a/services/core/java/com/android/server/pm/PackageManagerService.java b/services/core/java/com/android/server/pm/PackageManagerService.java
index 8166332..2816bbd 100644
--- a/services/core/java/com/android/server/pm/PackageManagerService.java
+++ b/services/core/java/com/android/server/pm/PackageManagerService.java
@@ -5972,7 +5972,7 @@
         if (!skipPackageCheck && intent.getPackage() != null) {
             return false;
         }
-        if (!intent.isBrowsableWebIntent()) {
+        if (!intent.isWebIntent()) {
             // for non web intents, we should not resolve externally if an app already exists to
             // handle it or if the caller didn't explicitly request it.
             if ((resolvedActivities != null && resolvedActivities.size() != 0)
@@ -6683,7 +6683,7 @@
                                         ai.packageName, ai.versionCode, null /* splitName */);
             }
         }
-        if (intent.isBrowsableWebIntent() && auxiliaryResponse == null) {
+        if (intent.isWebIntent() && auxiliaryResponse == null) {
             return result;
         }
         final PackageSetting ps = mSettings.mPackages.get(mInstantAppInstallerActivity.packageName);
diff --git a/services/usage/java/com/android/server/usage/UsageStatsService.java b/services/usage/java/com/android/server/usage/UsageStatsService.java
index 36a2a95..3b0fd1f 100644
--- a/services/usage/java/com/android/server/usage/UsageStatsService.java
+++ b/services/usage/java/com/android/server/usage/UsageStatsService.java
@@ -654,7 +654,7 @@
         public boolean isAppInactive(String packageName, int userId) {
             try {
                 userId = ActivityManager.getService().handleIncomingUser(Binder.getCallingPid(),
-                        Binder.getCallingUid(), userId, false, true, "isAppInactive", null);
+                        Binder.getCallingUid(), userId, false, false, "isAppInactive", null);
             } catch (RemoteException re) {
                 throw re.rethrowFromSystemServer();
             }
diff --git a/telephony/java/android/telephony/TelephonyManager.java b/telephony/java/android/telephony/TelephonyManager.java
index 5c290da..03a8f33 100644
--- a/telephony/java/android/telephony/TelephonyManager.java
+++ b/telephony/java/android/telephony/TelephonyManager.java
@@ -6587,11 +6587,48 @@
      * @hide
      */
     public void setBasebandVersionForPhone(int phoneId, String version) {
+        setTelephonyProperty(phoneId, TelephonyProperties.PROPERTY_BASEBAND_VERSION, version);
+    }
+
+    /**
+     * Get baseband version for the default phone.
+     *
+     * @return baseband version.
+     * @hide
+     */
+    public String getBasebandVersion() {
+        int phoneId = getPhoneId();
+        return getBasebandVersionForPhone(phoneId);
+    }
+
+    /**
+     * Get the baseband version for a phone using the legacy, suffix-based system property.
+     * This path was kept in P to ensure backward compatibility.
+     *
+     * @return baseband version.
+     * @hide
+     */
+    private String getBasebandVersionLegacy(int phoneId) {
         if (SubscriptionManager.isValidPhoneId(phoneId)) {
             String prop = TelephonyProperties.PROPERTY_BASEBAND_VERSION +
                     ((phoneId == 0) ? "" : Integer.toString(phoneId));
-            SystemProperties.set(prop, version);
+            return SystemProperties.get(prop);
         }
+        return null;
+    }
+
+    /**
+     * Get baseband version by phone id.
+     *
+     * @return baseband version.
+     * @hide
+     */
+    public String getBasebandVersionForPhone(int phoneId) {
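+        // Migrate any value still stored under the legacy per-phone property into the
+        // indexed telephony property, then read it back.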
+        String version = getBasebandVersionLegacy(phoneId);
+        if (version != null && !version.isEmpty()) {
+            setBasebandVersionForPhone(phoneId, version);
+        }
+        return getTelephonyProperty(phoneId, TelephonyProperties.PROPERTY_BASEBAND_VERSION, "");
     }
 
     /**
diff --git a/tests/ActivityManagerPerfTests/test-app/src/com/android/frameworks/perftests/amteststestapp/TestActivity.java b/tests/ActivityManagerPerfTests/test-app/src/com/android/frameworks/perftests/amteststestapp/TestActivity.java
index 7ea9ba3..1f06121 100644
--- a/tests/ActivityManagerPerfTests/test-app/src/com/android/frameworks/perftests/amteststestapp/TestActivity.java
+++ b/tests/ActivityManagerPerfTests/test-app/src/com/android/frameworks/perftests/amteststestapp/TestActivity.java
@@ -17,16 +17,19 @@
 package com.android.frameworks.perftests.amteststestapp;
 
 import android.app.Activity;
-import android.os.Bundle;
+import android.os.Looper;
+import android.os.MessageQueue;
 
 import com.android.frameworks.perftests.am.util.Constants;
 import com.android.frameworks.perftests.am.util.Utils;
 
 public class TestActivity extends Activity {
     @Override
-    protected void onCreate(Bundle savedInstanceState) {
-        super.onCreate(savedInstanceState);
-
-        Utils.sendTime(getIntent(), Constants.TYPE_ACTIVITY_CREATED);
+    protected void onResume() {
+        super.onResume();
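+        // Send the start timestamp from an idle handler so it is reported only after the
+        // activity has fully resumed, not merely after onCreate().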
+        Looper.myQueue().addIdleHandler(() -> {
+            Utils.sendTime(getIntent(), Constants.TYPE_TARGET_PACKAGE_START);
+            return false;
+        });
     }
 }
diff --git a/tests/ActivityManagerPerfTests/tests/src/com/android/frameworks/perftests/am/util/TargetPackageUtils.java b/tests/ActivityManagerPerfTests/tests/src/com/android/frameworks/perftests/am/util/TargetPackageUtils.java
index c867141..26a8e7b 100644
--- a/tests/ActivityManagerPerfTests/tests/src/com/android/frameworks/perftests/am/util/TargetPackageUtils.java
+++ b/tests/ActivityManagerPerfTests/tests/src/com/android/frameworks/perftests/am/util/TargetPackageUtils.java
@@ -62,7 +62,7 @@
             sleep();
         }
         // make sure Application has run
-        timeReceiver.getReceivedTimeNs(Constants.TYPE_ACTIVITY_CREATED);
+        timeReceiver.getReceivedTimeNs(Constants.TYPE_TARGET_PACKAGE_START);
         Utils.drainBroadcastQueue();
     }
 
diff --git a/tests/ActivityManagerPerfTests/utils/src/com/android/frameworks/perftests/am/util/Constants.java b/tests/ActivityManagerPerfTests/utils/src/com/android/frameworks/perftests/am/util/Constants.java
index 6528028..f35c2fd 100644
--- a/tests/ActivityManagerPerfTests/utils/src/com/android/frameworks/perftests/am/util/Constants.java
+++ b/tests/ActivityManagerPerfTests/utils/src/com/android/frameworks/perftests/am/util/Constants.java
@@ -17,7 +17,7 @@
 package com.android.frameworks.perftests.am.util;
 
 public class Constants {
-    public static final String TYPE_ACTIVITY_CREATED = "activity_create";
+    public static final String TYPE_TARGET_PACKAGE_START = "target_package_start";
     public static final String TYPE_BROADCAST_RECEIVE = "broadcast_receive";
 
     public static final String ACTION_BROADCAST_MANIFEST_RECEIVE =