Report input metrics using LatencyAggregator

The new LatencyAggregator class is responsible for reporting input
latency metrics to statsd. It aggregates per-stage latencies into
sketches that statsd pulls periodically, and pushes an atom whenever an
event's end-to-end latency exceeds a server-configurable threshold.
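
For context, a rough sketch of how the pieces fit together (an
illustration only, not code in this change; based on the wiring shown
in the diff below):

    // InputDispatcher owns both objects; the tracker forwards each
    // completed timeline to the aggregator.
    LatencyAggregator mLatencyAggregator;  // registers the pull-atom callback
    LatencyTracker mLatencyTracker{&mLatencyAggregator};

    // Once all per-connection stages of a timeline are known, the
    // tracker invokes
    //     mLatencyAggregator.processTimeline(timeline);
    // which (1) adds each latency stage to the DOWN/MOVE sketches that
    // statsd pulls via the INPUT_EVENT_LATENCY_SKETCH atom, and
    // (2) pushes a SLOW_INPUT_EVENT_REPORTED atom when the end-to-end
    // latency exceeds the server-configurable threshold (200 ms by
    // default, with at most one report per minute by default).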

Bug: 167947340
Test: tested using statsd_testdrive
Change-Id: I1990851a44df897fe989046e7363bd71e1c0c129
diff --git a/services/inputflinger/dispatcher/Android.bp b/services/inputflinger/dispatcher/Android.bp
index a9b594c..6616795 100644
--- a/services/inputflinger/dispatcher/Android.bp
+++ b/services/inputflinger/dispatcher/Android.bp
@@ -41,6 +41,7 @@
         "InputEventTimeline.cpp",
         "InputState.cpp",
         "InputTarget.cpp",
+        "LatencyAggregator.cpp",
         "LatencyTracker.cpp",
         "Monitor.cpp",
         "TouchState.cpp",
@@ -58,9 +59,12 @@
         "libinput",
         "liblog",
         "libstatslog",
+        "libstatspull",
+        "libstatssocket",
         "libui",
         "libutils",
         "lib-platform-compat-native-api",
+        "server_configurable_flags",
     ],
     static_libs: [
         "libattestation",
diff --git a/services/inputflinger/dispatcher/InputDispatcher.cpp b/services/inputflinger/dispatcher/InputDispatcher.cpp
index f36aecf..83a203e 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.cpp
+++ b/services/inputflinger/dispatcher/InputDispatcher.cpp
@@ -517,7 +517,8 @@
         mFocusedDisplayId(ADISPLAY_ID_DEFAULT),
         mFocusedWindowRequestedPointerCapture(false),
         mWindowTokenWithPointerCapture(nullptr),
-        mLatencyTracker(&mEmptyProcessor),
+        mLatencyAggregator(),
+        mLatencyTracker(&mLatencyAggregator),
         mCompatService(getCompatService()) {
     mLooper = new Looper(false);
     mReporter = createInputReporter();
@@ -5114,6 +5115,7 @@
     dump += StringPrintf(INDENT2 "KeyRepeatTimeout: %" PRId64 "ms\n",
                          ns2ms(mConfig.keyRepeatTimeout));
     dump += mLatencyTracker.dump(INDENT2);
+    dump += mLatencyAggregator.dump(INDENT2);
 }
 
 void InputDispatcher::dumpMonitors(std::string& dump, const std::vector<Monitor>& monitors) {
diff --git a/services/inputflinger/dispatcher/InputDispatcher.h b/services/inputflinger/dispatcher/InputDispatcher.h
index e592f42..bb3f3e6 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.h
+++ b/services/inputflinger/dispatcher/InputDispatcher.h
@@ -29,6 +29,7 @@
 #include "InputState.h"
 #include "InputTarget.h"
 #include "InputThread.h"
+#include "LatencyAggregator.h"
 #include "LatencyTracker.h"
 #include "Monitor.h"
 #include "TouchState.h"
@@ -636,12 +637,7 @@
     void doOnPointerDownOutsideFocusLockedInterruptible(CommandEntry* commandEntry) REQUIRES(mLock);
 
     // Statistics gathering.
-    class EmptyTimelineProcessor : public InputEventTimelineProcessor {
-        void processTimeline(const InputEventTimeline& timeline) override {
-            // TODO(b/167947340): report latency information to the real aggregator
-        }
-    } mEmptyProcessor GUARDED_BY(mLock);
-
+    LatencyAggregator mLatencyAggregator GUARDED_BY(mLock);
     LatencyTracker mLatencyTracker GUARDED_BY(mLock);
     void traceInboundQueueLengthLocked() REQUIRES(mLock);
     void traceOutboundQueueLength(const Connection& connection);
diff --git a/services/inputflinger/dispatcher/LatencyAggregator.cpp b/services/inputflinger/dispatcher/LatencyAggregator.cpp
new file mode 100644
index 0000000..d79e6ae
--- /dev/null
+++ b/services/inputflinger/dispatcher/LatencyAggregator.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LatencyAggregator"
+#include "LatencyAggregator.h"
+
+#include <inttypes.h>
+
+#include <android-base/stringprintf.h>
+#include <input/Input.h>
+#include <log/log.h>
+#include <server_configurable_flags/get_flags.h>
+
+using android::base::StringPrintf;
+using std::chrono_literals::operator""ms;
+
+// Category (=namespace) name for the input settings that are applied at boot time
+static const char* INPUT_NATIVE_BOOT = "input_native_boot";
+// Feature flag name for the threshold of end-to-end touch latency that triggers a
+// SlowInputEventReported atom to be pushed
+static const char* SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS =
+        "slow_event_min_reporting_latency_millis";
+// Feature flag name for the minimum delay before reporting a slow event after having just
+// reported a slow event. This helps limit the amount of data sent to the server.
+static const char* SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS =
+        "slow_event_min_reporting_interval_millis";
+
+// If an event has end-to-end latency > 200 ms, it will get reported as a slow event.
+std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY = 200ms;
+// If we receive two slow events less than 1 min apart, we will only report 1 of them.
+std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL = 60000ms;
+
+static std::chrono::milliseconds getSlowEventMinReportingLatency() {
+    std::string millis = server_configurable_flags::
+            GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS,
+                                      std::to_string(
+                                              DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY.count()));
+    return std::chrono::milliseconds(std::stoi(millis));
+}
+
+static std::chrono::milliseconds getSlowEventMinReportingInterval() {
+    std::string millis = server_configurable_flags::
+            GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS,
+                                      std::to_string(
+                                              DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL.count()));
+    return std::chrono::milliseconds(std::stoi(millis));
+}
+
+namespace android::inputdispatcher {
+
+void Sketch::addValue(nsecs_t value) {
+    // TODO(b/167947340): replace with real sketch
+}
+
+android::util::BytesField Sketch::serialize() {
+    return android::util::BytesField("TODO(b/167947340): use real sketch data", 4 /*length*/);
+}
+
+void Sketch::reset() {
+    // TODO(b/167947340): reset the sketch
+}
+
+LatencyAggregator::LatencyAggregator() {
+    AStatsManager_setPullAtomCallback(android::util::INPUT_EVENT_LATENCY_SKETCH, nullptr,
+                                      LatencyAggregator::pullAtomCallback, this);
+}
+
+LatencyAggregator::~LatencyAggregator() {
+    AStatsManager_clearPullAtomCallback(android::util::INPUT_EVENT_LATENCY_SKETCH);
+}
+
+AStatsManager_PullAtomCallbackReturn LatencyAggregator::pullAtomCallback(int32_t atomTag,
+                                                                         AStatsEventList* data,
+                                                                         void* cookie) {
+    LatencyAggregator* pAggregator = reinterpret_cast<LatencyAggregator*>(cookie);
+    if (pAggregator == nullptr) {
+        LOG_ALWAYS_FATAL("pAggregator is null!");
+    }
+    return pAggregator->pullData(data);
+}
+
+void LatencyAggregator::processTimeline(const InputEventTimeline& timeline) {
+    processStatistics(timeline);
+    processSlowEvent(timeline);
+}
+
+void LatencyAggregator::processStatistics(const InputEventTimeline& timeline) {
+    std::array<Sketch, SketchIndex::SIZE>& sketches =
+            timeline.isDown ? mDownSketches : mMoveSketches;
+
+    // Process common ones first
+    const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
+
+    sketches[SketchIndex::EVENT_TO_READ].addValue(eventToRead);
+
+    // Now process per-connection ones
+    for (const auto& [connectionToken, connectionTimeline] : timeline.connectionTimelines) {
+        if (!connectionTimeline.isComplete()) {
+            continue;
+        }
+        const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
+        const nsecs_t deliverToConsume =
+                connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
+        const nsecs_t consumeToFinish =
+                connectionTimeline.finishTime - connectionTimeline.consumeTime;
+        const nsecs_t gpuCompletedTime =
+                connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
+        const nsecs_t presentTime =
+                connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
+        const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
+        const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;
+        const nsecs_t endToEnd = presentTime - timeline.eventTime;
+
+        sketches[SketchIndex::READ_TO_DELIVER].addValue(readToDeliver);
+        sketches[SketchIndex::DELIVER_TO_CONSUME].addValue(deliverToConsume);
+        sketches[SketchIndex::CONSUME_TO_FINISH].addValue(consumeToFinish);
+        sketches[SketchIndex::CONSUME_TO_GPU_COMPLETE].addValue(consumeToGpuComplete);
+        sketches[SketchIndex::GPU_COMPLETE_TO_PRESENT].addValue(gpuCompleteToPresent);
+        sketches[SketchIndex::END_TO_END].addValue(endToEnd);
+    }
+}
+
+AStatsManager_PullAtomCallbackReturn LatencyAggregator::pullData(AStatsEventList* data) {
+    android::util::addAStatsEvent(data, android::util::INPUT_EVENT_LATENCY_SKETCH,
+                                  // DOWN sketches
+                                  mDownSketches[SketchIndex::EVENT_TO_READ].serialize(),
+                                  mDownSketches[SketchIndex::READ_TO_DELIVER].serialize(),
+                                  mDownSketches[SketchIndex::DELIVER_TO_CONSUME].serialize(),
+                                  mDownSketches[SketchIndex::CONSUME_TO_FINISH].serialize(),
+                                  mDownSketches[SketchIndex::CONSUME_TO_GPU_COMPLETE].serialize(),
+                                  mDownSketches[SketchIndex::GPU_COMPLETE_TO_PRESENT].serialize(),
+                                  mDownSketches[SketchIndex::END_TO_END].serialize(),
+                                  // MOVE sketches
+                                  mMoveSketches[SketchIndex::EVENT_TO_READ].serialize(),
+                                  mMoveSketches[SketchIndex::READ_TO_DELIVER].serialize(),
+                                  mMoveSketches[SketchIndex::DELIVER_TO_CONSUME].serialize(),
+                                  mMoveSketches[SketchIndex::CONSUME_TO_FINISH].serialize(),
+                                  mMoveSketches[SketchIndex::CONSUME_TO_GPU_COMPLETE].serialize(),
+                                  mMoveSketches[SketchIndex::GPU_COMPLETE_TO_PRESENT].serialize(),
+                                  mMoveSketches[SketchIndex::END_TO_END].serialize());
+
+    for (size_t i = 0; i < SketchIndex::SIZE; i++) {
+        mDownSketches[i].reset();
+        mMoveSketches[i].reset();
+    }
+    return AStatsManager_PULL_SUCCESS;
+}
+
+void LatencyAggregator::processSlowEvent(const InputEventTimeline& timeline) {
+    static const std::chrono::duration sSlowEventThreshold = getSlowEventMinReportingLatency();
+    static const std::chrono::duration sSlowEventReportingInterval =
+            getSlowEventMinReportingInterval();
+    for (const auto& [token, connectionTimeline] : timeline.connectionTimelines) {
+        if (!connectionTimeline.isComplete()) {
+            continue;
+        }
+        mNumEventsSinceLastSlowEventReport++;
+        const nsecs_t presentTime =
+                connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
+        const std::chrono::nanoseconds endToEndLatency =
+                std::chrono::nanoseconds(presentTime - timeline.eventTime);
+        if (endToEndLatency < sSlowEventThreshold) {
+            continue;
+        }
+        // This is a slow event. Before we report it, check if we are reporting too often
+        const std::chrono::duration elapsedSinceLastReport =
+                std::chrono::nanoseconds(timeline.eventTime - mLastSlowEventTime);
+        if (elapsedSinceLastReport < sSlowEventReportingInterval) {
+            mNumSkippedSlowEvents++;
+            continue;
+        }
+
+        const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
+        const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
+        const nsecs_t deliverToConsume =
+                connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
+        const nsecs_t consumeToFinish =
+                connectionTimeline.finishTime - connectionTimeline.consumeTime;
+        const nsecs_t gpuCompletedTime =
+                connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
+        const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
+        const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;
+
+        android::util::stats_write(android::util::SLOW_INPUT_EVENT_REPORTED, timeline.isDown,
+                                   static_cast<int32_t>(ns2us(eventToRead)),
+                                   static_cast<int32_t>(ns2us(readToDeliver)),
+                                   static_cast<int32_t>(ns2us(deliverToConsume)),
+                                   static_cast<int32_t>(ns2us(consumeToFinish)),
+                                   static_cast<int32_t>(ns2us(consumeToGpuComplete)),
+                                   static_cast<int32_t>(ns2us(gpuCompleteToPresent)),
+                                   static_cast<int32_t>(ns2us(endToEndLatency.count())),
+                                   static_cast<int32_t>(mNumEventsSinceLastSlowEventReport),
+                                   static_cast<int32_t>(mNumSkippedSlowEvents));
+        mNumEventsSinceLastSlowEventReport = 0;
+        mNumSkippedSlowEvents = 0;
+        mLastSlowEventTime = timeline.eventTime;
+    }
+}
+
+std::string LatencyAggregator::dump(const char* prefix) {
+    return StringPrintf("%sLatencyAggregator:", prefix) +
+            StringPrintf("\n%s  mLastSlowEventTime=%" PRId64, prefix, mLastSlowEventTime) +
+            StringPrintf("\n%s  mNumEventsSinceLastSlowEventReport = %zu", prefix,
+                         mNumEventsSinceLastSlowEventReport) +
+            StringPrintf("\n%s  mNumSkippedSlowEvents = %zu", prefix, mNumSkippedSlowEvents);
+}
+
+} // namespace android::inputdispatcher
diff --git a/services/inputflinger/dispatcher/LatencyAggregator.h b/services/inputflinger/dispatcher/LatencyAggregator.h
new file mode 100644
index 0000000..ce54360
--- /dev/null
+++ b/services/inputflinger/dispatcher/LatencyAggregator.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _UI_INPUT_INPUTDISPATCHER_LATENCYAGGREGATOR_H
+#define _UI_INPUT_INPUTDISPATCHER_LATENCYAGGREGATOR_H
+
+#include <statslog.h>
+#include <utils/Timers.h>
+
+#include "InputEventTimeline.h"
+
+namespace android::inputdispatcher {
+
+enum SketchIndex : size_t {
+    EVENT_TO_READ = 0,
+    READ_TO_DELIVER = 1,
+    DELIVER_TO_CONSUME = 2,
+    CONSUME_TO_FINISH = 3,
+    CONSUME_TO_GPU_COMPLETE = 4,
+    GPU_COMPLETE_TO_PRESENT = 5,
+    END_TO_END = 6, // EVENT_TO_PRESENT
+    SIZE = 7,       // Must be last
+};
+
+// The complete timeline of an input event, in chronological order:
+// eventTime
+// readTime
+// <---- after this point, the data becomes per-connection
+// deliveryTime // time at which the event was sent to the receiver
+// consumeTime  // time at which the receiver read the event
+// finishTime   // time at which the finish event was received
+// GraphicsTimeline::GPU_COMPLETED_TIME
+// GraphicsTimeline::PRESENT_TIME
+
+/**
+ * TODO(b/167947340): Replace this class with a KLL sketch
+ */
+class Sketch {
+public:
+    void addValue(nsecs_t value);
+    android::util::BytesField serialize();
+    void reset();
+};
+
+/**
+ * Aggregate the latencies of the processed events into sketches and report slow events to statsd
+ */
+class LatencyAggregator final : public InputEventTimelineProcessor {
+public:
+    LatencyAggregator();
+    /**
+     * Record a complete event timeline
+     */
+    void processTimeline(const InputEventTimeline& timeline) override;
+
+    std::string dump(const char* prefix);
+
+    ~LatencyAggregator();
+
+private:
+    static AStatsManager_PullAtomCallbackReturn pullAtomCallback(int32_t atomTag,
+                                                                 AStatsEventList* data,
+                                                                 void* cookie);
+    AStatsManager_PullAtomCallbackReturn pullData(AStatsEventList* data);
+    // ---------- Slow event handling ----------
+    void processSlowEvent(const InputEventTimeline& timeline);
+    nsecs_t mLastSlowEventTime = 0;
+    // How many slow events have been skipped due to rate limiting
+    size_t mNumSkippedSlowEvents = 0;
+    // How many events have been received since the last time we reported a slow event
+    size_t mNumEventsSinceLastSlowEventReport = 0;
+
+    // ---------- Statistics handling ----------
+    void processStatistics(const InputEventTimeline& timeline);
+    // Sketches
+    std::array<Sketch, SketchIndex::SIZE> mDownSketches;
+    std::array<Sketch, SketchIndex::SIZE> mMoveSketches;
+};
+
+} // namespace android::inputdispatcher
+
+#endif // _UI_INPUT_INPUTDISPATCHER_LATENCYAGGREGATOR_H
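
Appendix (illustration only, not part of the change): the slow-event
gating in processSlowEvent(), reduced to a standalone sketch using the
default flag values. Everything below is hypothetical example code, not
dispatcher code.

    // Demonstrates the rate limiting: a slow event (end-to-end latency
    // >= 200 ms) is reported only if at least 60 s have passed since the
    // previously reported slow event; otherwise it is skipped.
    #include <chrono>
    #include <cstdio>

    int main() {
        using namespace std::chrono;
        const milliseconds threshold = 200ms;     // slow_event_min_reporting_latency_millis
        const milliseconds minInterval = 60000ms; // slow_event_min_reporting_interval_millis

        struct Event { milliseconds eventTime; milliseconds endToEnd; };
        const Event events[] = {
                {0ms, 250ms},     // slow, nothing reported recently -> reported
                {10000ms, 300ms}, // slow, but only 10 s since last report -> skipped
                {70000ms, 220ms}, // slow, 70 s since last report -> reported
        };

        milliseconds lastReport = -minInterval; // lets the first slow event be reported
        for (const Event& e : events) {
            if (e.endToEnd < threshold) continue; // not slow; nothing to report
            if (e.eventTime - lastReport < minInterval) {
                printf("t=%5lld ms: slow event skipped (rate limited)\n",
                       static_cast<long long>(e.eventTime.count()));
                continue;
            }
            printf("t=%5lld ms: slow event reported\n",
                   static_cast<long long>(e.eventTime.count()));
            lastReport = e.eventTime;
        }
        return 0;
    }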