SurfaceFlinger: expose duration as part of phase offsets

Expose the app/SF work durations together with the offset configs.
These will be used in upcoming CLs to calculate when to wake up the
corresponding EventThread.

Test: examine offsets and durations via dumpsys SurfaceFlinger
Bug: 162888874
Change-Id: Ifc1848358823570a34760f29ea975c286fbd8837
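
For context, here is a minimal standalone sketch (not part of the patch) of the offset-to-duration conversion that the new sfOffsetToDuration/appOffsetToDuration helpers in this CL perform. It uses plain nsecs_t instead of std::chrono, and the 60 Hz period and 1 ms offsets are illustrative values matching the 1'000'000 ns sysprop defaults referenced in the diff, not anything computed by this change.

#include <cstdint>
#include <cstdio>

using nsecs_t = int64_t;

// SF wakes up sfOffset ns after the previous HW vsync, so its work window is
// whatever remains of the vsync period.
nsecs_t sfOffsetToDuration(nsecs_t sfOffset, nsecs_t vsyncPeriod) {
    return vsyncPeriod - sfOffset;
}

// The app targets the SF wakeup one vsync later; if the raw value comes out
// shorter than a period, push it out by one more period.
nsecs_t appOffsetToDuration(nsecs_t appOffset, nsecs_t sfOffset, nsecs_t vsyncPeriod) {
    nsecs_t duration = vsyncPeriod + (sfOffset - appOffset);
    if (duration < vsyncPeriod) duration += vsyncPeriod;
    return duration;
}

int main() {
    const nsecs_t vsyncPeriod = 16'666'667;  // ~60 Hz, illustrative
    const nsecs_t appOffset = 1'000'000;     // 1 ms, mirroring the sysprop defaults
    const nsecs_t sfOffset = 1'000'000;
    std::printf("SF work duration:  %lld ns\n",
                static_cast<long long>(sfOffsetToDuration(sfOffset, vsyncPeriod)));
    std::printf("app work duration: %lld ns\n",
                static_cast<long long>(appOffsetToDuration(appOffset, sfOffset, vsyncPeriod)));
    return 0;
}

With the 1 ms defaults at 60 Hz this yields a ~15.67 ms SF work duration and a full-period (~16.67 ms) app work duration, the kind of values the new dump lines report alongside the phases.
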
diff --git a/services/surfaceflinger/Scheduler/VsyncConfiguration.cpp b/services/surfaceflinger/Scheduler/VsyncConfiguration.cpp
index 5373742..aac2569 100644
--- a/services/surfaceflinger/Scheduler/VsyncConfiguration.cpp
+++ b/services/surfaceflinger/Scheduler/VsyncConfiguration.cpp
@@ -51,145 +51,13 @@
} // namespace
-namespace android::scheduler {
+namespace android::scheduler::impl {
-PhaseConfiguration::~PhaseConfiguration() = default;
+VsyncConfiguration::VsyncConfiguration(float currentFps) : mRefreshRateFps(currentFps) {}
-namespace impl {
-
-PhaseOffsets::PhaseOffsets(const scheduler::RefreshRateConfigs& refreshRateConfigs)
- : PhaseOffsets(getRefreshRatesFromConfigs(refreshRateConfigs),
- refreshRateConfigs.getCurrentRefreshRate().getFps(),
- sysprop::vsync_event_phase_offset_ns(1000000),
- sysprop::vsync_sf_event_phase_offset_ns(1000000),
- getProperty("debug.sf.early_phase_offset_ns"),
- getProperty("debug.sf.early_gl_phase_offset_ns"),
- getProperty("debug.sf.early_app_phase_offset_ns"),
- getProperty("debug.sf.early_gl_app_phase_offset_ns"),
- // Below defines the threshold when an offset is considered to be negative,
- // i.e. targeting for the N+2 vsync instead of N+1. This means that: For offset
- // < threshold, SF wake up (vsync_duration - offset) before HW vsync. For
- // offset >= threshold, SF wake up (2 * vsync_duration - offset) before HW
- // vsync.
- getProperty("debug.sf.phase_offset_threshold_for_next_vsync_ns")
- .value_or(std::numeric_limits<nsecs_t>::max())) {}
-
-PhaseOffsets::PhaseOffsets(const std::vector<float>& refreshRates, float currentFps,
- nsecs_t vsyncPhaseOffsetNs, nsecs_t sfVSyncPhaseOffsetNs,
- std::optional<nsecs_t> earlySfOffsetNs,
- std::optional<nsecs_t> earlyGlSfOffsetNs,
- std::optional<nsecs_t> earlyAppOffsetNs,
- std::optional<nsecs_t> earlyGlAppOffsetNs, nsecs_t thresholdForNextVsync)
- : mVSyncPhaseOffsetNs(vsyncPhaseOffsetNs),
- mSfVSyncPhaseOffsetNs(sfVSyncPhaseOffsetNs),
- mEarlySfOffsetNs(earlySfOffsetNs),
- mEarlyGlSfOffsetNs(earlyGlSfOffsetNs),
- mEarlyAppOffsetNs(earlyAppOffsetNs),
- mEarlyGlAppOffsetNs(earlyGlAppOffsetNs),
- mThresholdForNextVsync(thresholdForNextVsync),
- mOffsets(initializeOffsets(refreshRates)),
- mRefreshRateFps(currentFps) {}
-
-void PhaseOffsets::dump(std::string& result) const {
- const auto [early, earlyGl, late] = getCurrentOffsets();
- using base::StringAppendF;
- StringAppendF(&result,
- " app phase: %9" PRId64 " ns\t SF phase: %9" PRId64 " ns\n"
- " early app phase: %9" PRId64 " ns\t early SF phase: %9" PRId64 " ns\n"
- " GL early app phase: %9" PRId64 " ns\tGL early SF phase: %9" PRId64 " ns\n"
- "next VSYNC threshold: %9" PRId64 " ns\n",
- late.app, late.sf, early.app, early.sf, earlyGl.app, earlyGl.sf,
- mThresholdForNextVsync);
-}
-
-std::unordered_map<float, PhaseOffsets::Offsets> PhaseOffsets::initializeOffsets(
- const std::vector<float>& refreshRates) const {
- std::unordered_map<float, Offsets> offsets;
-
- for (const auto& refreshRate : refreshRates) {
- offsets.emplace(refreshRate,
- getPhaseOffsets(refreshRate, static_cast<nsecs_t>(1e9f / refreshRate)));
- }
- return offsets;
-}
-
-PhaseOffsets::Offsets PhaseOffsets::getPhaseOffsets(float fps, nsecs_t vsyncPeriod) const {
- if (fps > 65.0f) {
- return getHighFpsOffsets(vsyncPeriod);
- } else {
- return getDefaultOffsets(vsyncPeriod);
- }
-}
-
-PhaseOffsets::Offsets PhaseOffsets::getDefaultOffsets(nsecs_t vsyncDuration) const {
- return {
- {
- mEarlySfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) < mThresholdForNextVsync
- ? mEarlySfOffsetNs.value_or(mSfVSyncPhaseOffsetNs)
- : mEarlySfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) - vsyncDuration,
-
- mEarlyAppOffsetNs.value_or(mVSyncPhaseOffsetNs),
- },
- {
- mEarlyGlSfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) < mThresholdForNextVsync
- ? mEarlyGlSfOffsetNs.value_or(mSfVSyncPhaseOffsetNs)
- : mEarlyGlSfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) - vsyncDuration,
-
- mEarlyGlAppOffsetNs.value_or(mVSyncPhaseOffsetNs),
- },
- {
- mSfVSyncPhaseOffsetNs < mThresholdForNextVsync
- ? mSfVSyncPhaseOffsetNs
- : mSfVSyncPhaseOffsetNs - vsyncDuration,
-
- mVSyncPhaseOffsetNs,
- },
- };
-}
-
-PhaseOffsets::Offsets PhaseOffsets::getHighFpsOffsets(nsecs_t vsyncDuration) const {
- const auto highFpsLateAppOffsetNs =
- getProperty("debug.sf.high_fps_late_app_phase_offset_ns").value_or(2000000);
- const auto highFpsLateSfOffsetNs =
- getProperty("debug.sf.high_fps_late_sf_phase_offset_ns").value_or(1000000);
-
- const auto highFpsEarlySfOffsetNs = getProperty("debug.sf.high_fps_early_phase_offset_ns");
- const auto highFpsEarlyGlSfOffsetNs = getProperty("debug.sf.high_fps_early_gl_phase_offset_ns");
- const auto highFpsEarlyAppOffsetNs = getProperty("debug.sf.high_fps_early_app_phase_offset_ns");
- const auto highFpsEarlyGlAppOffsetNs =
- getProperty("debug.sf.high_fps_early_gl_app_phase_offset_ns");
-
- return {
- {
- highFpsEarlySfOffsetNs.value_or(highFpsLateSfOffsetNs) < mThresholdForNextVsync
- ? highFpsEarlySfOffsetNs.value_or(highFpsLateSfOffsetNs)
- : highFpsEarlySfOffsetNs.value_or(highFpsLateSfOffsetNs) -
- vsyncDuration,
-
- highFpsEarlyAppOffsetNs.value_or(highFpsLateAppOffsetNs),
- },
- {
- highFpsEarlyGlSfOffsetNs.value_or(highFpsLateSfOffsetNs) <
- mThresholdForNextVsync
- ? highFpsEarlyGlSfOffsetNs.value_or(highFpsLateSfOffsetNs)
- : highFpsEarlyGlSfOffsetNs.value_or(highFpsLateSfOffsetNs) -
- vsyncDuration,
-
- highFpsEarlyGlAppOffsetNs.value_or(highFpsLateAppOffsetNs),
- },
- {
- highFpsLateSfOffsetNs < mThresholdForNextVsync
- ? highFpsLateSfOffsetNs
- : highFpsLateSfOffsetNs - vsyncDuration,
-
- highFpsLateAppOffsetNs,
- },
- };
-}
-
-PhaseOffsets::Offsets PhaseOffsets::getOffsetsForRefreshRate(float fps) const {
+PhaseOffsets::VsyncConfigSet VsyncConfiguration::getConfigsForRefreshRate(float fps) const {
const auto iter = std::find_if(mOffsets.begin(), mOffsets.end(),
- [&fps](const std::pair<float, Offsets>& candidateFps) {
+ [&fps](const std::pair<float, VsyncConfigSet>& candidateFps) {
return fpsEqualsWithMargin(fps, candidateFps.first);
});
@@ -200,7 +68,194 @@
// Unknown refresh rate. This might happen if we get a hotplug event for an external display.
// In this case just construct the offset.
ALOGW("Can't find offset for %.2f fps", fps);
- return getPhaseOffsets(fps, static_cast<nsecs_t>(1e9f / fps));
+ return constructOffsets(static_cast<nsecs_t>(1e9f / fps));
+}
+
+void VsyncConfiguration::initializeOffsets(const std::vector<float>& refreshRates) {
+ for (const auto fps : refreshRates) {
+ mOffsets.emplace(fps, constructOffsets(static_cast<nsecs_t>(1e9f / fps)));
+ }
+}
+
+void VsyncConfiguration::dump(std::string& result) const {
+ const auto [early, earlyGpu, late] = getCurrentConfigs();
+ using base::StringAppendF;
+ StringAppendF(&result,
+ " app phase: %9" PRId64 " ns\t SF phase: %9" PRId64
+ " ns\n"
+ " app duration: %9lld ns\t SF duration: %9lld ns\n"
+ " early app phase: %9" PRId64 " ns\t early SF phase: %9" PRId64
+ " ns\n"
+ " early app duration: %9lld ns\t early SF duration: %9lld ns\n"
+ " GL early app phase: %9" PRId64 " ns\tGL early SF phase: %9" PRId64
+ " ns\n"
+ " GL early app duration: %9lld ns\tGL early SF duration: %9lld ns\n",
+ late.appOffset, late.sfOffset,
+
+ late.appWorkDuration.count(), late.sfWorkDuration.count(),
+
+ early.appOffset, early.sfOffset,
+
+ early.appWorkDuration.count(), early.sfWorkDuration.count(),
+
+ earlyGpu.appOffset, earlyGpu.sfOffset,
+
+ earlyGpu.appWorkDuration.count(), earlyGpu.sfWorkDuration.count());
+}
+
+PhaseOffsets::PhaseOffsets(const scheduler::RefreshRateConfigs& refreshRateConfigs)
+ : PhaseOffsets(getRefreshRatesFromConfigs(refreshRateConfigs),
+ refreshRateConfigs.getCurrentRefreshRate().getFps(),
+ sysprop::vsync_event_phase_offset_ns(1000000),
+ sysprop::vsync_sf_event_phase_offset_ns(1000000),
+ getProperty("debug.sf.early_phase_offset_ns"),
+ getProperty("debug.sf.early_gl_phase_offset_ns"),
+ getProperty("debug.sf.early_app_phase_offset_ns"),
+ getProperty("debug.sf.early_gl_app_phase_offset_ns"),
+ getProperty("debug.sf.high_fps_late_app_phase_offset_ns").value_or(2000000),
+ getProperty("debug.sf.high_fps_late_sf_phase_offset_ns").value_or(1000000),
+ getProperty("debug.sf.high_fps_early_phase_offset_ns"),
+ getProperty("debug.sf.high_fps_early_gl_phase_offset_ns"),
+ getProperty("debug.sf.high_fps_early_app_phase_offset_ns"),
+ getProperty("debug.sf.high_fps_early_gl_app_phase_offset_ns"),
+                   // Below defines the threshold at which an offset is considered to be
+                   // negative, i.e. targeting the N+2 vsync instead of N+1. This means that for
+                   // offset < threshold, SF wakes up (vsync_duration - offset) before HW vsync,
+                   // and for offset >= threshold, SF wakes up (2 * vsync_duration - offset)
+                   // before HW vsync.
+ getProperty("debug.sf.phase_offset_threshold_for_next_vsync_ns")
+ .value_or(std::numeric_limits<nsecs_t>::max())) {}
+
+PhaseOffsets::PhaseOffsets(
+ const std::vector<float>& refreshRates, float currentFps, nsecs_t vsyncPhaseOffsetNs,
+ nsecs_t sfVSyncPhaseOffsetNs, std::optional<nsecs_t> earlySfOffsetNs,
+ std::optional<nsecs_t> earlyGpuSfOffsetNs, std::optional<nsecs_t> earlyAppOffsetNs,
+ std::optional<nsecs_t> earlyGpuAppOffsetNs, nsecs_t highFpsVsyncPhaseOffsetNs,
+ nsecs_t highFpsSfVSyncPhaseOffsetNs, std::optional<nsecs_t> highFpsEarlySfOffsetNs,
+ std::optional<nsecs_t> highFpsEarlyGpuSfOffsetNs,
+ std::optional<nsecs_t> highFpsEarlyAppOffsetNs,
+ std::optional<nsecs_t> highFpsEarlyGpuAppOffsetNs, nsecs_t thresholdForNextVsync)
+ : VsyncConfiguration(currentFps),
+ mVSyncPhaseOffsetNs(vsyncPhaseOffsetNs),
+ mSfVSyncPhaseOffsetNs(sfVSyncPhaseOffsetNs),
+ mEarlySfOffsetNs(earlySfOffsetNs),
+ mEarlyGpuSfOffsetNs(earlyGpuSfOffsetNs),
+ mEarlyAppOffsetNs(earlyAppOffsetNs),
+ mEarlyGpuAppOffsetNs(earlyGpuAppOffsetNs),
+ mHighFpsVSyncPhaseOffsetNs(highFpsVsyncPhaseOffsetNs),
+ mHighFpsSfVSyncPhaseOffsetNs(highFpsSfVSyncPhaseOffsetNs),
+ mHighFpsEarlySfOffsetNs(highFpsEarlySfOffsetNs),
+ mHighFpsEarlyGpuSfOffsetNs(highFpsEarlyGpuSfOffsetNs),
+ mHighFpsEarlyAppOffsetNs(highFpsEarlyAppOffsetNs),
+ mHighFpsEarlyGpuAppOffsetNs(highFpsEarlyGpuAppOffsetNs),
+ mThresholdForNextVsync(thresholdForNextVsync) {
+ initializeOffsets(refreshRates);
+}
+
+PhaseOffsets::VsyncConfigSet PhaseOffsets::constructOffsets(nsecs_t vsyncDuration) const {
+ if (vsyncDuration < std::chrono::nanoseconds(15ms).count()) {
+ return getHighFpsOffsets(vsyncDuration);
+ } else {
+ return getDefaultOffsets(vsyncDuration);
+ }
+}
+
+namespace {
+std::chrono::nanoseconds sfOffsetToDuration(nsecs_t sfOffset, nsecs_t vsyncDuration) {
+ return std::chrono::nanoseconds(vsyncDuration - sfOffset);
+}
+
+std::chrono::nanoseconds appOffsetToDuration(nsecs_t appOffset, nsecs_t sfOffset,
+ nsecs_t vsyncDuration) {
+ auto duration = vsyncDuration + (sfOffset - appOffset);
+ if (duration < vsyncDuration) {
+ duration += vsyncDuration;
+ }
+
+ return std::chrono::nanoseconds(duration);
+}
+} // namespace
+
+PhaseOffsets::VsyncConfigSet PhaseOffsets::getDefaultOffsets(nsecs_t vsyncDuration) const {
+ const auto earlySfOffset =
+ mEarlySfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) < mThresholdForNextVsync
+
+ ? mEarlySfOffsetNs.value_or(mSfVSyncPhaseOffsetNs)
+ : mEarlySfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) - vsyncDuration;
+ const auto earlyAppOffset = mEarlyAppOffsetNs.value_or(mVSyncPhaseOffsetNs);
+ const auto earlyGpuSfOffset =
+ mEarlyGpuSfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) < mThresholdForNextVsync
+
+ ? mEarlyGpuSfOffsetNs.value_or(mSfVSyncPhaseOffsetNs)
+ : mEarlyGpuSfOffsetNs.value_or(mSfVSyncPhaseOffsetNs) - vsyncDuration;
+ const auto earlyGpuAppOffset = mEarlyGpuAppOffsetNs.value_or(mVSyncPhaseOffsetNs);
+ const auto lateSfOffset = mSfVSyncPhaseOffsetNs < mThresholdForNextVsync
+ ? mSfVSyncPhaseOffsetNs
+ : mSfVSyncPhaseOffsetNs - vsyncDuration;
+ const auto lateAppOffset = mVSyncPhaseOffsetNs;
+
+ return {
+ .early = {.sfOffset = earlySfOffset,
+ .appOffset = earlyAppOffset,
+ .sfWorkDuration = sfOffsetToDuration(earlySfOffset, vsyncDuration),
+ .appWorkDuration =
+ appOffsetToDuration(earlyAppOffset, earlySfOffset, vsyncDuration)},
+ .earlyGpu = {.sfOffset = earlyGpuSfOffset,
+ .appOffset = earlyGpuAppOffset,
+ .sfWorkDuration = sfOffsetToDuration(earlyGpuSfOffset, vsyncDuration),
+ .appWorkDuration = appOffsetToDuration(earlyGpuAppOffset, earlyGpuSfOffset,
+ vsyncDuration)},
+ .late = {.sfOffset = lateSfOffset,
+ .appOffset = lateAppOffset,
+ .sfWorkDuration = sfOffsetToDuration(lateSfOffset, vsyncDuration),
+ .appWorkDuration =
+ appOffsetToDuration(lateAppOffset, lateSfOffset, vsyncDuration)},
+ };
+}
+
+PhaseOffsets::VsyncConfigSet PhaseOffsets::getHighFpsOffsets(nsecs_t vsyncDuration) const {
+ const auto earlySfOffset =
+ mHighFpsEarlySfOffsetNs.value_or(mHighFpsSfVSyncPhaseOffsetNs) < mThresholdForNextVsync
+ ? mHighFpsEarlySfOffsetNs.value_or(mHighFpsSfVSyncPhaseOffsetNs)
+ : mHighFpsEarlySfOffsetNs.value_or(mHighFpsSfVSyncPhaseOffsetNs) - vsyncDuration;
+ const auto earlyAppOffset = mHighFpsEarlyAppOffsetNs.value_or(mHighFpsVSyncPhaseOffsetNs);
+ const auto earlyGpuSfOffset = mHighFpsEarlyGpuSfOffsetNs.value_or(
+ mHighFpsSfVSyncPhaseOffsetNs) < mThresholdForNextVsync
+
+ ? mHighFpsEarlyGpuSfOffsetNs.value_or(mHighFpsSfVSyncPhaseOffsetNs)
+ : mHighFpsEarlyGpuSfOffsetNs.value_or(mHighFpsSfVSyncPhaseOffsetNs) - vsyncDuration;
+ const auto earlyGpuAppOffset = mHighFpsEarlyGpuAppOffsetNs.value_or(mHighFpsVSyncPhaseOffsetNs);
+ const auto lateSfOffset = mHighFpsSfVSyncPhaseOffsetNs < mThresholdForNextVsync
+ ? mHighFpsSfVSyncPhaseOffsetNs
+ : mHighFpsSfVSyncPhaseOffsetNs - vsyncDuration;
+ const auto lateAppOffset = mHighFpsVSyncPhaseOffsetNs;
+
+ return {
+ .early =
+ {
+ .sfOffset = earlySfOffset,
+ .appOffset = earlyAppOffset,
+ .sfWorkDuration = sfOffsetToDuration(earlySfOffset, vsyncDuration),
+ .appWorkDuration = appOffsetToDuration(earlyAppOffset, earlySfOffset,
+ vsyncDuration),
+ },
+ .earlyGpu =
+ {
+ .sfOffset = earlyGpuSfOffset,
+ .appOffset = earlyGpuAppOffset,
+ .sfWorkDuration = sfOffsetToDuration(earlyGpuSfOffset, vsyncDuration),
+ .appWorkDuration = appOffsetToDuration(earlyGpuAppOffset,
+ earlyGpuSfOffset, vsyncDuration),
+ },
+ .late =
+ {
+ .sfOffset = lateSfOffset,
+ .appOffset = lateAppOffset,
+ .sfWorkDuration = sfOffsetToDuration(lateSfOffset, vsyncDuration),
+ .appWorkDuration =
+ appOffsetToDuration(lateAppOffset, lateSfOffset, vsyncDuration),
+ },
+ };
}
static void validateSysprops() {
@@ -236,123 +291,105 @@
validateProperty("debug.sf.high_fps_early_gl_app_phase_offset_ns");
}
-static nsecs_t sfDurationToOffset(nsecs_t sfDuration, nsecs_t vsyncDuration) {
- return sfDuration == -1 ? 1'000'000 : vsyncDuration - sfDuration % vsyncDuration;
+namespace {
+nsecs_t sfDurationToOffset(std::chrono::nanoseconds sfDuration, nsecs_t vsyncDuration) {
+ return vsyncDuration - sfDuration.count() % vsyncDuration;
}
-static nsecs_t appDurationToOffset(nsecs_t appDuration, nsecs_t sfDuration, nsecs_t vsyncDuration) {
- return sfDuration == -1 ? 1'000'000
- : vsyncDuration - (appDuration + sfDuration) % vsyncDuration;
+nsecs_t appDurationToOffset(std::chrono::nanoseconds appDuration,
+ std::chrono::nanoseconds sfDuration, nsecs_t vsyncDuration) {
+ return vsyncDuration - (appDuration + sfDuration).count() % vsyncDuration;
}
+} // namespace
-PhaseDurations::Offsets PhaseDurations::constructOffsets(nsecs_t vsyncDuration) const {
- return Offsets{
- {
- mSfEarlyDuration < vsyncDuration
- ? sfDurationToOffset(mSfEarlyDuration, vsyncDuration)
- : sfDurationToOffset(mSfEarlyDuration, vsyncDuration) - vsyncDuration,
+WorkDuration::VsyncConfigSet WorkDuration::constructOffsets(nsecs_t vsyncDuration) const {
+ const auto sfDurationFixup = [vsyncDuration](nsecs_t duration) {
+ return duration == -1 ? std::chrono::nanoseconds(vsyncDuration) - 1ms
+ : std::chrono::nanoseconds(duration);
+ };
- appDurationToOffset(mAppEarlyDuration, mSfEarlyDuration, vsyncDuration),
- },
- {
- mSfEarlyGlDuration < vsyncDuration
- ? sfDurationToOffset(mSfEarlyGlDuration, vsyncDuration)
- : sfDurationToOffset(mSfEarlyGlDuration, vsyncDuration) - vsyncDuration,
+ const auto appDurationFixup = [vsyncDuration](nsecs_t duration) {
+ return duration == -1 ? std::chrono::nanoseconds(vsyncDuration)
+ : std::chrono::nanoseconds(duration);
+ };
- appDurationToOffset(mAppEarlyGlDuration, mSfEarlyGlDuration, vsyncDuration),
- },
- {
- mSfDuration < vsyncDuration
- ? sfDurationToOffset(mSfDuration, vsyncDuration)
- : sfDurationToOffset(mSfDuration, vsyncDuration) - vsyncDuration,
+ const auto sfEarlyDuration = sfDurationFixup(mSfEarlyDuration);
+ const auto appEarlyDuration = appDurationFixup(mAppEarlyDuration);
+ const auto sfEarlyGpuDuration = sfDurationFixup(mSfEarlyGpuDuration);
+ const auto appEarlyGpuDuration = appDurationFixup(mAppEarlyGpuDuration);
+ const auto sfDuration = sfDurationFixup(mSfDuration);
+ const auto appDuration = appDurationFixup(mAppDuration);
- appDurationToOffset(mAppDuration, mSfDuration, vsyncDuration),
- },
+ return {
+ .early =
+ {
+
+ .sfOffset = sfEarlyDuration.count() < vsyncDuration
+ ? sfDurationToOffset(sfEarlyDuration, vsyncDuration)
+ : sfDurationToOffset(sfEarlyDuration, vsyncDuration) -
+ vsyncDuration,
+
+ .appOffset = appDurationToOffset(appEarlyDuration, sfEarlyDuration,
+ vsyncDuration),
+
+ .sfWorkDuration = sfEarlyDuration,
+ .appWorkDuration = appEarlyDuration,
+ },
+ .earlyGpu =
+ {
+
+ .sfOffset = sfEarlyGpuDuration.count() < vsyncDuration
+
+ ? sfDurationToOffset(sfEarlyGpuDuration, vsyncDuration)
+ : sfDurationToOffset(sfEarlyGpuDuration, vsyncDuration) -
+ vsyncDuration,
+
+ .appOffset = appDurationToOffset(appEarlyGpuDuration,
+ sfEarlyGpuDuration, vsyncDuration),
+ .sfWorkDuration = sfEarlyGpuDuration,
+ .appWorkDuration = appEarlyGpuDuration,
+ },
+ .late =
+ {
+
+ .sfOffset = sfDuration.count() < vsyncDuration
+
+ ? sfDurationToOffset(sfDuration, vsyncDuration)
+ : sfDurationToOffset(sfDuration, vsyncDuration) - vsyncDuration,
+
+ .appOffset =
+ appDurationToOffset(appDuration, sfDuration, vsyncDuration),
+
+ .sfWorkDuration = sfDuration,
+ .appWorkDuration = appDuration,
+ },
};
}
-std::unordered_map<float, PhaseDurations::Offsets> PhaseDurations::initializeOffsets(
- const std::vector<float>& refreshRates) const {
- std::unordered_map<float, Offsets> offsets;
-
- for (const auto fps : refreshRates) {
- offsets.emplace(fps, constructOffsets(static_cast<nsecs_t>(1e9f / fps)));
- }
- return offsets;
-}
-
-PhaseDurations::PhaseDurations(const scheduler::RefreshRateConfigs& refreshRateConfigs)
- : PhaseDurations(getRefreshRatesFromConfigs(refreshRateConfigs),
- refreshRateConfigs.getCurrentRefreshRate().getFps(),
- getProperty("debug.sf.late.sf.duration").value_or(-1),
- getProperty("debug.sf.late.app.duration").value_or(-1),
- getProperty("debug.sf.early.sf.duration").value_or(mSfDuration),
- getProperty("debug.sf.early.app.duration").value_or(mAppDuration),
- getProperty("debug.sf.earlyGl.sf.duration").value_or(mSfDuration),
- getProperty("debug.sf.earlyGl.app.duration").value_or(mAppDuration)) {
+WorkDuration::WorkDuration(const scheduler::RefreshRateConfigs& refreshRateConfigs)
+ : WorkDuration(getRefreshRatesFromConfigs(refreshRateConfigs),
+ refreshRateConfigs.getCurrentRefreshRate().getFps(),
+ getProperty("debug.sf.late.sf.duration").value_or(-1),
+ getProperty("debug.sf.late.app.duration").value_or(-1),
+ getProperty("debug.sf.early.sf.duration").value_or(mSfDuration),
+ getProperty("debug.sf.early.app.duration").value_or(mAppDuration),
+ getProperty("debug.sf.earlyGl.sf.duration").value_or(mSfDuration),
+ getProperty("debug.sf.earlyGl.app.duration").value_or(mAppDuration)) {
validateSysprops();
}
-PhaseDurations::PhaseDurations(const std::vector<float>& refreshRates, float currentFps,
- nsecs_t sfDuration, nsecs_t appDuration, nsecs_t sfEarlyDuration,
- nsecs_t appEarlyDuration, nsecs_t sfEarlyGlDuration,
- nsecs_t appEarlyGlDuration)
- : mSfDuration(sfDuration),
+WorkDuration::WorkDuration(const std::vector<float>& refreshRates, float currentFps,
+ nsecs_t sfDuration, nsecs_t appDuration, nsecs_t sfEarlyDuration,
+ nsecs_t appEarlyDuration, nsecs_t sfEarlyGpuDuration,
+ nsecs_t appEarlyGpuDuration)
+ : VsyncConfiguration(currentFps),
+ mSfDuration(sfDuration),
mAppDuration(appDuration),
mSfEarlyDuration(sfEarlyDuration),
mAppEarlyDuration(appEarlyDuration),
- mSfEarlyGlDuration(sfEarlyGlDuration),
- mAppEarlyGlDuration(appEarlyGlDuration),
- mOffsets(initializeOffsets(refreshRates)),
- mRefreshRateFps(currentFps) {}
-
-PhaseOffsets::Offsets PhaseDurations::getOffsetsForRefreshRate(float fps) const {
- const auto iter = std::find_if(mOffsets.begin(), mOffsets.end(), [=](const auto& candidateFps) {
- return fpsEqualsWithMargin(fps, candidateFps.first);
- });
-
- if (iter != mOffsets.end()) {
- return iter->second;
- }
-
- // Unknown refresh rate. This might happen if we get a hotplug event for an external display.
- // In this case just construct the offset.
- ALOGW("Can't find offset for %.2f fps", fps);
- return constructOffsets(static_cast<nsecs_t>(1e9f / fps));
+ mSfEarlyGpuDuration(sfEarlyGpuDuration),
+ mAppEarlyGpuDuration(appEarlyGpuDuration) {
+ initializeOffsets(refreshRates);
}
-void PhaseDurations::dump(std::string& result) const {
- const auto [early, earlyGl, late] = getCurrentOffsets();
- using base::StringAppendF;
- StringAppendF(&result,
- " app phase: %9" PRId64 " ns\t SF phase: %9" PRId64
- " ns\n"
- " app duration: %9" PRId64 " ns\t SF duration: %9" PRId64
- " ns\n"
- " early app phase: %9" PRId64 " ns\t early SF phase: %9" PRId64
- " ns\n"
- " early app duration: %9" PRId64 " ns\t early SF duration: %9" PRId64
- " ns\n"
- " GL early app phase: %9" PRId64 " ns\tGL early SF phase: %9" PRId64
- " ns\n"
- " GL early app duration: %9" PRId64 " ns\tGL early SF duration: %9" PRId64
- " ns\n",
- late.app,
-
- late.sf,
-
- mAppDuration, mSfDuration,
-
- early.app, early.sf,
-
- mAppEarlyDuration, mSfEarlyDuration,
-
- earlyGl.app,
-
- earlyGl.sf,
-
- mAppEarlyGlDuration, mSfEarlyGlDuration);
-}
-
-} // namespace impl
-} // namespace android::scheduler
+} // namespace android::scheduler::impl
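
For the reverse direction, a minimal sketch (again not part of the patch) mirroring the fixups in WorkDuration::constructOffsets above: when the debug.sf.*.duration properties are unset (-1), the SF duration falls back to one vsync period minus 1 ms and the app duration to a full period. The 60 Hz period is illustrative.

#include <chrono>
#include <cstdint>
#include <cstdio>

using namespace std::chrono_literals;
using nsecs_t = int64_t;

// Duration -> offset, as in the anonymous-namespace helpers in the patch.
nsecs_t sfDurationToOffset(std::chrono::nanoseconds sfDuration, nsecs_t vsyncPeriod) {
    return vsyncPeriod - sfDuration.count() % vsyncPeriod;
}

nsecs_t appDurationToOffset(std::chrono::nanoseconds appDuration,
                            std::chrono::nanoseconds sfDuration, nsecs_t vsyncPeriod) {
    return vsyncPeriod - (appDuration + sfDuration).count() % vsyncPeriod;
}

int main() {
    const nsecs_t vsyncPeriod = 16'666'667;  // ~60 Hz, illustrative

    // debug.sf.late.sf.duration unset (-1): fall back to one period minus 1 ms.
    const std::chrono::nanoseconds sfDuration = std::chrono::nanoseconds(vsyncPeriod) - 1ms;
    // debug.sf.late.app.duration unset (-1): fall back to a full period.
    const std::chrono::nanoseconds appDuration{vsyncPeriod};

    std::printf("SF offset:  %lld ns\n",
                static_cast<long long>(sfDurationToOffset(sfDuration, vsyncPeriod)));
    std::printf("app offset: %lld ns\n",
                static_cast<long long>(appDurationToOffset(appDuration, sfDuration, vsyncPeriod)));
    return 0;
}

With these fallback durations both offsets come out to 1'000'000 ns, consistent with the 1 ms phase-offset defaults in the PhaseOffsets path, so the two conversions round-trip.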