/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ProfileData.h"
#include "Properties.h"

#include <cinttypes>

namespace android {
namespace uirenderer {

static const char* JANK_TYPE_NAMES[] = {
        "Missed Vsync",        "High input latency",       "Slow UI thread",
        "Slow bitmap uploads", "Slow issue draw commands", "Frame deadline missed"};

// Thresholds that control the frame-time bucketing algorithm.
// If a frame is <= this (in ms), it goes in bucket 0.
static const uint32_t kBucketMinThreshold = 5;
// If a frame is > this, start counting in increments of 2ms.
static const uint32_t kBucket2msIntervals = 32;
// If a frame is > this, start counting in increments of 4ms.
static const uint32_t kBucket4msIntervals = 48;

// The interval of the slow frame histogram.
static const uint32_t kSlowFrameBucketIntervalMs = 50;
// The start point of the slow frame bucket in ms.
static const uint32_t kSlowFrameBucketStartMs = 150;
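// Putting these together (a rough sketch, not normative): bucket 0 holds frames that
// complete in <= 5ms, frames up to ~32ms land in 1ms-wide buckets, frames up to ~48ms
// in 2ms-wide buckets, and slower frames in 4ms-wide buckets. Frames too slow for the
// main histogram move to the slow-frame histogram, whose buckets start at 150ms and
// are 50ms wide.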

// This will be called every frame, performance sensitive
// Uses bit twiddling to avoid branching while achieving the packing desired
static uint32_t frameCountIndexForFrameTime(nsecs_t frameTime) {
    uint32_t index = static_cast<uint32_t>(ns2ms(frameTime));
    // If index > kBucketMinThreshold, mask will be 0xFFFFFFFF as a result
    // of negating 1 (two's complement); otherwise mask will be 0
    uint32_t mask = -(index > kBucketMinThreshold);
    // If index > threshold, this will essentially perform:
    //     amountAboveThreshold = index - threshold;
    //     index = threshold + (amountAboveThreshold / 2);
    // However, if index <= threshold this does nothing: it will underflow, do
    // a right shift by 0 (a no-op), then overflow back to the original value
    index = ((index - kBucket4msIntervals) >> (index > kBucket4msIntervals)) + kBucket4msIntervals;
    index = ((index - kBucket2msIntervals) >> (index > kBucket2msIntervals)) + kBucket2msIntervals;
    // If index was < minThreshold at the start of all this, it's going to
    // be a pretty garbage value right now. However, mask is 0, so we'll end
    // up with the desired result of 0.
    index = (index - kBucketMinThreshold) & mask;
    return index;
}
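// Worked example for frameCountIndexForFrameTime (illustrative): a 40ms frame starts
// with index = 40 and mask = 0xFFFFFFFF. The 4ms step is a no-op (40 <= 48), the 2ms
// step maps 40 to ((40 - 32) >> 1) + 32 = 36, and subtracting kBucketMinThreshold
// yields bucket 31. For a 3ms frame the mask is 0, so the final AND forces bucket 0.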

// Only called when dumping stats, less performance sensitive
uint32_t ProfileData::frameTimeForFrameCountIndex(uint32_t index) {
    index = index + kBucketMinThreshold;
    if (index > kBucket2msIntervals) {
        index += (index - kBucket2msIntervals);
    }
    if (index > kBucket4msIntervals) {
        // This works because the value was already doubled by the branch above.
        // 1 is added to shift slightly more towards the middle of the bucket.
        index += (index - kBucket4msIntervals) + 1;
    }
    return index;
}
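// Worked example for frameTimeForFrameCountIndex (illustrative): index 38 becomes
// 38 + 5 = 43, then 43 + (43 - 32) = 54, then 54 + (54 - 48) + 1 = 61, i.e. the bucket
// that a ~60ms frame falls into reports as 61ms.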

uint32_t ProfileData::frameTimeForSlowFrameCountIndex(uint32_t index) {
    return (index * kSlowFrameBucketIntervalMs) + kSlowFrameBucketStartMs;
}
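// E.g. frameTimeForSlowFrameCountIndex(0) == 150 and frameTimeForSlowFrameCountIndex(2) == 250.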

void ProfileData::mergeWith(const ProfileData& other) {
    // Make sure we don't overflow, just in case
    uint32_t divider = 0;
    if (mTotalFrameCount > (1 << 24)) {
        divider = 4;
    }
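    // "divider" is used as a shift amount below: a value of 4 scales the existing
    // counts down by 16 before the other profile's counts are added, trading
    // precision for headroom once the running totals get large.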
    for (size_t i = 0; i < other.mJankTypeCounts.size(); i++) {
        mJankTypeCounts[i] >>= divider;
        mJankTypeCounts[i] += other.mJankTypeCounts[i];
    }
    for (size_t i = 0; i < other.mFrameCounts.size(); i++) {
        mFrameCounts[i] >>= divider;
        mFrameCounts[i] += other.mFrameCounts[i];
    }
    mJankFrameCount >>= divider;
    mJankFrameCount += other.mJankFrameCount;
    mTotalFrameCount >>= divider;
    mTotalFrameCount += other.mTotalFrameCount;
    if (mStatStartTime > other.mStatStartTime || mStatStartTime == 0) {
        mStatStartTime = other.mStatStartTime;
    }
    for (size_t i = 0; i < other.mGPUFrameCounts.size(); i++) {
        mGPUFrameCounts[i] >>= divider;
        mGPUFrameCounts[i] += other.mGPUFrameCounts[i];
    }
    mPipelineType = other.mPipelineType;
}

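// dump() writes a plain-text report to the given fd. Illustrative shape of the
// output (values are made up):
//     Stats since: 1234567890ns
//     Total frames rendered: 1000
//     Janky frames: 50 (5.00%)
//     50th percentile: 8ms
//     ...
//     HISTOGRAM: 5ms=120 6ms=300 ...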
void ProfileData::dump(int fd) const {
    dprintf(fd, "\nStats since: %" PRIu64 "ns", mStatStartTime);
    dprintf(fd, "\nTotal frames rendered: %u", mTotalFrameCount);
    dprintf(fd, "\nJanky frames: %u (%.2f%%)", mJankFrameCount,
            mTotalFrameCount == 0
                    ? 0.0f
                    : (float)mJankFrameCount / (float)mTotalFrameCount * 100.0f);
    dprintf(fd, "\n50th percentile: %ums", findPercentile(50));
    dprintf(fd, "\n90th percentile: %ums", findPercentile(90));
    dprintf(fd, "\n95th percentile: %ums", findPercentile(95));
    dprintf(fd, "\n99th percentile: %ums", findPercentile(99));
    for (int i = 0; i < NUM_BUCKETS; i++) {
        dprintf(fd, "\nNumber %s: %u", JANK_TYPE_NAMES[i], mJankTypeCounts[i]);
    }
    dprintf(fd, "\nHISTOGRAM:");
    histogramForEach([fd](HistogramEntry entry) {
        dprintf(fd, " %ums=%u", entry.renderTimeMs, entry.frameCount);
    });
    dprintf(fd, "\n50th gpu percentile: %ums", findGPUPercentile(50));
    dprintf(fd, "\n90th gpu percentile: %ums", findGPUPercentile(90));
    dprintf(fd, "\n95th gpu percentile: %ums", findGPUPercentile(95));
    dprintf(fd, "\n99th gpu percentile: %ums", findGPUPercentile(99));
    dprintf(fd, "\nGPU HISTOGRAM:");
    histogramGPUForEach([fd](HistogramEntry entry) {
        dprintf(fd, " %ums=%u", entry.renderTimeMs, entry.frameCount);
    });
}

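// Finds (approximately) the frame time at the given percentile by walking the
// histograms from the slowest bucket downwards until enough frames have been
// counted, returning that bucket's representative time in ms.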
uint32_t ProfileData::findPercentile(int percentile) const {
    int pos = percentile * mTotalFrameCount / 100;
    int remaining = mTotalFrameCount - pos;
    for (int i = mSlowFrameCounts.size() - 1; i >= 0; i--) {
        remaining -= mSlowFrameCounts[i];
        if (remaining <= 0) {
            return (i * kSlowFrameBucketIntervalMs) + kSlowFrameBucketStartMs;
        }
    }
    for (int i = mFrameCounts.size() - 1; i >= 0; i--) {
        remaining -= mFrameCounts[i];
        if (remaining <= 0) {
            return frameTimeForFrameCountIndex(i);
        }
    }
    return 0;
}

void ProfileData::reset() {
    mJankTypeCounts.fill(0);
    mFrameCounts.fill(0);
    mGPUFrameCounts.fill(0);
    mSlowFrameCounts.fill(0);
    mTotalFrameCount = 0;
    mJankFrameCount = 0;
    mStatStartTime = systemTime(SYSTEM_TIME_MONOTONIC);
    mPipelineType = Properties::getRenderPipelineType();
}

void ProfileData::reportFrame(int64_t duration) {
    mTotalFrameCount++;
    uint32_t framebucket = frameCountIndexForFrameTime(duration);
    // Fast frames are tallied in the main histogram; anything slower than it can
    // represent falls into the slow-frame buckets.
    if (framebucket < mFrameCounts.size()) {
        mFrameCounts[framebucket]++;
    } else {
        framebucket = (ns2ms(duration) - kSlowFrameBucketStartMs) / kSlowFrameBucketIntervalMs;
        framebucket = std::min(framebucket, static_cast<uint32_t>(mSlowFrameCounts.size() - 1));
        mSlowFrameCounts[framebucket]++;
    }
}
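// Example (illustrative): a 200ms frame is too slow for the main histogram, so
// reportFrame() counts it in slow-frame bucket (200 - 150) / 50 = 1, which dump()
// reports as the 200ms entry.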

void ProfileData::histogramForEach(const std::function<void(HistogramEntry)>& callback) const {
    for (size_t i = 0; i < mFrameCounts.size(); i++) {
        callback(HistogramEntry{frameTimeForFrameCountIndex(i), mFrameCounts[i]});
    }
    for (size_t i = 0; i < mSlowFrameCounts.size(); i++) {
        callback(HistogramEntry{frameTimeForSlowFrameCountIndex(i), mSlowFrameCounts[i]});
    }
}
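// Note: histogramForEach() emits the regular frame buckets first, then the
// slow-frame buckets, so dump()'s HISTOGRAM line is one continuous list.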

uint32_t ProfileData::findGPUPercentile(int percentile) const {
    uint32_t totalGPUFrameCount = 0;  // this is usually mTotalFrameCount - 3.
    for (int i = mGPUFrameCounts.size() - 1; i >= 0; i--) {
        totalGPUFrameCount += mGPUFrameCounts[i];
    }
    int pos = percentile * totalGPUFrameCount / 100;
    int remaining = totalGPUFrameCount - pos;
    for (int i = mGPUFrameCounts.size() - 1; i >= 0; i--) {
        remaining -= mGPUFrameCounts[i];
        if (remaining <= 0) {
            return GPUFrameTimeForFrameCountIndex(i);
        }
    }
    return 0;
}

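// GPU frame buckets: indices 0-24 cover GPU frame times of roughly 1-25ms and are
// reported as the bucket's upper edge (index + 1); index 25 is the catch-all bucket
// for anything slower (reportGPUFrame clamps to it) and is reported as 4950.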
uint32_t ProfileData::GPUFrameTimeForFrameCountIndex(uint32_t index) {
    return index != 25 ? index + 1 : 4950;
}

void ProfileData::reportGPUFrame(int64_t duration) {
    uint32_t index = static_cast<uint32_t>(ns2ms(duration));
    if (index > 25) {
        index = 25;
    }

    mGPUFrameCounts[index]++;
}

void ProfileData::histogramGPUForEach(const std::function<void(HistogramEntry)>& callback) const {
    for (size_t i = 0; i < mGPUFrameCounts.size(); i++) {
        callback(HistogramEntry{GPUFrameTimeForFrameCountIndex(i), mGPUFrameCounts[i]});
    }
}

} /* namespace uirenderer */
} /* namespace android */