/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <chrono>
#include <deque>
#include <fcntl.h>
#include <random>
#include <string.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#include <gtest/gtest.h>

#include <aidl/android/hardware/health/IHealth.h>
#include <healthhalutils/HealthHalUtils.h>
#include <storaged.h>        // data structures
#include <storaged_utils.h>  // functions to test

#define MMC_DISK_STATS_PATH "/sys/block/mmcblk0/stat"
#define SDA_DISK_STATS_PATH "/sys/block/sda/stat"

using namespace std;
using namespace chrono;
using namespace storaged_proto;

namespace {

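// Generate disk traffic: write 100 * 2048 bytes to /cache, fsync, read them
// back, then sleep for `sec` seconds, so that the disk counters sampled by
// the tests below actually advance between measurements.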
void write_and_pause(uint32_t sec) {
    const char* path = "/cache/test";
    int fd = open(path, O_WRONLY | O_CREAT, 0600);
    ASSERT_LT(-1, fd);
    char buffer[2048];
    memset(buffer, 1, sizeof(buffer));
    int loop_size = 100;
    for (int i = 0; i < loop_size; ++i) {
        ASSERT_EQ(2048, write(fd, buffer, sizeof(buffer)));
    }
    fsync(fd);
    close(fd);

    fd = open(path, O_RDONLY);
    ASSERT_LT(-1, fd);
    for (int i = 0; i < loop_size; ++i) {
        ASSERT_EQ(2048, read(fd, buffer, sizeof(buffer)));
    }
    close(fd);

    sleep(sec);
}

}  // namespace

const char* get_disk_stats_path() {
    if (access(MMC_DISK_STATS_PATH, R_OK) >= 0) {
        return MMC_DISK_STATS_PATH;
    } else if (access(SDA_DISK_STATS_PATH, R_OK) >= 0) {
        return SDA_DISK_STATS_PATH;
    } else {
        return nullptr;
    }
}
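
// The stat files above are formatted by the kernel block layer as 11
// whitespace-separated counters: read I/Os, read merges, sectors read,
// read ticks, write I/Os, write merges, sectors written, write ticks,
// I/Os currently in flight, io_ticks, and time in queue. parse_disk_stats()
// presumably maps these onto the leading fields of struct disk_stats in the
// same order (io_in_flight is index 8, as the disk_stats test assumes).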

// The tested functions should return the expected values.
TEST(storaged_test, retvals) {
    struct disk_stats stats;
    memset(&stats, 0, sizeof(struct disk_stats));

    auto disk_stats_path = get_disk_stats_path();
    if (disk_stats_path == nullptr) GTEST_SKIP();

    EXPECT_TRUE(parse_disk_stats(disk_stats_path, &stats));

    struct disk_stats old_stats;
    memset(&old_stats, 0, sizeof(struct disk_stats));
    old_stats = stats;

    const char wrong_path[] = "/this/is/wrong";
    EXPECT_FALSE(parse_disk_stats(wrong_path, &stats));

    // reading a wrong path should not damage the output structure
    EXPECT_EQ(stats, old_stats);
}

TEST(storaged_test, disk_stats) {
    struct disk_stats stats = {};
    auto disk_stats_path = get_disk_stats_path();
    if (disk_stats_path == nullptr) GTEST_SKIP();
    ASSERT_TRUE(parse_disk_stats(disk_stats_path, &stats));

    // every entry of stats (except io_in_flight) should be greater than 0
    for (uint i = 0; i < DISK_STATS_SIZE; ++i) {
        if (i == 8) continue;  // skip io_in_flight, which can be 0
        EXPECT_LT((uint64_t)0, *((uint64_t*)&stats + i));
    }

    // the accumulated increments should equal the overall increment
    struct disk_stats base = {}, tmp = {}, curr, acc = {}, inc[5];
    for (uint i = 0; i < 5; ++i) {
        ASSERT_TRUE(parse_disk_stats(disk_stats_path, &curr));
        if (i == 0) {
            base = curr;
            tmp = curr;
            sleep(5);
            continue;
        }
        get_inc_disk_stats(&tmp, &curr, &inc[i]);
        add_disk_stats(&inc[i], &acc);
        tmp = curr;
        write_and_pause(5);
    }
    struct disk_stats overall_inc = {};
    get_inc_disk_stats(&base, &curr, &overall_inc);

    EXPECT_EQ(overall_inc, acc);
}

double mean(const std::deque<uint32_t>& nums) {
    double sum = 0.0;
    for (uint32_t i : nums) {
        sum += i;
    }
    return sum / nums.size();
}

double standard_deviation(const std::deque<uint32_t>& nums) {
    double sum = 0.0;
    double avg = mean(nums);
    for (uint32_t i : nums) {
        sum += ((double)i - avg) * ((double)i - avg);
    }
    return sqrt(sum / nums.size());
}
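
// stream_stats (exercised below) tracks the same mean/std as the helpers
// above, but incrementally via add()/evict(). A minimal sketch of one way to
// do that in O(1) per update -- a running sum and sum of squares -- is shown
// here purely for illustration; it is not storaged's implementation.
struct running_stats_sketch {
    double sum = 0.0;
    double sum_sq = 0.0;
    size_t n = 0;

    void add(uint32_t x) {
        sum += x;
        sum_sq += (double)x * x;
        ++n;
    }
    void evict(uint32_t x) {
        sum -= x;
        sum_sq -= (double)x * x;
        --n;
    }
    // callers must ensure n > 0
    double mean() const { return sum / n; }
    // population standard deviation: sqrt(E[x^2] - E[x]^2)
    double stddev() const { return sqrt(sum_sq / n - mean() * mean()); }
};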

TEST(storaged_test, stream_stats) {
    // 100 random numbers
    std::vector<uint32_t> data = {8147,9058,1270,9134,6324,975,2785,5469,9575,9649,1576,9706,9572,4854,8003,1419,4218,9157,7922,9595,6557,357,8491,9340,6787,7577,7431,3922,6555,1712,7060,318,2769,462,971,8235,6948,3171,9502,344,4387,3816,7655,7952,1869,4898,4456,6463,7094,7547,2760,6797,6551,1626,1190,4984,9597,3404,5853,2238,7513,2551,5060,6991,8909,9593,5472,1386,1493,2575,8407,2543,8143,2435,9293,3500,1966,2511,6160,4733,3517,8308,5853,5497,9172,2858,7572,7537,3804,5678,759,540,5308,7792,9340,1299,5688,4694,119,3371};
    std::deque<uint32_t> test_data;
    stream_stats sstats;
    for (uint32_t i : data) {
        test_data.push_back(i);
        sstats.add(i);

        EXPECT_EQ((int)standard_deviation(test_data), (int)sstats.get_std());
        EXPECT_EQ((int)mean(test_data), (int)sstats.get_mean());
    }

    for (uint32_t i : data) {
        test_data.pop_front();
        sstats.evict(i);

        // mean()/standard_deviation() divide by size(); skip the comparison
        // once the last element has been evicted and the deque is empty
        if (test_data.empty()) break;

        EXPECT_EQ((int)standard_deviation(test_data), (int)sstats.get_std());
        EXPECT_EQ((int)mean(test_data), (int)sstats.get_mean());
    }

    // some real data
    std::vector<uint32_t> another_data = {113875,81620,103145,28327,86855,207414,96526,52567,28553,250311};
    test_data.clear();
    uint32_t window_size = 2;
    uint32_t idx;
    stream_stats sstats1;
    for (idx = 0; idx < window_size; ++idx) {
        test_data.push_back(another_data[idx]);
        sstats1.add(another_data[idx]);
    }
    EXPECT_EQ((int)standard_deviation(test_data), (int)sstats1.get_std());
    EXPECT_EQ((int)mean(test_data), (int)sstats1.get_mean());
    for (; idx < another_data.size(); ++idx) {
        test_data.pop_front();
        sstats1.evict(another_data[idx - window_size]);
        test_data.push_back(another_data[idx]);
        sstats1.add(another_data[idx]);
        EXPECT_EQ((int)standard_deviation(test_data), (int)sstats1.get_std());
        EXPECT_EQ((int)mean(test_data), (int)sstats1.get_mean());
    }
}

struct disk_perf disk_perf_multiply(struct disk_perf perf, double mul) {
    struct disk_perf retval;
    retval.read_perf = (double)perf.read_perf * mul;
    retval.read_ios = (double)perf.read_ios * mul;
    retval.write_perf = (double)perf.write_perf * mul;
    retval.write_ios = (double)perf.write_ios * mul;
    retval.queue = (double)perf.queue * mul;

    return retval;
}

struct disk_stats disk_stats_add(struct disk_stats stats1, struct disk_stats stats2) {
    // zero-initialize so the fields not summed below (start_time, counter,
    // io_avg) do not carry garbage
    struct disk_stats retval = {};
    retval.read_ios = stats1.read_ios + stats2.read_ios;
    retval.read_merges = stats1.read_merges + stats2.read_merges;
    retval.read_sectors = stats1.read_sectors + stats2.read_sectors;
    retval.read_ticks = stats1.read_ticks + stats2.read_ticks;
    retval.write_ios = stats1.write_ios + stats2.write_ios;
    retval.write_merges = stats1.write_merges + stats2.write_merges;
    retval.write_sectors = stats1.write_sectors + stats2.write_sectors;
    retval.write_ticks = stats1.write_ticks + stats2.write_ticks;
    retval.io_in_flight = stats1.io_in_flight + stats2.io_in_flight;
    retval.io_ticks = stats1.io_ticks + stats2.io_ticks;
    retval.io_in_queue = stats1.io_in_queue + stats2.io_in_queue;
    retval.end_time = stats1.end_time + stats2.end_time;

    return retval;
}

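// Every cumulative counter in stats2 must be at least its stats1 value, and
// at least one of them must have strictly increased.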
void expect_increasing(struct disk_stats stats1, struct disk_stats stats2) {
    EXPECT_LE(stats1.read_ios, stats2.read_ios);
    EXPECT_LE(stats1.read_merges, stats2.read_merges);
    EXPECT_LE(stats1.read_sectors, stats2.read_sectors);
    EXPECT_LE(stats1.read_ticks, stats2.read_ticks);
    EXPECT_LE(stats1.write_ios, stats2.write_ios);
    EXPECT_LE(stats1.write_merges, stats2.write_merges);
    EXPECT_LE(stats1.write_sectors, stats2.write_sectors);
    EXPECT_LE(stats1.write_ticks, stats2.write_ticks);
    EXPECT_LE(stats1.io_ticks, stats2.io_ticks);
    EXPECT_LE(stats1.io_in_queue, stats2.io_in_queue);

    EXPECT_TRUE(stats1.read_ios < stats2.read_ios ||
                stats1.read_merges < stats2.read_merges ||
                stats1.read_sectors < stats2.read_sectors ||
                stats1.read_ticks < stats2.read_ticks ||
                stats1.write_ios < stats2.write_ios ||
                stats1.write_merges < stats2.write_merges ||
                stats1.write_sectors < stats2.write_sectors ||
                stats1.write_ticks < stats2.write_ticks ||
                stats1.io_ticks < stats2.io_ticks ||
                stats1.io_in_queue < stats2.io_in_queue);
}

TEST(storaged_test, disk_stats_monitor) {
    auto [healthService, hidlHealth] = HealthServicePair::get();

    // assert that there is a disk stats source: either the health HAL or
    // one of the sysfs stat files
    ASSERT_TRUE(healthService != nullptr || access(MMC_DISK_STATS_PATH, R_OK) >= 0 ||
                access(SDA_DISK_STATS_PATH, R_OK) >= 0);

    // test that detect() returns the right value
    disk_stats_monitor dsm_detect{healthService};
    ASSERT_TRUE(dsm_detect.enabled());

    // Even if enabled(), healthService may not support disk stats. Check if it is supported.
    std::vector<aidl::android::hardware::health::DiskStats> halStats;
    if (healthService->getDiskStats(&halStats).getExceptionCode() == EX_UNSUPPORTED_OPERATION) {
        GTEST_SKIP();
    }

    // feed monitor with constant perf data for io perf baseline
    // using constant perf is reasonable since the functionality of stream_stats
    // has already been tested
    struct disk_perf norm_perf = {
        .read_perf = 10 * 1024,
        .read_ios = 50,
        .write_perf = 5 * 1024,
        .write_ios = 25,
        .queue = 5
    };

    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<> rand(0.8, 1.2);

    for (uint i = 0; i < dsm_detect.mWindow; ++i) {
        struct disk_perf perf = disk_perf_multiply(norm_perf, rand(gen));

        dsm_detect.add(&perf);
        dsm_detect.mBuffer.push(perf);
        EXPECT_EQ(dsm_detect.mBuffer.size(), (uint64_t)i + 1);
    }

    dsm_detect.mValid = true;
    dsm_detect.update_mean();
    dsm_detect.update_std();

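    // detect() should flag a stall only when the sample deviates from the
    // baseline by more than mSigma standard deviations: reads/writes drop
    // below the mean while the queue grows, both scaled by the same factor i.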
    for (double i = 0; i < 2 * dsm_detect.mSigma; i += 0.5) {
        struct disk_perf test_perf;
        struct disk_perf test_mean = dsm_detect.mMean;
        struct disk_perf test_std = dsm_detect.mStd;

        test_perf.read_perf = (double)test_mean.read_perf - i * test_std.read_perf;
        test_perf.read_ios = (double)test_mean.read_ios - i * test_std.read_ios;
        test_perf.write_perf = (double)test_mean.write_perf - i * test_std.write_perf;
        test_perf.write_ios = (double)test_mean.write_ios - i * test_std.write_ios;
        test_perf.queue = (double)test_mean.queue + i * test_std.queue;

        EXPECT_EQ((i > dsm_detect.mSigma), dsm_detect.detect(&test_perf));
    }

    // test that stalled disk_stats are correctly accumulated in the monitor
    disk_stats_monitor dsm_acc{healthService};
    struct disk_stats norm_inc = {
        .read_ios = 200,
        .read_merges = 0,
        .read_sectors = 200,
        .read_ticks = 200,
        .write_ios = 100,
        .write_merges = 0,
        .write_sectors = 100,
        .write_ticks = 100,
        .io_in_flight = 0,
        .io_ticks = 600,
        .io_in_queue = 300,
        .start_time = 0,
        .end_time = 100,
        .counter = 0,
        .io_avg = 0
    };

    struct disk_stats stall_inc = {
        .read_ios = 200,
        .read_merges = 0,
        .read_sectors = 20,
        .read_ticks = 200,
        .write_ios = 100,
        .write_merges = 0,
        .write_sectors = 10,
        .write_ticks = 100,
        .io_in_flight = 0,
        .io_ticks = 600,
        .io_in_queue = 1200,
        .start_time = 0,
        .end_time = 100,
        .counter = 0,
        .io_avg = 0
    };

    struct disk_stats stats_base = {};
    int loop_size = 100;
    for (int i = 0; i < loop_size; ++i) {
        stats_base = disk_stats_add(stats_base, norm_inc);
        dsm_acc.update(&stats_base);
        EXPECT_EQ(dsm_acc.mValid, (uint32_t)i >= dsm_acc.mWindow);
        EXPECT_FALSE(dsm_acc.mStall);
    }

    stats_base = disk_stats_add(stats_base, stall_inc);
    dsm_acc.update(&stats_base);
    EXPECT_TRUE(dsm_acc.mValid);
    EXPECT_TRUE(dsm_acc.mStall);

    for (int i = 0; i < 10; ++i) {
        stats_base = disk_stats_add(stats_base, norm_inc);
        dsm_acc.update(&stats_base);
        EXPECT_TRUE(dsm_acc.mValid);
        EXPECT_FALSE(dsm_acc.mStall);
    }

    struct disk_stats stats_prev = {};
    loop_size = 10;
    write_and_pause(5);
    for (int i = 0; i < loop_size; ++i) {
        dsm_detect.update();
        expect_increasing(stats_prev, dsm_detect.mPrevious);
        stats_prev = dsm_detect.mPrevious;
        write_and_pause(5);
    }
}

TEST(storaged_test, storage_info_t) {
    storage_info_t si;
    time_point<steady_clock> tp;
    time_point<system_clock> stp;

    // generate perf history [least recent ------> most recent]
    // day 1:   5,  10,  15,  20      | daily average 12
    // day 2:  25,  30,  35,  40,  45 | daily average 35
    // day 3:  50,  55,  60,  65,  70 | daily average 60
    // day 4:  75,  80,  85,  90,  95 | daily average 85
    // day 5: 100, 105, 110, 115      | daily average 107
    // day 6: 120, 125, 130, 135, 140 | daily average 130
    // day 7: 145, 150, 155, 160, 165 | daily average 155
    // end of week 1:                 | weekly average 83
    // day 1: 170, 175, 180, 185, 190 | daily average 180
    // day 2: 195, 200, 205, 210, 215 | daily average 205
    // day 3: 220, 225, 230, 235      | daily average 227
    // day 4: 240, 245, 250, 255, 260 | daily average 250
    // day 5: 265, 270, 275, 280, 285 | daily average 275
    // day 6: 290, 295, 300, 305, 310 | daily average 300
    // day 7: 315, 320, 325, 330, 335 | daily average 325
    // end of week 2:                 | weekly average 251
    // day 1: 340, 345, 350, 355      | daily average 347
    // day 2: 360, 365, 370, 375
    si.day_start_tp = {};
    for (int i = 0; i < 75; i++) {
        tp += hours(5);
        stp = {};
        stp += duration_cast<chrono::seconds>(tp.time_since_epoch());
        si.update_perf_history((i + 1) * 5, stp);
    }

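    // get_perf_history() returns a flat vector: [0] = #recent samples,
    // [1] = #daily buckets (7), [2] = #weekly buckets (52), followed by the
    // recent samples (most recent first), the daily averages, then the
    // weekly averages: 3 + 4 + 7 + 52 = 66 entries here.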
    vector<int> history = si.get_perf_history();
    EXPECT_EQ(history.size(), 66UL);
    size_t i = 0;
    EXPECT_EQ(history[i++], 4);
    EXPECT_EQ(history[i++], 7);   // 7 days
    EXPECT_EQ(history[i++], 52);  // 52 weeks
    // last 24 hours
    EXPECT_EQ(history[i++], 375);
    EXPECT_EQ(history[i++], 370);
    EXPECT_EQ(history[i++], 365);
    EXPECT_EQ(history[i++], 360);
    // daily average of the last 7 days
    EXPECT_EQ(history[i++], 347);
    EXPECT_EQ(history[i++], 325);
    EXPECT_EQ(history[i++], 300);
    EXPECT_EQ(history[i++], 275);
    EXPECT_EQ(history[i++], 250);
    EXPECT_EQ(history[i++], 227);
    EXPECT_EQ(history[i++], 205);
    // weekly average of the last 52 weeks
    EXPECT_EQ(history[i++], 251);
    EXPECT_EQ(history[i++], 83);
    for (; i < history.size(); i++) {
        EXPECT_EQ(history[i], 0);
    }
}

TEST(storaged_test, storage_info_t_proto) {
    storage_info_t si;
    si.day_start_tp = {};

    IOPerfHistory proto;
    proto.set_nr_samples(10);
    proto.set_day_start_sec(0);
    si.load_perf_history_proto(proto);

    // Skip ahead > 1 day, with no data points in the previous day.
    time_point<system_clock> stp;
    stp += hours(36);
    si.update_perf_history(100, stp);

    vector<int> history = si.get_perf_history();
    EXPECT_EQ(history.size(), 63UL);
    EXPECT_EQ(history[0], 1);
    EXPECT_EQ(history[1], 7);
    EXPECT_EQ(history[2], 52);
    EXPECT_EQ(history[3], 100);
    for (size_t i = 4; i < history.size(); i++) {
        EXPECT_EQ(history[i], 0);
    }
}

TEST(storaged_test, uid_monitor) {
    uid_monitor uidm;
    auto& io_history = uidm.io_history();

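    // io_history is keyed by the end timestamp of each collection period;
    // each record carries its own start_ts, so the entry below covers I/O
    // that happened between ts 100 and ts 200.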
    io_history[200] = {
        .start_ts = 100,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
            { "app2", {
                .user_id = 0,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF] = 1000,
                }
            },
            { "app1", {
                .user_id = 1,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
        },
    };

    io_history[300] = {
        .start_ts = 200,
        .entries = {
            { "app1", {
                .user_id = 1,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_OFF] = 1000,
                }
            },
            { "app3", {
                .user_id = 0,
                .uid_ios.bytes[READ][BACKGROUND][CHARGER_OFF] = 1000,
                }
            },
        },
    };

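    // update_uid_io_proto() splits the history per Android user id: one
    // StoragedProto per user, each holding only that user's usage records.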
    unordered_map<int, StoragedProto> protos;

    uidm.update_uid_io_proto(&protos);

    EXPECT_EQ(protos.size(), 2U);
    EXPECT_EQ(protos.count(0), 1UL);
    EXPECT_EQ(protos.count(1), 1UL);

    EXPECT_EQ(protos[0].uid_io_usage().uid_io_items_size(), 2);
    const UidIOItem& user_0_item_0 = protos[0].uid_io_usage().uid_io_items(0);
    EXPECT_EQ(user_0_item_0.end_ts(), 200UL);
    EXPECT_EQ(user_0_item_0.records().start_ts(), 100UL);
    EXPECT_EQ(user_0_item_0.records().entries_size(), 2);
    EXPECT_EQ(user_0_item_0.records().entries(0).uid_name(), "app1");
    EXPECT_EQ(user_0_item_0.records().entries(0).user_id(), 0UL);
    EXPECT_EQ(user_0_item_0.records().entries(0).uid_io().wr_fg_chg_on(), 1000UL);
    EXPECT_EQ(user_0_item_0.records().entries(1).uid_name(), "app2");
    EXPECT_EQ(user_0_item_0.records().entries(1).user_id(), 0UL);
    EXPECT_EQ(user_0_item_0.records().entries(1).uid_io().rd_fg_chg_off(), 1000UL);
    const UidIOItem& user_0_item_1 = protos[0].uid_io_usage().uid_io_items(1);
    EXPECT_EQ(user_0_item_1.end_ts(), 300UL);
    EXPECT_EQ(user_0_item_1.records().start_ts(), 200UL);
    EXPECT_EQ(user_0_item_1.records().entries_size(), 1);
    EXPECT_EQ(user_0_item_1.records().entries(0).uid_name(), "app3");
    EXPECT_EQ(user_0_item_1.records().entries(0).user_id(), 0UL);
    EXPECT_EQ(user_0_item_1.records().entries(0).uid_io().rd_bg_chg_off(), 1000UL);

    EXPECT_EQ(protos[1].uid_io_usage().uid_io_items_size(), 2);
    const UidIOItem& user_1_item_0 = protos[1].uid_io_usage().uid_io_items(0);
    EXPECT_EQ(user_1_item_0.end_ts(), 200UL);
    EXPECT_EQ(user_1_item_0.records().start_ts(), 100UL);
    EXPECT_EQ(user_1_item_0.records().entries_size(), 1);
    EXPECT_EQ(user_1_item_0.records().entries(0).uid_name(), "app1");
    EXPECT_EQ(user_1_item_0.records().entries(0).user_id(), 1UL);
    EXPECT_EQ(user_1_item_0.records().entries(0).uid_io().rd_fg_chg_on(), 1000UL);
    EXPECT_EQ(user_1_item_0.records().entries(0).uid_io().wr_fg_chg_on(), 1000UL);
    const UidIOItem& user_1_item_1 = protos[1].uid_io_usage().uid_io_items(1);
    EXPECT_EQ(user_1_item_1.end_ts(), 300UL);
    EXPECT_EQ(user_1_item_1.records().start_ts(), 200UL);
    EXPECT_EQ(user_1_item_1.records().entries_size(), 1);
    EXPECT_EQ(user_1_item_1.records().entries(0).uid_name(), "app1");
    EXPECT_EQ(user_1_item_1.records().entries(0).user_id(), 1UL);
    EXPECT_EQ(user_1_item_1.records().entries(0).uid_io().wr_fg_chg_off(), 1000UL);

    io_history.clear();

    io_history[300] = {
        .start_ts = 200,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
        },
    };

    io_history[400] = {
        .start_ts = 300,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
        },
    };

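    // Loading the protos back should merge the persisted records into the
    // in-memory history without duplicating what is already there: [200] is
    // restored, [300] gains the persisted entries, and [400] stays as-is.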
    uidm.load_uid_io_proto(0, protos[0].uid_io_usage());
    uidm.load_uid_io_proto(1, protos[1].uid_io_usage());

    EXPECT_EQ(io_history.size(), 3UL);
    EXPECT_EQ(io_history.count(200), 1UL);
    EXPECT_EQ(io_history.count(300), 1UL);
    EXPECT_EQ(io_history.count(400), 1UL);

    EXPECT_EQ(io_history[200].start_ts, 100UL);
    const vector<struct uid_record>& entries_0 = io_history[200].entries;
    EXPECT_EQ(entries_0.size(), 3UL);
    EXPECT_EQ(entries_0[0].name, "app1");
    EXPECT_EQ(entries_0[0].ios.user_id, 0UL);
    EXPECT_EQ(entries_0[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(entries_0[1].name, "app2");
    EXPECT_EQ(entries_0[1].ios.user_id, 0UL);
    EXPECT_EQ(entries_0[1].ios.uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF], 1000UL);
    EXPECT_EQ(entries_0[2].name, "app1");
    EXPECT_EQ(entries_0[2].ios.user_id, 1UL);
    EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[READ][FOREGROUND][CHARGER_ON], 1000UL);

    EXPECT_EQ(io_history[300].start_ts, 200UL);
    const vector<struct uid_record>& entries_1 = io_history[300].entries;
    EXPECT_EQ(entries_1.size(), 3UL);
    EXPECT_EQ(entries_1[0].name, "app1");
    EXPECT_EQ(entries_1[0].ios.user_id, 0UL);
    EXPECT_EQ(entries_1[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(entries_1[1].name, "app3");
    EXPECT_EQ(entries_1[1].ios.user_id, 0UL);
    EXPECT_EQ(entries_1[1].ios.uid_ios.bytes[READ][BACKGROUND][CHARGER_OFF], 1000UL);
    EXPECT_EQ(entries_1[2].name, "app1");
    EXPECT_EQ(entries_1[2].ios.user_id, 1UL);
    EXPECT_EQ(entries_1[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_OFF], 1000UL);

    EXPECT_EQ(io_history[400].start_ts, 300UL);
    const vector<struct uid_record>& entries_2 = io_history[400].entries;
    EXPECT_EQ(entries_2.size(), 1UL);
    EXPECT_EQ(entries_2[0].name, "app1");
    EXPECT_EQ(entries_2[0].ios.user_id, 0UL);
    EXPECT_EQ(entries_2[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);

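    // merge_io_usage() collapses records that share a package name across
    // user ids, summing their byte counts.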
    map<string, io_usage> merged_entries_0 = merge_io_usage(entries_0);
    EXPECT_EQ(merged_entries_0.size(), 2UL);
    EXPECT_EQ(merged_entries_0.count("app1"), 1UL);
    EXPECT_EQ(merged_entries_0.count("app2"), 1UL);
    EXPECT_EQ(merged_entries_0["app1"].bytes[READ][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(merged_entries_0["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 2000UL);
    EXPECT_EQ(merged_entries_0["app2"].bytes[READ][FOREGROUND][CHARGER_OFF], 1000UL);

    map<string, io_usage> merged_entries_1 = merge_io_usage(entries_1);
    EXPECT_EQ(merged_entries_1.size(), 2UL);
    EXPECT_EQ(merged_entries_1.count("app1"), 1UL);
    EXPECT_EQ(merged_entries_1.count("app3"), 1UL);
    EXPECT_EQ(merged_entries_1["app1"].bytes[WRITE][FOREGROUND][CHARGER_OFF], 1000UL);
    EXPECT_EQ(merged_entries_1["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(merged_entries_1["app3"].bytes[READ][BACKGROUND][CHARGER_OFF], 1000UL);

    map<string, io_usage> merged_entries_2 = merge_io_usage(entries_2);
    EXPECT_EQ(merged_entries_2.size(), 1UL);
    EXPECT_EQ(merged_entries_2.count("app1"), 1UL);
    EXPECT_EQ(merged_entries_2["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);

    uidm.clear_user_history(0);

    EXPECT_EQ(io_history.size(), 2UL);
    EXPECT_EQ(io_history.count(200), 1UL);
    EXPECT_EQ(io_history.count(300), 1UL);

    EXPECT_EQ(io_history[200].entries.size(), 1UL);
    EXPECT_EQ(io_history[300].entries.size(), 1UL);

    uidm.clear_user_history(1);

    EXPECT_EQ(io_history.size(), 0UL);
}

TEST(storaged_test, load_uid_io_proto) {
    uid_monitor uidm;
    auto& io_history = uidm.io_history();

    static const uint64_t kProtoTime = 200;
    io_history[kProtoTime] = {
        .start_ts = 100,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
            { "app2", {
                .user_id = 0,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF] = 2000,
                }
            },
            { "app3", {
                .user_id = 0,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF] = 3000,
                }
            },
        },
    };

    unordered_map<int, StoragedProto> protos;
    uidm.update_uid_io_proto(&protos);
    ASSERT_EQ(protos.size(), size_t(1));

    // Loading the same proto many times should not add duplicate entries.
    UidIOUsage user_0 = protos[0].uid_io_usage();
    for (size_t i = 0; i < 10000; i++) {
        uidm.load_uid_io_proto(0, user_0);
    }
    ASSERT_EQ(io_history.size(), size_t(1));
    ASSERT_EQ(io_history[kProtoTime].entries.size(), size_t(3));

    // Create duplicate entries until we go over the limit.
    auto record = io_history[kProtoTime];
    io_history.clear();
    for (size_t i = 0; i < uid_monitor::MAX_UID_RECORDS_SIZE * 2; i++) {
        if (i == kProtoTime) {
            continue;
        }
        io_history[i] = record;
    }
    ASSERT_GT(io_history.size(), size_t(uid_monitor::MAX_UID_RECORDS_SIZE));

    // After loading, the history should be truncated.
    for (auto& item : *user_0.mutable_uid_io_items()) {
        item.set_end_ts(io_history.size());
    }
    uidm.load_uid_io_proto(0, user_0);
    ASSERT_LE(io_history.size(), size_t(uid_monitor::MAX_UID_RECORDS_SIZE));
}