/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/core/SkTBlockList.h"
#include "tests/Test.h"

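// Unit tests for SkTBlockList: insertion via push_back/emplace_back, forward/reverse and
// const/non-const iteration, random access, pop_back, reset, reserve, and concat, with and
// without inline storage and with several block sizes.
//
// A minimal usage sketch for orientation (the same calls are exercised by the tests below):
//   SkTBlockList<int> list(/*itemsPerBlock=*/4);
//   list.push_back(1);
//   list.emplace_back(2);
//   for (int& v : list.items()) { /* visit v */ }
//   list.reset();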
namespace {
struct C {
    C() : fID(-1) { ++gInstCnt; }
    C(int id) : fID(id) { ++gInstCnt; }
    C(C&& c) : C(c.fID) {}
    C(const C& c) : C(c.fID) {}

    C& operator=(C&&) = default;
    C& operator=(const C&) = default;

    ~C() { --gInstCnt; }

    int fID;

    // Under the hood, SkTBlockList and SkBlockAllocator round up to max_align_t. If 'C' were
    // just 4 bytes, the internal blocks could often squeeze in a few extra instances. That is
    // fine, but it makes predicting block capacity trickier, so make sure C is a bit bigger.
    int fPadding[4];

    static int gInstCnt;
};
int C::gInstCnt = 0;

struct D {
    int fID;
};

}  // namespace

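// Helper that peeks at SkTBlockList's private SkBlockAllocator to validate sizing behavior.
// (It reaches fAllocator directly, so it presumably relies on SkTBlockList granting this class
// friend access.)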
class TBlockListTestAccess {
public:
    template<int N>
    static size_t ScratchBlockSize(SkTBlockList<C, N>& list) {
        return (size_t) list.fAllocator->scratchBlockSize();
    }

    template<int N>
    static size_t TotalSize(SkTBlockList<C, N>& list) {
        return list.fAllocator->totalSize();
    }
};

// Checks that the allocator has the correct count, etc and that the element IDs are correct.
// Then pops popCnt items and checks again.
template<int N>
static void check_allocator_helper(SkTBlockList<C, N>* allocator, int cnt, int popCnt,
                                   skiatest::Reporter* reporter) {
    REPORTER_ASSERT(reporter, (0 == cnt) == allocator->empty());
    REPORTER_ASSERT(reporter, cnt == allocator->count());
    REPORTER_ASSERT(reporter, cnt == C::gInstCnt);

    int i = 0;
    for (const C& c : allocator->items()) {
        REPORTER_ASSERT(reporter, i == c.fID);
        REPORTER_ASSERT(reporter, allocator->item(i).fID == i);
        ++i;
    }
    REPORTER_ASSERT(reporter, i == cnt);

    if (cnt > 0) {
        REPORTER_ASSERT(reporter, cnt-1 == allocator->back().fID);
    }

    if (popCnt > 0) {
        for (i = 0; i < popCnt; ++i) {
            allocator->pop_back();
        }
        check_allocator_helper(allocator, cnt - popCnt, 0, reporter);
    }
}

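// Verifies that forward and reverse iteration (const and non-const) and random access all visit
// exactly the items in 'expected', at the same addresses they had when they were pushed.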
template<int N>
static void check_iterator_helper(SkTBlockList<C, N>* allocator,
                                  const std::vector<C*>& expected,
                                  skiatest::Reporter* reporter) {
    const SkTBlockList<C, N>* cAlloc = allocator;
    REPORTER_ASSERT(reporter, (size_t) allocator->count() == expected.size());
    // Forward+const
    int i = 0;
    for (const C& c : cAlloc->items()) {
        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
        ++i;
    }
    REPORTER_ASSERT(reporter, (size_t) i == expected.size());

    // Forward+non-const
    i = 0;
    for (C& c : allocator->items()) {
        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
        ++i;
    }
    REPORTER_ASSERT(reporter, (size_t) i == expected.size());

    // Reverse+const
    i = (int) expected.size() - 1;
    for (const C& c : cAlloc->ritems()) {
        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
        --i;
    }
    REPORTER_ASSERT(reporter, i == -1);

    // Reverse+non-const
    i = (int) expected.size() - 1;
    for (C& c : allocator->ritems()) {
        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
        --i;
    }
    REPORTER_ASSERT(reporter, i == -1);

    // Also test random access
    for (i = 0; i < allocator->count(); ++i) {
        REPORTER_ASSERT(reporter, (uintptr_t) &allocator->item(i) == (uintptr_t) expected[i]);
        REPORTER_ASSERT(reporter, (uintptr_t) &cAlloc->item(i) == (uintptr_t) expected[i]);
    }
}

// Adds cnt items to the allocator, tests the cnts and iterators, pops popCnt items and checks
// again. Finally it resets the allocator and checks again.
template<int N>
static void check_allocator(SkTBlockList<C, N>* allocator, int cnt, int popCnt,
                            skiatest::Reporter* reporter) {
    enum ItemInitializer : int {
        kCopyCtor,
        kMoveCtor,
        kCopyAssign,
        kMoveAssign,
        kEmplace,
    };
    static constexpr int kInitCount = (int) kEmplace + 1;

    SkASSERT(allocator);
    SkASSERT(allocator->empty());
    std::vector<C*> items;
    for (int i = 0; i < cnt; ++i) {
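        // Cycle through all five insertion paths (copy/move construct, copy/move assign,
        // emplace) so every supported way of adding an item gets exercised.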
        switch((ItemInitializer) (i % kInitCount)) {
            case kCopyCtor:
                allocator->push_back(C(i));
                break;
            case kMoveCtor:
                allocator->push_back(std::move(C(i)));
                break;
            case kCopyAssign:
                allocator->push_back() = C(i);
                break;
            case kMoveAssign:
                allocator->push_back() = std::move(C(i));
                break;
            case kEmplace:
                allocator->emplace_back(i);
                break;
        }
        items.push_back(&allocator->back());
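        // (check_iterator_helper() below relies on these recorded addresses remaining stable
        // as more items are pushed into the block list.)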
    }
    check_iterator_helper(allocator, items, reporter);
    check_allocator_helper(allocator, cnt, popCnt, reporter);
    allocator->reset();
    check_iterator_helper(allocator, {}, reporter);
    check_allocator_helper(allocator, 0, 0, reporter);
}

template<int N>
static void run_allocator_test(SkTBlockList<C, N>* allocator, skiatest::Reporter* reporter) {
    check_allocator(allocator, 0, 0, reporter);
    check_allocator(allocator, 1, 1, reporter);
    check_allocator(allocator, 2, 2, reporter);
    check_allocator(allocator, 10, 1, reporter);
    check_allocator(allocator, 10, 5, reporter);
    check_allocator(allocator, 10, 10, reporter);
    check_allocator(allocator, 100, 10, reporter);
}

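// Builds listA and listB with a continuous ID sequence split across them, concatenates B into A,
// and verifies the combined count, the live C instance count, and the ID ordering.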
template<int N1, int N2>
static void run_concat_test(skiatest::Reporter* reporter, int aCount, int bCount) {

    SkTBlockList<C, N1> listA;
    SkTBlockList<C, N2> listB;

    for (int i = 0; i < aCount; ++i) {
        listA.emplace_back(i);
    }
    for (int i = 0; i < bCount; ++i) {
        listB.emplace_back(aCount + i);
    }

    REPORTER_ASSERT(reporter, listA.count() == aCount && listB.count() == bCount);
    REPORTER_ASSERT(reporter, C::gInstCnt == aCount + bCount);

    // Concatenate B into A and verify.
    listA.concat(std::move(listB));
    REPORTER_ASSERT(reporter, listA.count() == aCount + bCount);
    // SkTBlockList guarantees the moved-from list is empty, but clang-tidy doesn't know that;
    // in practice we won't be using moved-from lists, so this won't pollute the main code base
    // with lots of warning disables.
    REPORTER_ASSERT(reporter, listB.count() == 0); // NOLINT(bugprone-use-after-move)
    REPORTER_ASSERT(reporter, C::gInstCnt == aCount + bCount);

    int i = 0;
    for (const C& item : listA.items()) {
        // By construction of A and B originally, the concatenated id sequence is continuous
        REPORTER_ASSERT(reporter, i == item.fID);
        i++;
    }
    REPORTER_ASSERT(reporter, i == (aCount + bCount));
}

template<int N1, int N2>
static void run_concat_trivial_test(skiatest::Reporter* reporter, int aCount, int bCount) {
    static_assert(std::is_trivially_copyable<D>::value);

    // This is similar to run_concat_test(), except that since D is trivial we can't verify the
    // instance counts that are tracked via ctor/dtor.
    SkTBlockList<D, N1> listA;
    SkTBlockList<D, N2> listB;

    for (int i = 0; i < aCount; ++i) {
        listA.push_back({i});
    }
    for (int i = 0; i < bCount; ++i) {
        listB.push_back({aCount + i});
    }

    REPORTER_ASSERT(reporter, listA.count() == aCount && listB.count() == bCount);
    // Concatenate B into A and verify.
    listA.concat(std::move(listB));
    REPORTER_ASSERT(reporter, listA.count() == aCount + bCount);
    REPORTER_ASSERT(reporter, listB.count() == 0); // NOLINT(bugprone-use-after-move): see above

    int i = 0;
    for (const D& item : listA.items()) {
        // By construction of A and B originally, the concatenated id sequence is continuous
        REPORTER_ASSERT(reporter, i == item.fID);
        i++;
    }
    REPORTER_ASSERT(reporter, i == (aCount + bCount));
}

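// Verifies that reserve() pre-allocates space without constructing any items, and that later
// push_backs consume the reserved space without growing the list's total allocation.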
template<int N>
static void run_reserve_test(skiatest::Reporter* reporter) {
    constexpr int kItemsPerBlock = N + 4; // Make this a number > 1, even if N starting items == 1

    SkTBlockList<C, N> list(kItemsPerBlock);
    size_t initialSize = TBlockListTestAccess::TotalSize(list);
    // Should be able to add N instances of C w/o changing size from initialSize
    for (int i = 0; i < N; ++i) {
        list.push_back(C(i));
    }
    REPORTER_ASSERT(reporter, initialSize == TBlockListTestAccess::TotalSize(list));

    // Reserve room for 2*kItemsPerBlock items
    list.reserve(2 * kItemsPerBlock);
    REPORTER_ASSERT(reporter, list.count() == N); // count shouldn't change though

    size_t reservedSize = TBlockListTestAccess::TotalSize(list);
    REPORTER_ASSERT(reporter, reservedSize >= initialSize + 2 * kItemsPerBlock * sizeof(C));
    for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
        list.push_back(C(i));
    }
    REPORTER_ASSERT(reporter, reservedSize == TBlockListTestAccess::TotalSize(list));

270 // Make the next block partially fully (N > 0 but < kItemsPerBlock)
    for (int i = 0; i < N; ++i) {
        list.push_back(C(i));
    }

    // Reserve room again for 2*kItemsPerBlock, but reserve should automatically account for the
    // (kItemsPerBlock-N) slots that are still available in the active block
    list.reserve(2 * kItemsPerBlock);
    int extraReservedCount = kItemsPerBlock + N;
    // Because SkTBlockList normally allocates blocks in fixed sizes, and extraReservedCount >
    // items-per-block, it will always use that size and not that of the growth policy.
    REPORTER_ASSERT(reporter, TBlockListTestAccess::ScratchBlockSize(list) >=
                              extraReservedCount * sizeof(C));

    reservedSize = TBlockListTestAccess::TotalSize(list);
    for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
        list.push_back(C(i));
    }
    REPORTER_ASSERT(reporter, reservedSize == TBlockListTestAccess::TotalSize(list));

    // If we reserve a count < items-per-block, it will use the fixed size from the growth policy.
    list.reserve(2);
    REPORTER_ASSERT(reporter, TBlockListTestAccess::ScratchBlockSize(list) >=
                              kItemsPerBlock * sizeof(C));

    // Ensure the reservations didn't initialize any more C's than anticipated
    int expectedInstanceCount = 2 * (N + 2 * kItemsPerBlock);
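    // (= N initial items + 2*kItemsPerBlock after the first reserve + N in the partial block
    //  + 2*kItemsPerBlock after the second reserve)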
    REPORTER_ASSERT(reporter, expectedInstanceCount == C::gInstCnt);

    list.reset();
    REPORTER_ASSERT(reporter, 0 == C::gInstCnt);
}

DEF_TEST(SkTBlockList, reporter) {
    // Test combinations of allocators with and without stack storage and with different block sizes
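    // a1/a2/a5 are heap-only lists whose constructor argument sets the items allocated per
    // block; sa1/sa3/sa4 use the template parameter, which (per run_reserve_test above) reserves
    // space for that many starting items.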
    SkTBlockList<C> a1(1);
    run_allocator_test(&a1, reporter);

    SkTBlockList<C> a2(2);
    run_allocator_test(&a2, reporter);

    SkTBlockList<C> a5(5);
    run_allocator_test(&a5, reporter);

    SkTBlockList<C, 1> sa1;
    run_allocator_test(&sa1, reporter);

    SkTBlockList<C, 3> sa3;
    run_allocator_test(&sa3, reporter);

    SkTBlockList<C, 4> sa4;
    run_allocator_test(&sa4, reporter);

    run_reserve_test<1>(reporter);
    run_reserve_test<2>(reporter);
    run_reserve_test<3>(reporter);
    run_reserve_test<4>(reporter);
    run_reserve_test<5>(reporter);

    run_concat_test<1, 1>(reporter, 10, 10);
    run_concat_test<5, 1>(reporter, 50, 10);
    run_concat_test<1, 5>(reporter, 10, 50);
    run_concat_test<5, 5>(reporter, 100, 100);

    run_concat_trivial_test<1, 1>(reporter, 10, 10);
    run_concat_trivial_test<5, 1>(reporter, 50, 10);
    run_concat_trivial_test<1, 5>(reporter, 10, 50);
    run_concat_trivial_test<5, 5>(reporter, 100, 100);
}