/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/core/SkBlockAllocator.h"
#include "tests/Test.h"

#include <cstring>
#include <vector>

using Block = SkBlockAllocator::Block;
using GrowthPolicy = SkBlockAllocator::GrowthPolicy;

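// Test-only access to the allocator's scratch block size, used by the size-accounting helpers
// and the scratch-block tests below.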
class BlockAllocatorTestAccess {
public:
    template<size_t N>
    static size_t ScratchBlockSize(SkSBlockAllocator<N>& pool) {
        return (size_t) pool->scratchBlockSize();
    }
};

// Helper functions for modifying the allocator in a controlled manner
template<size_t N>
static int block_count(const SkSBlockAllocator<N>& pool) {
    int ct = 0;
    for (const Block* b : pool->blocks()) {
        (void) b;
        ct++;
    }
    return ct;
}

template<size_t N>
static Block* get_block(SkSBlockAllocator<N>& pool, int blockIndex) {
    Block* found = nullptr;
    int i = 0;
    for (Block* b : pool->blocks()) {
        if (i == blockIndex) {
            found = b;
            break;
        }
        i++;
    }

    SkASSERT(found != nullptr);
    return found;
}

// SkBlockAllocator holds on to the largest last-released block to reuse for new allocations,
// and this is still counted in its totalSize(). However, it's easier to reason about
// size - scratch in many of these tests.
template<size_t N>
static size_t total_size(SkSBlockAllocator<N>& pool) {
    return pool->totalSize() - BlockAllocatorTestAccess::ScratchBlockSize(pool);
}

template<size_t N>
static size_t add_block(SkSBlockAllocator<N>& pool) {
    size_t currentSize = total_size(pool);
    SkBlockAllocator::Block* current = pool->currentBlock();
    while (pool->currentBlock() == current) {
        pool->template allocate<4>(pool->preallocSize() / 2);
    }
    return total_size(pool) - currentSize;
}

template<size_t N>
static void* alloc_byte(SkSBlockAllocator<N>& pool) {
    auto br = pool->template allocate<1>(1);
    return br.fBlock->ptr(br.fAlignedOffset);
}

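// For reference, every allocation in these tests is returned as an SkBlockAllocator::ByteRange:
// fBlock is the block that serviced the request and fAlignedOffset is the offset of the usable,
// aligned bytes within it. A minimal caller-side sketch (illustrative only; 'MyType' is a
// hypothetical type, not something used by these tests):
//
//     SkSBlockAllocator<1024> alloc{};
//     auto br = alloc->allocate<alignof(MyType)>(sizeof(MyType));
//     MyType* obj = new (br.fBlock->ptr(br.fAlignedOffset)) MyType();
//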
DEF_TEST(SkBlockAllocatorPreallocSize, r) {
    // Tests stack/member initialization, option #1 described in doc
    SkBlockAllocator stack{GrowthPolicy::kFixed, 2048};
    SkDEBUGCODE(stack.validate();)

    REPORTER_ASSERT(r, stack.preallocSize() == sizeof(SkBlockAllocator));
    REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());

    // Tests placement new initialization to increase head block size, option #2
    void* mem = operator new(1024);
    SkBlockAllocator* placement = new (mem) SkBlockAllocator(GrowthPolicy::kLinear, 1024,
                                                             1024 - sizeof(SkBlockAllocator));
    REPORTER_ASSERT(r, placement->preallocSize() == 1024);
    REPORTER_ASSERT(r, placement->preallocUsableSpace() < 1024 &&
                       placement->preallocUsableSpace() >= (1024 - sizeof(SkBlockAllocator)));
    delete placement;

    // Tests inline increased preallocation, option #3
    SkSBlockAllocator<2048> inlined{};
    SkDEBUGCODE(inlined->validate();)
    REPORTER_ASSERT(r, inlined->preallocSize() == 2048);
    REPORTER_ASSERT(r, inlined->preallocUsableSpace() < 2048 &&
                       inlined->preallocUsableSpace() >= (2048 - sizeof(SkBlockAllocator)));
}

DEF_TEST(SkBlockAllocatorAlloc, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Assumes the previous pointer was in the same block
    auto validate_ptr = [&](int align, int size,
                            SkBlockAllocator::ByteRange br,
                            SkBlockAllocator::ByteRange* prevBR) {
        uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
        // Matches the requested align
        REPORTER_ASSERT(r, pt % align == 0);
        // And large enough
        REPORTER_ASSERT(r, br.fEnd - br.fAlignedOffset >= size);
        // And has enough padding for alignment
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart >= 0);
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart <= align - 1);
        // And block of the returned struct is the current block of the allocator
        REPORTER_ASSERT(r, pool->currentBlock() == br.fBlock);

        // And make sure that we're past the required end of the previous allocation
        if (prevBR) {
            uintptr_t prevEnd =
                    reinterpret_cast<uintptr_t>(prevBR->fBlock->ptr(prevBR->fEnd - 1));
            REPORTER_ASSERT(r, pt > prevEnd);
        }

        // And make sure that the entire byte range is safe to write into (excluding the dead space
        // between "start" and "aligned offset," which is just padding and is left poisoned)
        std::memset(br.fBlock->ptr(br.fAlignedOffset), 0xFF, br.fEnd - br.fAlignedOffset);
    };

    auto p1 = pool->allocate<1>(14);
    validate_ptr(1, 14, p1, nullptr);

    auto p2 = pool->allocate<2>(24);
    validate_ptr(2, 24, p2, &p1);

    auto p4 = pool->allocate<4>(28);
    validate_ptr(4, 28, p4, &p2);

    auto p8 = pool->allocate<8>(40);
    validate_ptr(8, 40, p8, &p4);

    auto p16 = pool->allocate<16>(64);
    validate_ptr(16, 64, p16, &p8);

    auto p32 = pool->allocate<32>(96);
    validate_ptr(32, 96, p32, &p16);

    // All of these allocations should be in the head block
    REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
    SkDEBUGCODE(pool->validate();)

    // Requesting an allocation of avail() should not make a new block
    size_t avail = pool->currentBlock()->avail<4>();
    auto pAvail = pool->allocate<4>(avail);
    validate_ptr(4, avail, pAvail, &p32);

    // Remaining should be less than the alignment that was requested, and then
    // the next allocation will make a new block
    REPORTER_ASSERT(r, pool->currentBlock()->avail<4>() < 4);
    auto pNextBlock = pool->allocate<4>(4);
    validate_ptr(4, 4, pNextBlock, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > pool->preallocSize());

    // Allocating more than avail() makes another block
    size_t currentSize = total_size(pool);
    size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
    auto pTooBig = pool->allocate<4>(bigRequest);
    validate_ptr(4, bigRequest, pTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > currentSize);

    // Allocating more than the default growth policy (1024 in this case) will still fulfill
    // the request
    REPORTER_ASSERT(r, total_size(pool) - currentSize < 4096);
    currentSize = total_size(pool);
    auto pReallyTooBig = pool->allocate<4>(4096);
    validate_ptr(4, 4096, pReallyTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) >= currentSize + 4096);
    SkDEBUGCODE(pool->validate();)
}

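// Note on Block::resize() as exercised below: the third argument is a signed byte delta applied
// to the end of the range [fStart, fEnd), so the caller bumps fEnd by the same amount after a
// successful grow, and a negative delta shrinks the allocation.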
DEF_TEST(SkBlockAllocatorResize, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Fixed resize from 16 to 32
    SkBlockAllocator::ByteRange p = pool->allocate<4>(16);
    REPORTER_ASSERT(r, p.fBlock->avail<4>() > 16);
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, 16));
    p.fEnd += 16;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x11, p.fEnd - p.fAlignedOffset);

    // Subsequent allocation is 32 bytes ahead of 'p' now, and 'p' cannot be resized further.
    auto pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    REPORTER_ASSERT(r, p.fBlock == pNext.fBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, 48));

    // Confirm that releasing pNext allows 'p' to be resized, and that it can be resized up to avail
    REPORTER_ASSERT(r, p.fBlock->release(pNext.fStart, pNext.fEnd));
    int fillBlock = p.fBlock->avail<4>();
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, fillBlock));
    p.fEnd += fillBlock;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x22, p.fEnd - p.fAlignedOffset);

    // Confirm that resizing when there's not enough room fails
    REPORTER_ASSERT(r, p.fBlock->avail<4>() < fillBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, fillBlock));

    // Confirm that we can shrink 'p' back to 32 bytes and then further allocate again
    int shrinkTo32 = p.fStart - p.fEnd + 32;
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, shrinkTo32));
    p.fEnd += shrinkTo32;
    REPORTER_ASSERT(r, p.fEnd - p.fStart == 32);

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x33, p.fEnd - p.fAlignedOffset);

    pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    SkDEBUGCODE(pool->validate();)

    // Confirm that we can't shrink past the start of the allocation, but we can shrink it to 0
    int shrinkTo0 = pNext.fStart - pNext.fEnd;
#ifndef SK_DEBUG
    // Only test for false on release builds; a negative size should assert on debug builds
    REPORTER_ASSERT(r, !pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0 - 1));
#endif
    REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
}

DEF_TEST(SkBlockAllocatorRelease, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Successful allocate and release
    auto p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, pool->currentBlock()->release(p.fStart, p.fEnd));
    // Ensure the above release actually means the next allocation reuses the same space
    auto p2 = pool->allocate<8>(32);
    REPORTER_ASSERT(r, p.fStart == p2.fStart);

    // Confirm that 'p2' cannot be released if another allocation came after it
    auto p3 = pool->allocate<8>(64);
    (void) p3;
    REPORTER_ASSERT(r, !p2.fBlock->release(p2.fStart, p2.fEnd));

    // Confirm that 'p4' can be released if 'p5' is released first, and confirm that 'p2' and 'p3'
    // can be released simultaneously (equivalent to 'p3' then 'p2').
    auto p4 = pool->allocate<8>(16);
    auto p5 = pool->allocate<8>(96);
    REPORTER_ASSERT(r, p5.fBlock->release(p5.fStart, p5.fEnd));
    REPORTER_ASSERT(r, p4.fBlock->release(p4.fStart, p4.fEnd));
    REPORTER_ASSERT(r, p2.fBlock->release(p2.fStart, p3.fEnd));

    // And confirm that passing in the wrong size for the allocation fails
    p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd - 16));
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd + 16));
    REPORTER_ASSERT(r, p.fBlock->release(p.fStart, p.fEnd));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorRewind, r) {
    // Confirm that a bunch of allocations followed by releases in stack order fully rewinds back
    // to the start of the block (i.e. unwinds the entire stack, not just the last cursor position)
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    std::vector<SkBlockAllocator::ByteRange> ptrs;
    ptrs.reserve(32); // silence clang-tidy performance warning
    for (int i = 0; i < 32; ++i) {
        ptrs.push_back(pool->allocate<4>(16));
    }

    // Release everything in reverse order
    SkDEBUGCODE(pool->validate();)
    for (int i = 31; i >= 0; --i) {
        auto br = ptrs[i];
        REPORTER_ASSERT(r, br.fBlock->release(br.fStart, br.fEnd));
    }

    // If correct, we've rewound all the way back to the start of the block, so a new allocation
    // will have the same location as ptrs[0]
    SkDEBUGCODE(pool->validate();)
    REPORTER_ASSERT(r, pool->allocate<4>(16).fStart == ptrs[0].fStart);
}

DEF_TEST(SkBlockAllocatorGrowthPolicy, r) {
    static constexpr int kInitSize = 128;
    static constexpr int kBlockCount = 5;
    static constexpr size_t kExpectedSizes[SkBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
            // kFixed -> kInitSize per block
            { kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
            // kLinear -> (block ct + 1) * kInitSize for next block
            { kInitSize, 2 * kInitSize, 3 * kInitSize, 4 * kInitSize, 5 * kInitSize },
            // kFibonacci -> 1, 1, 2, 3, 5 * kInitSize for the blocks
            { kInitSize, kInitSize, 2 * kInitSize, 3 * kInitSize, 5 * kInitSize },
            // kExponential -> 1, 2, 4, 8, 16 * kInitSize for the blocks
            { kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
    };

    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
        SkSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
        for (int i = 1; i < kBlockCount; ++i) {
            REPORTER_ASSERT(r, kExpectedSizes[gp][i] == add_block(pool));
        }

        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(SkBlockAllocatorReset, r) {
    static constexpr int kBlockIncrement = 1024;

    SkSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
    SkDEBUGCODE(pool->validate();)

    void* firstAlloc = alloc_byte(pool);

    // Add several blocks
    add_block(pool);
    add_block(pool);
    add_block(pool);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 4); // 3 added plus the implicit head

    get_block(pool, 0)->setMetadata(2);

    // Reset and confirm that there's only one block, a new allocation matches 'firstAlloc' again,
    // and new blocks are sized based on a reset growth policy.
    pool->reset();
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 1);
    REPORTER_ASSERT(r, pool->preallocSize() == pool->totalSize());
    REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0);

    REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
    REPORTER_ASSERT(r, 2 * kBlockIncrement == add_block(pool));
    REPORTER_ASSERT(r, 3 * kBlockIncrement == add_block(pool));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorReleaseBlock, r) {
    // This loops over all growth policies to make sure that the incremental releases update the
    // sequence correctly for each policy.
    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
        SkSBlockAllocator<1024> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        void* firstAlloc = alloc_byte(pool);

        size_t b1Size = total_size(pool);
        size_t b2Size = add_block(pool);
        size_t b3Size = add_block(pool);
        size_t b4Size = add_block(pool);
        SkDEBUGCODE(pool->validate();)

        get_block(pool, 0)->setMetadata(1);
        get_block(pool, 1)->setMetadata(2);
        get_block(pool, 2)->setMetadata(3);
        get_block(pool, 3)->setMetadata(4);

        // Remove the 3 added blocks, always removing the block at index 1 to test intermediate
        // removal (and, on the last iteration, tail removal).
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b2Size + b3Size + b4Size);
        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 3);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b3Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 2);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 4);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, total_size(pool) == b1Size);

        // Since we're back to just the head block, if we add a new block, the growth policy should
        // match the original sequence instead of continuing with a "b5Size"
        pool->resetScratchSpace();
        size_t size = add_block(pool);
        REPORTER_ASSERT(r, size == b2Size);
        pool->releaseBlock(get_block(pool, 1));

        // Explicitly release the head block and confirm it's reset
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
        REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0); // metadata reset too

        // Confirm that if we have > 1 block, but release the head block, we can still access the
        // others
        add_block(pool);
        add_block(pool);
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(SkBlockAllocatorIterateAndRelease, r) {
    SkSBlockAllocator<256> pool;

    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop forward and release the blocks
    int releaseCount = 0;
    for (auto* b : pool->blocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // The pool should be back to just the head block, which was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);

    // Add more blocks
    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop in reverse and release the blocks
    releaseCount = 0;
    for (auto* b : pool->rblocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // The pool should be back to just the head block, which was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);
}

DEF_TEST(SkBlockAllocatorScratchBlockReserve, r) {
    SkSBlockAllocator<256> pool;

    size_t added = add_block(pool);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
    size_t total = pool->totalSize();
    pool->releaseBlock(pool->currentBlock());

    // Total size shouldn't have changed; the released block should become scratch
    REPORTER_ASSERT(r, pool->totalSize() == total);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == added);

    // But a reset definitely deletes any scratch block
    pool->reset();
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);

    // Reserving more than what's available adds a scratch block, and the current block remains
    // available.
    size_t avail = pool->currentBlock()->avail();
    size_t reserve = avail + 1;
    pool->reserve(reserve);
    REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
    // And rounds up to the fixed size of this pool's growth policy
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= reserve &&
                       BlockAllocatorTestAccess::ScratchBlockSize(pool) % 256 == 0);

    // Allocating more than avail activates the scratch block (so totalSize doesn't change)
    size_t preAllocTotalSize = pool->totalSize();
    pool->allocate<1>(avail + 1);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
    REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);

    // When reserving less than what's still available in the current block, no scratch block is
    // added.
    pool->reserve(pool->currentBlock()->avail());
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);

    // Unless checking available bytes is disabled
    pool->reserve(pool->currentBlock()->avail(), SkBlockAllocator::kIgnoreExistingBytes_Flag);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0);

    // If kIgnoreGrowthPolicy is specified, the new scratch block should not have been rounded up
    // to the growth policy's block size (a fixed 256 bytes per block in this case).
    pool->resetScratchSpace();
    pool->reserve(32, SkBlockAllocator::kIgnoreGrowthPolicy_Flag);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0 &&
                       BlockAllocatorTestAccess::ScratchBlockSize(pool) < 256);

    // When requesting an allocation larger than the current block and the scratch block, a new
    // block is added, and the scratch block remains scratch.
    SkBlockAllocator::Block* oldTail = pool->currentBlock();
    avail = oldTail->avail();
    size_t scratchAvail = 2 * avail;
    pool->reserve(scratchAvail);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= scratchAvail);

    // This allocation request is larger than oldTail's available space and the scratch size, so
    // we should add a new block and the scratch size should stay the same.
    scratchAvail = BlockAllocatorTestAccess::ScratchBlockSize(pool);
    pool->allocate<1>(scratchAvail + 1);
    REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == scratchAvail);
}

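// stealHeapBlocks() only transfers heap-allocated blocks between allocators; each pool keeps its
// preallocated head block, which is what the block counts and totalSize() checks below verify.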
DEF_TEST(SkBlockAllocatorStealBlocks, r) {
    SkSBlockAllocator<256> poolA;
    SkSBlockAllocator<128> poolB;

    add_block(poolA);
    add_block(poolA);
    add_block(poolA);

    add_block(poolB);
    add_block(poolB);

    char* bAlloc = (char*) alloc_byte(poolB);
    *bAlloc = 't';

    const SkBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);

    REPORTER_ASSERT(r, block_count(poolA) == 4);
    REPORTER_ASSERT(r, block_count(poolB) == 3);

    size_t aSize = poolA->totalSize();
    size_t bSize = poolB->totalSize();
    size_t theftSize = bSize - poolB->preallocSize();

    // This steal should move B's 2 heap blocks to A, bringing A to 6 and B to just its head
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
    REPORTER_ASSERT(r, poolB->preallocSize() == poolB->totalSize());
    REPORTER_ASSERT(r, poolA->totalSize() == aSize + theftSize);

    REPORTER_ASSERT(r, *bAlloc == 't');
    REPORTER_ASSERT(r, (uintptr_t) poolA->findOwningBlock(bAlloc) == (uintptr_t) allocOwner);
    REPORTER_ASSERT(r, !poolB->findOwningBlock(bAlloc));

    // Redoing the steal now that B is just a head block should be a no-op
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
}

// These tests ensure that the allocation padding mechanism works as intended
struct TestMeta {
    int fX1;
    int fX2;
};
struct alignas(32) TestMetaBig {
    int fX1;
    int fX2;
};

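// A sketch of the padded-allocation pattern these tests exercise (illustrative only): requesting
// allocate<Align, sizeof(Meta)>(n) guarantees at least sizeof(Meta) bytes between fStart and
// fAlignedOffset, so per-allocation metadata can live immediately before the aligned user data:
//
//     auto br = pool->allocate<16, sizeof(TestMeta)>(16);
//     TestMeta* meta = new (br.fBlock->ptr(br.fAlignedOffset - sizeof(TestMeta))) TestMeta;
//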
DEF_TEST(SkBlockAllocatorMetadata, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Allocation where alignment of user data > alignment of metadata
    SkASSERT(alignof(TestMeta) < 16);
    auto p1 = pool->allocate<16, sizeof(TestMeta)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p1.fAlignedOffset - p1.fStart >= (int) sizeof(TestMeta));
    TestMeta* meta = static_cast<TestMeta*>(p1.fBlock->ptr(p1.fAlignedOffset - sizeof(TestMeta)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(meta) % alignof(TestMeta) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p1.fBlock->ptr(p1.fAlignedOffset)) % 16 == 0);
    // Access fields to make sure 'meta' matches the compiler's expectations...
    meta->fX1 = 2;
    meta->fX2 = 5;

    // Repeat, but for metadata that has a larger alignment than the allocation
    SkASSERT(alignof(TestMetaBig) == 32);
    auto p2 = pool->allocate<alignof(TestMetaBig), sizeof(TestMetaBig)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p2.fAlignedOffset - p2.fStart >= (int) sizeof(TestMetaBig));
    TestMetaBig* metaBig = static_cast<TestMetaBig*>(
            p2.fBlock->ptr(p2.fAlignedOffset - sizeof(TestMetaBig)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(metaBig) % alignof(TestMetaBig) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p2.fBlock->ptr(p2.fAlignedOffset)) % 16 == 0);
    // Access fields
    metaBig->fX1 = 3;
    metaBig->fX2 = 6;

    // Ensure metadata values persist after allocations
    REPORTER_ASSERT(r, meta->fX1 == 2 && meta->fX2 == 5);
    REPORTER_ASSERT(r, metaBig->fX1 == 3 && metaBig->fX2 == 6);
}

DEF_TEST(SkBlockAllocatorAllocatorMetadata, r) {
    SkSBlockAllocator<256> pool{};
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, pool->metadata() == 0); // initial value

    pool->setMetadata(4);
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // Releasing the head block doesn't change the allocator's metadata (even though that's where
    // it is stored).
    pool->releaseBlock(pool->headBlock());
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // But resetting the whole allocator brings things back to as if it were newly constructed
    pool->reset();
    REPORTER_ASSERT(r, pool->metadata() == 0);
}

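// Verifies that owningBlock() recovers the correct Block from both the user pointer and the
// metadata pointer of an allocation made with the given alignment and padding, and that the
// block address can also be reconstructed directly from the user pointer and fAlignedOffset.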
template<size_t Align, size_t Padding>
static void run_owning_block_test(skiatest::Reporter* r, SkBlockAllocator* pool) {
    auto br = pool->allocate<Align, Padding>(1);

    void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
    void* metaPtr = br.fBlock->ptr(br.fAlignedOffset - Padding);

    Block* block = pool->owningBlock<Align, Padding>(userPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = pool->owningBlock<Align>(metaPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(userPtr) - br.fAlignedOffset);
    REPORTER_ASSERT(r, block == br.fBlock);
}

template<size_t Padding>
static void run_owning_block_tests(skiatest::Reporter* r, SkBlockAllocator* pool) {
    run_owning_block_test<1, Padding>(r, pool);
    run_owning_block_test<2, Padding>(r, pool);
    run_owning_block_test<4, Padding>(r, pool);
    run_owning_block_test<8, Padding>(r, pool);
    run_owning_block_test<16, Padding>(r, pool);
    run_owning_block_test<32, Padding>(r, pool);
    run_owning_block_test<64, Padding>(r, pool);
    run_owning_block_test<128, Padding>(r, pool);
}

DEF_TEST(SkBlockAllocatorOwningBlock, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    run_owning_block_tests<1>(r, pool.allocator());
    run_owning_block_tests<2>(r, pool.allocator());
    run_owning_block_tests<4>(r, pool.allocator());
    run_owning_block_tests<8>(r, pool.allocator());
    run_owning_block_tests<16>(r, pool.allocator());
    run_owning_block_tests<32>(r, pool.allocator());

    // And some weird numbers
    run_owning_block_tests<3>(r, pool.allocator());
    run_owning_block_tests<9>(r, pool.allocator());
    run_owning_block_tests<17>(r, pool.allocator());
}