/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"
#include "tests/Test.h"

using Block = GrBlockAllocator::Block;
using GrowthPolicy = GrBlockAllocator::GrowthPolicy;

// Helper functions for modifying the allocator in a controlled manner
template<size_t N>
static int block_count(const GrSBlockAllocator<N>& pool) {
    int ct = 0;
    for (const Block* b : pool->blocks()) {
        (void) b;
        ct++;
    }
    return ct;
}

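// Returns the 'blockIndex'-th block in the pool's block list, asserting that it exists.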
template<size_t N>
static Block* get_block(GrSBlockAllocator<N>& pool, int blockIndex) {
    Block* found = nullptr;
    int i = 0;
    for (Block* b : pool->blocks()) {
        if (i == blockIndex) {
            found = b;
            break;
        }
        i++;
    }

    SkASSERT(found != nullptr);
    return found;
}

// GrBlockAllocator holds on to the largest last-released block to reuse for new allocations,
// and this is still counted in its totalSize(). However, it's easier to reason about size - scratch
// in many of these tests.
template<size_t N>
static size_t total_size(GrSBlockAllocator<N>& pool) {
    return pool->totalSize() - pool->testingOnly_scratchBlockSize();
}

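// Forces the allocator to add a new block by allocating until currentBlock() changes, and returns
// the amount that the new block contributed to total_size().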
template<size_t N>
static size_t add_block(GrSBlockAllocator<N>& pool) {
    size_t currentSize = total_size(pool);
    GrBlockAllocator::Block* current = pool->currentBlock();
    while (pool->currentBlock() == current) {
        pool->template allocate<4>(pool->preallocSize() / 2);
    }
    return total_size(pool) - currentSize;
}

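// Allocates a single byte from the pool and returns its address.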
template<size_t N>
static void* alloc_byte(GrSBlockAllocator<N>& pool) {
    auto br = pool->template allocate<1>(1);
    return br.fBlock->ptr(br.fAlignedOffset);
}

DEF_TEST(GrBlockAllocatorPreallocSize, r) {
    // Tests stack/member initialization, option #1 described in doc
    GrBlockAllocator stack{GrowthPolicy::kFixed, 2048};
    SkDEBUGCODE(stack.validate();)

    REPORTER_ASSERT(r, stack.preallocSize() == sizeof(GrBlockAllocator));
    REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());

    // Tests placement new initialization to increase head block size, option #2
    void* mem = operator new(1024);
    GrBlockAllocator* placement = new (mem) GrBlockAllocator(GrowthPolicy::kLinear, 1024,
                                                             1024 - sizeof(GrBlockAllocator));
    REPORTER_ASSERT(r, placement->preallocSize() == 1024);
    REPORTER_ASSERT(r, placement->preallocUsableSpace() < 1024 &&
                       placement->preallocUsableSpace() >= (1024 - sizeof(GrBlockAllocator)));
    delete placement;

    // Tests inline increased preallocation, option #3
    GrSBlockAllocator<2048> inlined{};
    SkDEBUGCODE(inlined->validate();)
    REPORTER_ASSERT(r, inlined->preallocSize() == 2048);
    REPORTER_ASSERT(r, inlined->preallocUsableSpace() < 2048 &&
                       inlined->preallocUsableSpace() >= (2048 - sizeof(GrBlockAllocator)));
}

DEF_TEST(GrBlockAllocatorAlloc, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Assumes the previous pointer was in the same block
    auto validate_ptr = [&](int align, int size,
                            GrBlockAllocator::ByteRange br,
                            GrBlockAllocator::ByteRange* prevBR) {
        uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
        // Matches the requested align
        REPORTER_ASSERT(r, pt % align == 0);
        // And large enough
        REPORTER_ASSERT(r, br.fEnd - br.fAlignedOffset >= size);
        // And has enough padding for alignment
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart >= 0);
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart <= align - 1);
        // And block of the returned struct is the current block of the allocator
        REPORTER_ASSERT(r, pool->currentBlock() == br.fBlock);

        // And make sure that we're past the required end of the previous allocation
        if (prevBR) {
            uintptr_t prevEnd =
                    reinterpret_cast<uintptr_t>(prevBR->fBlock->ptr(prevBR->fEnd - 1));
            REPORTER_ASSERT(r, pt > prevEnd);
        }
    };

    auto p1 = pool->allocate<1>(14);
    validate_ptr(1, 14, p1, nullptr);

    auto p2 = pool->allocate<2>(24);
    validate_ptr(2, 24, p2, &p1);

    auto p4 = pool->allocate<4>(28);
    validate_ptr(4, 28, p4, &p2);

    auto p8 = pool->allocate<8>(40);
    validate_ptr(8, 40, p8, &p4);

    auto p16 = pool->allocate<16>(64);
    validate_ptr(16, 64, p16, &p8);

    auto p32 = pool->allocate<32>(96);
    validate_ptr(32, 96, p32, &p16);

    // All of these allocations should be in the head block
    REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
    SkDEBUGCODE(pool->validate();)

    // Requesting an allocation of avail() should not make a new block
    size_t avail = pool->currentBlock()->avail<4>();
    auto pAvail = pool->allocate<4>(avail);
    validate_ptr(4, avail, pAvail, &p32);

    // Remaining should be less than the alignment that was requested, and then
    // the next allocation will make a new block
    REPORTER_ASSERT(r, pool->currentBlock()->avail<4>() < 4);
    auto pNextBlock = pool->allocate<4>(4);
    validate_ptr(4, 4, pNextBlock, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > pool->preallocSize());

    // Allocating more than avail() makes another block
    size_t currentSize = total_size(pool);
    size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
    auto pTooBig = pool->allocate<4>(bigRequest);
    validate_ptr(4, bigRequest, pTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > currentSize);

    // Allocating more than the default growth policy's block size (1024 in this case) will still
    // fulfill the request
    REPORTER_ASSERT(r, total_size(pool) - currentSize < 4096);
    currentSize = total_size(pool);
    auto pReallyTooBig = pool->allocate<4>(4096);
    validate_ptr(4, 4096, pReallyTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) >= currentSize + 4096);
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(GrBlockAllocatorResize, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Fixed resize from 16 to 32
    auto p = pool->allocate<4>(16);
    REPORTER_ASSERT(r, p.fBlock->avail<4>() > 16);
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, 16));
    p.fEnd += 16;

    // Subsequent allocation is 32 bytes ahead of 'p' now, and 'p' cannot be resized further.
    auto pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    REPORTER_ASSERT(r, p.fBlock == pNext.fBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, 48));

    // Confirm that releasing pNext allows 'p' to be resized, and that it can be resized up to avail
    REPORTER_ASSERT(r, p.fBlock->release(pNext.fStart, pNext.fEnd));
    int fillBlock = p.fBlock->avail<4>();
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, fillBlock));
    p.fEnd += fillBlock;

    // Confirm that resizing when there's not enough room fails
    REPORTER_ASSERT(r, p.fBlock->avail<4>() < fillBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, fillBlock));

    // Confirm that we can shrink 'p' back to 32 bytes and then further allocate again
    int shrinkTo32 = p.fStart - p.fEnd + 32;
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, shrinkTo32));
    p.fEnd += shrinkTo32;
    REPORTER_ASSERT(r, p.fEnd - p.fStart == 32);

    pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    SkDEBUGCODE(pool->validate();)

    // Confirm that we can't shrink past the start of the allocation, but we can shrink it to 0
    int shrinkTo0 = pNext.fStart - pNext.fEnd;
#ifndef SK_DEBUG
    // Only test for false on release builds; a negative size should assert on debug builds
    REPORTER_ASSERT(r, !pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0 - 1));
#endif
    REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
}

DEF_TEST(GrBlockAllocatorRelease, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Successful allocate and release
    auto p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, pool->currentBlock()->release(p.fStart, p.fEnd));
    // Ensure the above release actually means the next allocation reuses the same space
    auto p2 = pool->allocate<8>(32);
    REPORTER_ASSERT(r, p.fStart == p2.fStart);

    // Confirm that 'p2' cannot be released if another allocation came after it
    auto p3 = pool->allocate<8>(64);
    (void) p3;
    REPORTER_ASSERT(r, !p2.fBlock->release(p2.fStart, p2.fEnd));

    // Confirm that 'p4' can be released if 'p5' is released first, and confirm that 'p2' and 'p3'
    // can be released simultaneously (equivalent to 'p3' then 'p2').
    auto p4 = pool->allocate<8>(16);
    auto p5 = pool->allocate<8>(96);
    REPORTER_ASSERT(r, p5.fBlock->release(p5.fStart, p5.fEnd));
    REPORTER_ASSERT(r, p4.fBlock->release(p4.fStart, p4.fEnd));
    REPORTER_ASSERT(r, p2.fBlock->release(p2.fStart, p3.fEnd));

    // And confirm that passing in the wrong size for the allocation fails
    p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd - 16));
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd + 16));
    REPORTER_ASSERT(r, p.fBlock->release(p.fStart, p.fEnd));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(GrBlockAllocatorRewind, r) {
    // Confirm that a bunch of allocations and then releases in stack order fully goes back to the
    // start of the block (i.e. unwinds the entire stack, and not just the last cursor position)
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    std::vector<GrBlockAllocator::ByteRange> ptrs;
    for (int i = 0; i < 32; ++i) {
        ptrs.push_back(pool->allocate<4>(16));
    }

    // Release everything in reverse order
    SkDEBUGCODE(pool->validate();)
    for (int i = 31; i >= 0; --i) {
        auto br = ptrs[i];
        REPORTER_ASSERT(r, br.fBlock->release(br.fStart, br.fEnd));
    }

    // If correct, we've rewound all the way back to the start of the block, so a new allocation
    // will have the same location as ptrs[0]
    SkDEBUGCODE(pool->validate();)
    REPORTER_ASSERT(r, pool->allocate<4>(16).fStart == ptrs[0].fStart);
}

DEF_TEST(GrBlockAllocatorGrowthPolicy, r) {
    static constexpr int kInitSize = 128;
    static constexpr int kBlockCount = 5;
    static constexpr size_t kExpectedSizes[GrBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
            // kFixed -> kInitSize per block
            { kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
            // kLinear -> (block ct + 1) * kInitSize for next block
            { kInitSize, 2 * kInitSize, 3 * kInitSize, 4 * kInitSize, 5 * kInitSize },
            // kFibonacci -> 1, 1, 2, 3, 5 * kInitSize for the blocks
            { kInitSize, kInitSize, 2 * kInitSize, 3 * kInitSize, 5 * kInitSize },
            // kExponential -> 1, 2, 4, 8, 16 * kInitSize for the blocks
            { kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
    };

    for (int gp = 0; gp < GrBlockAllocator::kGrowthPolicyCount; ++gp) {
        GrSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
        for (int i = 1; i < kBlockCount; ++i) {
            REPORTER_ASSERT(r, kExpectedSizes[gp][i] == add_block(pool));
        }

        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(GrBlockAllocatorReset, r) {
    static constexpr int kBlockIncrement = 1024;

    GrSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
    SkDEBUGCODE(pool->validate();)

    void* firstAlloc = alloc_byte(pool);

    // Add several blocks
    add_block(pool);
    add_block(pool);
    add_block(pool);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 4); // 3 added plus the implicit head

    get_block(pool, 0)->setMetadata(2);

    // Reset and confirm that there's only one block, a new allocation matches 'firstAlloc' again,
    // and new blocks are sized based on a reset growth policy.
    pool->reset();
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 1);
    REPORTER_ASSERT(r, pool->preallocSize() == pool->totalSize());
    REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0);

    REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
    REPORTER_ASSERT(r, 2 * kBlockIncrement == add_block(pool));
    REPORTER_ASSERT(r, 3 * kBlockIncrement == add_block(pool));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(GrBlockAllocatorReleaseBlock, r) {
    // This loops over all growth policies to make sure that the incremental releases update the
    // sequence correctly for each policy.
    for (int gp = 0; gp < GrBlockAllocator::kGrowthPolicyCount; ++gp) {
        GrSBlockAllocator<1024> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        void* firstAlloc = alloc_byte(pool);

        size_t b1Size = total_size(pool);
        size_t b2Size = add_block(pool);
        size_t b3Size = add_block(pool);
        size_t b4Size = add_block(pool);
        SkDEBUGCODE(pool->validate();)

        get_block(pool, 0)->setMetadata(1);
        get_block(pool, 1)->setMetadata(2);
        get_block(pool, 2)->setMetadata(3);
        get_block(pool, 3)->setMetadata(4);

        // Remove the 3 added blocks, but always remove block i = 1 to test intermediate removal
        // (the last iteration then tests tail removal).
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b2Size + b3Size + b4Size);
        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 3);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b3Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 2);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 4);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, total_size(pool) == b1Size);

        // Since we're back to just the head block, if we add a new block, the growth policy should
        // match the original sequence instead of continuing with 'b5Size'
        pool->resetScratchSpace();
        size_t size = add_block(pool);
        REPORTER_ASSERT(r, size == b2Size);
        pool->releaseBlock(get_block(pool, 1));

        // Explicitly release the head block and confirm it's reset
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
        REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0); // metadata reset too

        // Confirm that if we have > 1 block but release the head block, we can still access the
        // others
        add_block(pool);
        add_block(pool);
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(GrBlockAllocatorIterateAndRelease, r) {
    GrSBlockAllocator<256> pool;

    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop forward and release the blocks
    int releaseCount = 0;
    for (auto* b : pool->blocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // The pool should be back to just the head block, which was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);

    // Add more blocks
    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop in reverse and release the blocks
    releaseCount = 0;
    for (auto* b : pool->rblocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // The pool should be back to just the head block, which was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);
}

DEF_TEST(GrBlockAllocatorScratchBlockReserve, r) {
    GrSBlockAllocator<256> pool;

    size_t added = add_block(pool);
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
    size_t total = pool->totalSize();
    pool->releaseBlock(pool->currentBlock());

    // Total size shouldn't have changed; the released block should become the scratch block
    REPORTER_ASSERT(r, pool->totalSize() == total);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == added);

    // But a reset definitely deletes any scratch block
    pool->reset();
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);

    // Reserving more than what's available adds a scratch block, and the current block's
    // available space is unchanged.
    size_t avail = pool->currentBlock()->avail();
    size_t reserve = avail + 1;
    pool->reserve(reserve);
    REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
    // And rounds up to the fixed size of this pool's growth policy
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= reserve &&
                       pool->testingOnly_scratchBlockSize() % 256 == 0);

    // Allocating more than avail activates the scratch block (so totalSize doesn't change)
    size_t preAllocTotalSize = pool->totalSize();
    pool->allocate<1>(avail + 1);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == 0);
    REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);

    // When reserving less than what's still available in the current block, no scratch block is
    // added.
    pool->reserve(pool->currentBlock()->avail());
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);

    // Unless checking available bytes is disabled
    pool->reserve(pool->currentBlock()->avail(), GrBlockAllocator::kIgnoreExistingBytes_Flag);
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0);

    // If kIgnoreGrowthPolicy is specified, the new scratch block should not have been rounded up
    // to the growth policy's size (which in this case is a fixed 256 bytes per block).
    pool->resetScratchSpace();
    pool->reserve(32, GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0 &&
                       pool->testingOnly_scratchBlockSize() < 256);

    // When requesting an allocation larger than the current block and the scratch block, a new
    // block is added, and the scratch block remains scratch.
    GrBlockAllocator::Block* oldTail = pool->currentBlock();
    avail = oldTail->avail();
    size_t scratchAvail = 2 * avail;
    pool->reserve(scratchAvail);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= scratchAvail);

    // This allocation request is larger than both oldTail's available space and the scratch size,
    // so a new block should be added and the scratch size should stay the same.
    scratchAvail = pool->testingOnly_scratchBlockSize();
    pool->allocate<1>(scratchAvail + 1);
    REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == scratchAvail);
}

DEF_TEST(GrBlockAllocatorStealBlocks, r) {
    GrSBlockAllocator<256> poolA;
    GrSBlockAllocator<128> poolB;

    add_block(poolA);
    add_block(poolA);
    add_block(poolA);

    add_block(poolB);
    add_block(poolB);

    char* bAlloc = (char*) alloc_byte(poolB);
    *bAlloc = 't';

    const GrBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);

    REPORTER_ASSERT(r, block_count(poolA) == 4);
    REPORTER_ASSERT(r, block_count(poolB) == 3);

    size_t aSize = poolA->totalSize();
    size_t bSize = poolB->totalSize();
    size_t theftSize = bSize - poolB->preallocSize();

    // This steal should move B's 2 heap blocks to A, bringing A to 6 and B to just its head
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
    REPORTER_ASSERT(r, poolB->preallocSize() == poolB->totalSize());
    REPORTER_ASSERT(r, poolA->totalSize() == aSize + theftSize);

    REPORTER_ASSERT(r, *bAlloc == 't');
    REPORTER_ASSERT(r, (uintptr_t) poolA->findOwningBlock(bAlloc) == (uintptr_t) allocOwner);
    REPORTER_ASSERT(r, !poolB->findOwningBlock(bAlloc));

    // Redoing the steal now that B is just a head block should be a no-op
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
}

// These tests ensure that the allocation padding mechanism works as intended
struct TestMeta {
    int fX1;
    int fX2;
};
struct alignas(32) TestMetaBig {
    int fX1;
    int fX2;
};

DEF_TEST(GrBlockAllocatorMetadata, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Allocation where alignment of user data > alignment of metadata
    SkASSERT(alignof(TestMeta) < 16);
    auto p1 = pool->allocate<16, sizeof(TestMeta)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p1.fAlignedOffset - p1.fStart >= (int) sizeof(TestMeta));
    TestMeta* meta = static_cast<TestMeta*>(p1.fBlock->ptr(p1.fAlignedOffset - sizeof(TestMeta)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(meta) % alignof(TestMeta) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p1.fBlock->ptr(p1.fAlignedOffset)) % 16 == 0);
    // Access fields to make sure 'meta' matches the compiler's expectations...
    meta->fX1 = 2;
    meta->fX2 = 5;

    // Repeat, but for metadata that has a larger alignment than the allocation
    SkASSERT(alignof(TestMetaBig) == 32);
    auto p2 = pool->allocate<alignof(TestMetaBig), sizeof(TestMetaBig)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p2.fAlignedOffset - p2.fStart >= (int) sizeof(TestMetaBig));
    TestMetaBig* metaBig = static_cast<TestMetaBig*>(
            p2.fBlock->ptr(p2.fAlignedOffset - sizeof(TestMetaBig)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(metaBig) % alignof(TestMetaBig) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p2.fBlock->ptr(p2.fAlignedOffset)) % 16 == 0);
    // Access fields
    metaBig->fX1 = 3;
    metaBig->fX2 = 6;

    // Ensure metadata values persist after allocations
    REPORTER_ASSERT(r, meta->fX1 == 2 && meta->fX2 == 5);
    REPORTER_ASSERT(r, metaBig->fX1 == 3 && metaBig->fX2 == 6);
}

DEF_TEST(GrBlockAllocatorAllocatorMetadata, r) {
    GrSBlockAllocator<256> pool{};
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, pool->metadata() == 0); // initial value

    pool->setMetadata(4);
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // Releasing the head block doesn't change the allocator's metadata (even though that's where
    // it is stored).
    pool->releaseBlock(pool->headBlock());
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // But resetting the whole allocator returns it to its newly constructed state
    pool->reset();
    REPORTER_ASSERT(r, pool->metadata() == 0);
}

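// Verifies that the block owning an allocation can be recovered from the user pointer (with or
// without metadata padding) via owningBlock(), as well as directly from the aligned offset.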
template<size_t Align, size_t Padding>
static void run_owning_block_test(skiatest::Reporter* r, GrBlockAllocator* pool) {
    auto br = pool->allocate<Align, Padding>(1);

    void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
    void* metaPtr = br.fBlock->ptr(br.fAlignedOffset - Padding);

    Block* block = pool->owningBlock<Align, Padding>(userPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = pool->owningBlock<Align>(metaPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(userPtr) - br.fAlignedOffset);
    REPORTER_ASSERT(r, block == br.fBlock);
}

template<size_t Padding>
static void run_owning_block_tests(skiatest::Reporter* r, GrBlockAllocator* pool) {
    run_owning_block_test<1, Padding>(r, pool);
    run_owning_block_test<2, Padding>(r, pool);
    run_owning_block_test<4, Padding>(r, pool);
    run_owning_block_test<8, Padding>(r, pool);
    run_owning_block_test<16, Padding>(r, pool);
    run_owning_block_test<32, Padding>(r, pool);
    run_owning_block_test<64, Padding>(r, pool);
    run_owning_block_test<128, Padding>(r, pool);
}

DEF_TEST(GrBlockAllocatorOwningBlock, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    run_owning_block_tests<1>(r, pool.allocator());
    run_owning_block_tests<2>(r, pool.allocator());
    run_owning_block_tests<4>(r, pool.allocator());
    run_owning_block_tests<8>(r, pool.allocator());
    run_owning_block_tests<16>(r, pool.allocator());
    run_owning_block_tests<32>(r, pool.allocator());

    // And some weird numbers
    run_owning_block_tests<3>(r, pool.allocator());
    run_owning_block_tests<9>(r, pool.allocator());
    run_owning_block_tests<17>(r, pool.allocator());
}