/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"

#ifdef SK_DEBUG
#include <vector>
#endif

GrBlockAllocator::GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockIncrement
        // can effectively fit higher byte counts in its 16 bits of storage
        , fBlockIncrement(SkTo<uint16_t>(GrAlignTo(blockIncrementBytes, kAddressAlign)
                                         / kAddressAlign))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // The head block always fills remaining space from GrBlockAllocator's size, because it's
        // inline, but can take over the specified number of bytes immediately after it.
        , fHead(/*prev=*/nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}

GrBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0)
        , fAllocatorMetadata(0) {
    SkASSERT(allocationSize >= (int) sizeof(Block));
    SkDEBUGCODE(fSentinel = kAssignedMarker;)

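    // Poison the entire payload (only has an effect in sanitizer builds) so that reads and
    // writes are flagged until the memory is actually handed out by an allocation.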
    this->poisonRange(kDataStart, fSize);
}

GrBlockAllocator::Block::~Block() {
    this->unpoisonRange(kDataStart, fSize);

    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
}

size_t GrBlockAllocator::totalSize() const {
    // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
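    // offsetof(fHead) accounts for the allocator fields declared before the inline head block;
    // the head block's own fSize (added by the loop below) covers fHead itself plus any
    // additionalPreallocBytes that immediately follow it.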
    size_t size = offsetof(GrBlockAllocator, fHead) + this->scratchBlockSize();
    for (const Block* b : this->blocks()) {
        size += b->fSize;
    }
    SkASSERT(size >= this->preallocSize());
    return size;
}

size_t GrBlockAllocator::totalUsableSpace() const {
    size_t size = this->scratchBlockSize();
    if (size > 0) {
        size -= kDataStart; // scratchBlockSize reports total block size, not usable size
    }
    for (const Block* b : this->blocks()) {
        size += (b->fSize - kDataStart);
    }
    SkASSERT(size >= this->preallocUsableSpace());
    return size;
}

size_t GrBlockAllocator::totalSpaceInUse() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fCursor - kDataStart);
    }
    SkASSERT(size <= this->totalUsableSpace());
    return size;
}

GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
    // When in doubt, search in reverse to find an overlapping block.
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    for (Block* b : this->rblocks()) {
        uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
        uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
        if (lowerBound <= ptr && ptr < upperBound) {
            SkASSERT(b->fSentinel == kAssignedMarker);
            return b;
        }
    }
    return nullptr;
}

void GrBlockAllocator::releaseBlock(Block* block) {
    if (block == &fHead) {
        // Reset the cursor of the head block so that it can be reused if it becomes the new tail
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        block->poisonRange(kDataStart, block->fSize);
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    } else {
        SkASSERT(block->fPrev);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        // The released block becomes the new scratch block if it's bigger than the current one;
        // otherwise it's deleted.
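        // (The scratch block is parked in fHead.fPrev, which is otherwise always null since the
        // head block has no real predecessor; addBlock() reuses it instead of allocating anew.)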
        if (this->scratchBlockSize() < block->fSize) {
            SkASSERT(block != fHead.fPrev); // shouldn't already be the scratch block
            if (fHead.fPrev) {
                delete fHead.fPrev;
            }
            block->markAsScratch();
            fHead.fPrev = block;
        } else {
            delete block;
        }
    }

    // Decrement growth policy (opposite of addBlock()'s increment operations)
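    // Only undo a step when the sequence has advanced past its initial state: fN0 stays 0 for
    // kFixed (and for kFibonacci until a block has been added), and fN1 stays 1 for kLinear and
    // kExponential until a block has been added.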
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing, fN0 always is 0
        if (gp == GrowthPolicy::kLinear) {
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
            int temp = fN1 - fN0; // yields prior fN0
            fN1 = fN1 - temp;     // yields prior fN1
            fN0 = temp;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the 2N update from addBlock
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}

void GrBlockAllocator::stealHeapBlocks(GrBlockAllocator* other) {
    Block* toSteal = other->fHead.fNext;
    if (toSteal) {
        // The other's next block connects back to this allocator's current tail, and its new tail
        // becomes the end of other's block linked list.
        SkASSERT(other->fTail != &other->fHead);
        toSteal->fPrev = fTail;
        fTail->fNext = toSteal;
        fTail = other->fTail;
        // The other allocator becomes just its inline head block
        other->fTail = &other->fHead;
        other->fHead.fNext = nullptr;
    } // else no block to steal
}

void GrBlockAllocator::reset() {
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {
            // Reset metadata and cursor, tail points to the head block again
            fTail = b;
            b->fNext = nullptr;
            b->fCursor = kDataStart;
            b->fMetadata = 0;
            // For reset(), but NOT releaseBlock(), the head allocatorMetadata and scratch block
            // are reset/destroyed.
            b->fAllocatorMetadata = 0;
            b->poisonRange(kDataStart, b->fSize);
            this->resetScratchSpace();
        } else {
            delete b;
        }
    }
    SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
             fHead.metadata() == 0 && fHead.fCursor == kDataStart);

    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}

void GrBlockAllocator::resetScratchSpace() {
    if (fHead.fPrev) {
        delete fHead.fPrev;
        fHead.fPrev = nullptr;
    }
}

void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
    SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    auto alignAllocSize = [](int size) {
        // Round to a nice boundary since the block isn't maxing out:
        // if allocSize > 32K, aligns on 4K boundary otherwise aligns on max_align_t, to play
        // nicely with jeMalloc (from SkArenaAlloc).
        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
        return (size + mask) & ~mask;
    };

    int allocSize;
    void* mem = nullptr;
    if (this->scratchBlockSize() >= minimumSize) {
        // Activate the scratch block instead of making a new block
        SkASSERT(fHead.fPrev->isScratch());
        allocSize = fHead.fPrev->fSize;
        mem = fHead.fPrev;
        fHead.fPrev = nullptr;
    } else if (minimumSize < maxSize) {
        // Calculate the 'next' size per growth policy sequence
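        // The sequence state is the pair (fN0, fN1), and the next block spans roughly
        // (fN0 + fN1) * the block increment: kFixed keeps (0, 1) so every block is one increment,
        // kLinear grows the multiplier by 1 each time, kFibonacci sums the two previous
        // multipliers, and kExponential doubles the multiplier.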
        GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
        int nextN1 = fN0 + fN1;
        int nextN0;
        if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
            nextN0 = fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            nextN0 = fN1;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            nextN0 = nextN1;
        }
        fN0 = std::min(kMaxN, nextN0);
        fN1 = std::min(kMaxN, nextN1);

        // However, must guard against overflow here, since all the size-based asserts prevented
        // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
        int sizeIncrement = fBlockIncrement * kAddressAlign;
        if (maxSize / sizeIncrement < nextN1) {
            // The growth policy would overflow, so use the max. We've already confirmed that
            // maxSize will be sufficient for the requested minimumSize
            allocSize = maxSize;
        } else {
            allocSize = std::min(alignAllocSize(std::max(minimumSize, sizeIncrement * nextN1)),
                                 maxSize);
        }
    } else {
        SkASSERT(minimumSize == maxSize);
        // Still align on a nice boundary, no max clamping since that would just undo the alignment
        allocSize = alignAllocSize(minimumSize);
    }

    // Create new block and append to the linked list of blocks in this allocator
    if (!mem) {
        mem = operator new(allocSize);
    }
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}

#ifdef SK_DEBUG
void GrBlockAllocator::validate() const {
    std::vector<const Block*> blocks;
    const Block* prev = nullptr;
    for (const Block* block : this->blocks()) {
        blocks.push_back(block);

        SkASSERT(kAssignedMarker == block->fSentinel);
        if (block == &fHead) {
            // The head block's fPrev may be non-null if it holds a scratch block, but that's not
            // considered part of the linked list
            SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
        } else {
            SkASSERT(prev == block->fPrev);
        }
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        SkASSERT(block->fSize >= (int) sizeof(Block));
        SkASSERT(block->fCursor >= kDataStart);
        SkASSERT(block->fCursor <= block->fSize);

        prev = block;
    }
    SkASSERT(prev == fTail);
    SkASSERT(blocks.size() > 0);
    SkASSERT(blocks[0] == &fHead);

    // Confirm reverse iteration matches forward iteration
    size_t j = blocks.size();
    for (const Block* b : this->rblocks()) {
        SkASSERT(b == blocks[j - 1]);
        j--;
    }
    SkASSERT(j == 0);
}
#endif