/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"

#ifdef SK_DEBUG
#include <vector>
#endif

// Sets up the growth policy state and the inline head block. 'blockIncrementBytes' is the
// base unit for sizing future heap blocks; 'additionalPreallocBytes' extends the inline
// head block's usable space beyond the allocator's own footprint.
GrBlockAllocator::GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockSizeIncrement
        // can effectively fit higher byte counts in its 16 bits of storage
        , fBlockIncrement(SkTo<uint16_t>(GrAlignTo(blockIncrementBytes, kAddressAlign)
                                         / kAddressAlign))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
        // fN0/fN1 seed the size sequence: (1,1) for linear/exponential, (0,1) for
        // fixed/fibonacci (matching how addBlock()/releaseBlock() advance and rewind them).
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // The head block always fills remaining space from GrBlockAllocator's size, because it's
        // inline, but can take over the specified number of bytes immediately after it.
        , fHead(nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}
30
// Initializes a block of 'allocationSize' total bytes (header included), linked after 'prev'.
// The cursor starts at kDataStart, i.e. the first allocatable byte past the header.
GrBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0)
        , fAllocatorMetadata(0) {
    // A block must at least be able to hold its own header.
    SkASSERT(allocationSize >= (int) sizeof(Block));
    // Debug-only sentinel; checked on access and cleared in ~Block() to catch stale pointers.
    SkDEBUGCODE(fSentinel = kAssignedMarker;)
}
41
GrBlockAllocator::Block::~Block() {
    // Guard against double-destruction: the sentinel must still hold the live marker.
    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
}
46
47size_t GrBlockAllocator::totalSize() const {
48 // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
49 size_t size = offsetof(GrBlockAllocator, fHead);
50 for (const Block* b : this->blocks()) {
51 size += b->fSize;
52 }
53 SkASSERT(size >= this->preallocSize());
54 return size;
55}
56
57size_t GrBlockAllocator::totalUsableSpace() const {
58 size_t size = 0;
59 for (const Block* b : this->blocks()) {
60 size += (b->fSize - kDataStart);
61 }
62 SkASSERT(size >= this->preallocUsableSpace());
63 return size;
64}
65
66size_t GrBlockAllocator::totalSpaceInUse() const {
67 size_t size = 0;
68 for (const Block* b : this->blocks()) {
69 size += (b->fCursor - kDataStart);
70 }
71 SkASSERT(size <= this->totalUsableSpace());
72 return size;
73}
74
75GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
76 // When in doubt, search in reverse to find an overlapping block.
77 uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
78 for (Block* b : this->rblocks()) {
79 uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
80 uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
81 if (lowerBound <= ptr && ptr < upperBound) {
82 SkASSERT(b->fSentinel == kAssignedMarker);
83 return b;
84 }
85 }
86 return nullptr;
87}
88
// Returns a single block to the system (or, for the inline head block, resets it for
// reuse) and rewinds the growth-policy sequence by one step so the next addBlock()
// re-requests a similarly-sized block.
void GrBlockAllocator::releaseBlock(Block* block) {
    if (block->fPrev) {
        // Unlink block from the double-linked list of blocks
        SkASSERT(block != &fHead);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        delete block;
    } else {
        // Reset the cursor of the head block so that it can be reused
        SkASSERT(block == &fHead);
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    }

    // Decrement growth policy (opposite of addBlock()'s increment operations)
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing, fN0 always is 0
        if (gp == GrowthPolicy::kLinear) {
            // addBlock() set n1 += n0, so subtracting n0 restores the prior n1.
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
            int temp = fN1 - fN0; // yields prior fN0
            fN1 = fN1 - temp; // yields prior fN1
            fN0 = temp;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the 2N update from addBlock
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}
133
// Returns the allocator to its freshly-constructed state: all heap blocks are freed, the
// inline head block is re-initialized (including its allocator metadata, unlike in
// releaseBlock()), and the growth-policy sequence restarts from its initial terms.
void GrBlockAllocator::reset() {
    // Iterate in reverse so every heap block is deleted before the block that precedes it;
    // the head block is visited last and is reset in place since it is not heap-allocated.
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {
            // Reset metadata and cursor, tail points to the head block again
            fTail = b;
            b->fNext = nullptr;
            b->fCursor = kDataStart;
            b->fMetadata = 0;

            // For reset(), but NOT releaseBlock(), the head allocatorMetadata resets too
            b->fAllocatorMetadata = 0;
        } else {
            delete b;
        }
    }
    SkASSERT(fTail == &fHead && fHead.fNext == nullptr &&
             fHead.metadata() == 0 && fHead.fCursor == kDataStart);

    // Re-seed the growth sequence exactly as the constructor does.
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}
156
// Appends a new heap block to the list, sized by advancing the growth-policy sequence
// (n0, n1), clamped so the result stays within [minimumSize, maxSize] and cannot
// overflow the bit-field storage of n0/n1.
void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
    SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    // Calculate the 'next' size per growth policy sequence
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    int nextN1 = fN0 + fN1;
    int nextN0;
    if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
        // Fixed keeps n0 == 0 (constant size); linear keeps n0 == 1 (arithmetic growth).
        nextN0 = fN0;
    } else if (gp == GrowthPolicy::kFibonacci) {
        nextN0 = fN1;
    } else {
        SkASSERT(gp == GrowthPolicy::kExponential);
        // n0 == n1 doubles the sum each step.
        nextN0 = nextN1;
    }
    fN0 = std::min(kMaxN, nextN0);
    fN1 = std::min(kMaxN, nextN1);

    // However, must guard against overflow here, since all the size-based asserts prevented
    // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
    int sizeIncrement = fBlockIncrement * kAddressAlign;
    int allocSize;
    if (maxSize / sizeIncrement < nextN1) {
        // The growth policy would overflow, so use the max. We've already confirmed that maxSize
        // will be sufficient for the requested minimumSize
        allocSize = maxSize;
    } else {
        allocSize = std::max(minimumSize, sizeIncrement * nextN1);
        // Then round to a nice boundary since the block isn't maxing out:
        //   if allocSize > 32K, aligns on 4K boundary otherwise aligns on max_align_t, to play
        //   nicely with jeMalloc (from SkArenaAlloc).
        int mask = allocSize > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
        allocSize = std::min((allocSize + mask) & ~mask, maxSize);
    }

    // Create new block and append to the linked list of blocks in this allocator.
    // Raw operator new + placement new is intentional: the Block header is constructed at the
    // front of a single allocation whose tail is the block's payload.
    void* mem = operator new(allocSize);
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}
201
#ifdef SK_DEBUG
// Debug-only consistency check: verifies per-block invariants, the doubly-linked list
// structure, and that reverse iteration visits exactly the forward order reversed.
void GrBlockAllocator::validate() const {
    std::vector<const Block*> seen;
    const Block* last = nullptr;
    for (const Block* cur : this->blocks()) {
        seen.push_back(cur);

        // Sentinel and prev/next links must be intact.
        SkASSERT(kAssignedMarker == cur->fSentinel);
        SkASSERT(last == cur->fPrev);
        if (last) {
            SkASSERT(last->fNext == cur);
        }

        // Cursor must sit within the block's payload bounds.
        SkASSERT(cur->fSize >= (int) sizeof(Block));
        SkASSERT(cur->fCursor >= kDataStart);
        SkASSERT(cur->fCursor <= cur->fSize);

        last = cur;
    }
    SkASSERT(last == fTail);
    SkASSERT(!seen.empty());
    SkASSERT(seen[0] == &fHead);

    // Reverse iteration must mirror the recorded forward order.
    size_t remaining = seen.size();
    for (const Block* cur : this->rblocks()) {
        SkASSERT(cur == seen[remaining - 1]);
        remaining--;
    }
    SkASSERT(remaining == 0);
}
#endif