/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"

#ifdef SK_DEBUG
#include <vector>
#endif

GrBlockAllocator::GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockIncrement
        // can effectively fit higher byte counts in its 16 bits of storage.
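        // (For example, assuming kAddressAlign == 16: blockIncrementBytes = 1000 rounds up to
        //  1008 and is stored as 1008 / 16 = 63; addBlock() later rebuilds the byte count as
        //  fBlockIncrement * kAddressAlign.)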
        , fBlockIncrement(SkTo<uint16_t>(GrAlignTo(blockIncrementBytes, kAddressAlign)
                                         / kAddressAlign))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // The head block always fills remaining space from GrBlockAllocator's size, because it's
        // inline, but can take over the specified number of bytes immediately after it.
        , fHead(nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}

GrBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0)
        , fAllocatorMetadata(0) {
    SkASSERT(allocationSize >= (int) sizeof(Block));
    SkDEBUGCODE(fSentinel = kAssignedMarker;)
}

GrBlockAllocator::Block::~Block() {
    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
}

size_t GrBlockAllocator::totalSize() const {
    // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
    size_t size = offsetof(GrBlockAllocator, fHead) + this->scratchBlockSize();
    for (const Block* b : this->blocks()) {
        size += b->fSize;
    }
    SkASSERT(size >= this->preallocSize());
    return size;
}

size_t GrBlockAllocator::totalUsableSpace() const {
    size_t size = this->scratchBlockSize();
    if (size > 0) {
        size -= kDataStart; // scratchBlockSize reports total block size, not usable size
    }
    for (const Block* b : this->blocks()) {
        size += (b->fSize - kDataStart);
    }
    SkASSERT(size >= this->preallocUsableSpace());
    return size;
}

size_t GrBlockAllocator::totalSpaceInUse() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fCursor - kDataStart);
    }
    SkASSERT(size <= this->totalUsableSpace());
    return size;
}

GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
    // When in doubt, search in reverse to find an overlapping block.
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    for (Block* b : this->rblocks()) {
        uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
        uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
        if (lowerBound <= ptr && ptr < upperBound) {
            SkASSERT(b->fSentinel == kAssignedMarker);
            return b;
        }
    }
    return nullptr;
}

void GrBlockAllocator::releaseBlock(Block* block) {
    if (block == &fHead) {
        // Reset the cursor of the head block so that it can be reused if it becomes the new tail
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    } else {
        SkASSERT(block->fPrev);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        // The released block becomes the new scratch block (if it's bigger than the current
        // scratch block); otherwise it is deleted.
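        // (fHead is the inline first block and never has a real predecessor, so its fPrev pointer
        //  doubles as the slot that holds the scratch block.)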
        if (this->scratchBlockSize() < block->fSize) {
            SkASSERT(block != fHead.fPrev); // shouldn't already be the scratch block
            if (fHead.fPrev) {
                delete fHead.fPrev;
            }
            block->markAsScratch();
            fHead.fPrev = block;
        } else {
            delete block;
        }
    }

    // Decrement growth policy (opposite of addBlock()'s increment operations)
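    // (For example, under kFibonacci releasing a block after (fN0, fN1) == (3, 5) restores (2, 3);
    //  under kExponential (4, 4) becomes (2, 2); under kLinear fN1 simply drops by fN0 == 1.)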
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing; fN0 is always 0
        if (gp == GrowthPolicy::kLinear) {
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
            int temp = fN1 - fN0; // yields prior fN0
            fN1 = fN1 - temp;     // yields prior fN1
            fN0 = temp;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the 2N update from addBlock
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}

void GrBlockAllocator::stealHeapBlocks(GrBlockAllocator* other) {
    Block* toSteal = other->fHead.fNext;
    if (toSteal) {
        // The other allocator's first heap block is connected back to this allocator's current
        // tail, and this allocator's new tail becomes the end of the other's block linked list.
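        // (For example: this = H <-> A <-> B and other = H' <-> C <-> D becomes
        //  this = H <-> A <-> B <-> C <-> D and other = H', where H and H' are the inline heads.)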
        SkASSERT(other->fTail != &other->fHead);
        toSteal->fPrev = fTail;
        fTail->fNext = toSteal;
        fTail = other->fTail;
        // The other allocator becomes just its inline head block
        other->fTail = &other->fHead;
        other->fHead.fNext = nullptr;
    } // else no block to steal
}

void GrBlockAllocator::reset() {
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {
            // Reset metadata and cursor, tail points to the head block again
            fTail = b;
            b->fNext = nullptr;
            b->fCursor = kDataStart;
            b->fMetadata = 0;
            // For reset(), but NOT releaseBlock(), the head allocatorMetadata and scratch block
            // are reset/destroyed.
            b->fAllocatorMetadata = 0;
            this->resetScratchSpace();
        } else {
            delete b;
        }
    }
    SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
             fHead.metadata() == 0 && fHead.fCursor == kDataStart);

    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}

void GrBlockAllocator::resetScratchSpace() {
    if (fHead.fPrev) {
        delete fHead.fPrev;
        fHead.fPrev = nullptr;
    }
}

void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
    SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    auto alignAllocSize = [](int size) {
        // Round to a nice boundary since the block isn't maxing out:
        //   if allocSize > 32K, align on a 4K boundary; otherwise align on max_align_t, to play
        //   nicely with jemalloc (as in SkArenaAlloc).
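        // (For example, assuming kAddressAlign == 16: size = 1000 rounds up to 1008, while
        //  size = 40000 exceeds 32K and rounds up to 40960, the next 4K multiple.)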
        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
        return (size + mask) & ~mask;
    };

    int allocSize;
    void* mem = nullptr;
    if (this->scratchBlockSize() >= minimumSize) {
        // Activate the scratch block instead of making a new block
        SkASSERT(fHead.fPrev->isScratch());
        allocSize = fHead.fPrev->fSize;
        mem = fHead.fPrev;
        fHead.fPrev = nullptr;
    } else if (minimumSize < maxSize) {
        // Calculate the 'next' size per growth policy sequence
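        // (In units of the block increment, the (fN0, fN1) updates below generate, per added
        //  block: kFixed -> 1, 1, 1, ...; kLinear -> 2, 3, 4, ...; kFibonacci -> 1, 2, 3, 5, 8, ...;
        //  kExponential -> 2, 4, 8, 16, ...)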
        GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
        int nextN1 = fN0 + fN1;
        int nextN0;
        if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
            nextN0 = fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            nextN0 = fN1;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            nextN0 = nextN1;
        }
        fN0 = std::min(kMaxN, nextN0);
        fN1 = std::min(kMaxN, nextN1);

        // However, we must guard against overflow here: the size-based asserts only prevented
        // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
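        // (For example, with sizeIncrement == 16, any nextN1 greater than maxSize / 16 would push
        //  sizeIncrement * nextN1 past maxSize, and potentially past INT_MAX, so allocSize is
        //  clamped to maxSize instead.)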
        int sizeIncrement = fBlockIncrement * kAddressAlign;
        if (maxSize / sizeIncrement < nextN1) {
            // The growth policy would overflow, so use the max. We've already confirmed that
            // maxSize will be sufficient for the requested minimumSize
            allocSize = maxSize;
        } else {
            allocSize = std::min(alignAllocSize(std::max(minimumSize, sizeIncrement * nextN1)),
                                 maxSize);
        }
    } else {
        SkASSERT(minimumSize == maxSize);
        // Still align on a nice boundary, no max clamping since that would just undo the alignment
        allocSize = alignAllocSize(minimumSize);
    }

    // Create new block and append to the linked list of blocks in this allocator
    if (!mem) {
        mem = operator new(allocSize);
    }
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}

#ifdef SK_DEBUG
void GrBlockAllocator::validate() const {
    std::vector<const Block*> blocks;
    const Block* prev = nullptr;
    for (const Block* block : this->blocks()) {
        blocks.push_back(block);

        SkASSERT(kAssignedMarker == block->fSentinel);
        if (block == &fHead) {
            // The head block's fPrev may be non-null if it holds a scratch block, but that's not
            // considered part of the linked list
            SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
        } else {
            SkASSERT(prev == block->fPrev);
        }
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        SkASSERT(block->fSize >= (int) sizeof(Block));
        SkASSERT(block->fCursor >= kDataStart);
        SkASSERT(block->fCursor <= block->fSize);

        prev = block;
    }
    SkASSERT(prev == fTail);
    SkASSERT(blocks.size() > 0);
    SkASSERT(blocks[0] == &fHead);

    // Confirm reverse iteration matches forward iteration
    size_t j = blocks.size();
    for (const Block* b : this->rblocks()) {
        SkASSERT(b == blocks[j - 1]);
        j--;
    }
    SkASSERT(j == 0);
}
#endif