/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrMemoryPool.h"

#include "src/gpu/ops/GrOp.h"

#ifdef SK_DEBUG
#include <atomic>
#endif

///////////////////////////////////////////////////////////////////////////////////////////////

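// GrMemoryPool lives inside the very buffer it manages: Make() obtains a single heap allocation
// of preallocSize bytes, placement-news the pool at its front, and the constructor hands the
// remaining bytes to fAllocator as its preallocated block.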
std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
    static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);

    preallocSize = SkTPin(preallocSize, kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);
    minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);
    void* mem = operator new(preallocSize);
    return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
}

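// The allocator's preallocated space is whatever remains of the Make() buffer once the pool's
// own fields (through fAllocator) are accounted for; subsequent blocks grow by a fixed
// minAllocSize.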
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
    SkDEBUGCODE(fAllocationCount = 0;)
}

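// In debug builds the destructor reports the IDs of any allocations that were never released
// (capped at the first ten) before asserting that the pool is empty.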
GrMemoryPool::~GrMemoryPool() {
#ifdef SK_DEBUG
    int i = 0;
    int n = fAllocatedIDs.count();
    fAllocatedIDs.foreach([&i, n] (int id) {
        if (++i == 1) {
            SkDebugf("Leaked %d IDs (in no particular order): %d%s", n, id, (n == i) ? "\n" : "");
        } else if (i < 11) {
            SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
        } else if (i == 11) {
            SkDebugf(", ...\n");
        }
    });
#endif
    SkASSERT(0 == fAllocationCount);
    SkASSERT(this->isEmpty());
}

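// Every allocation is preceded by a Header that records the block-relative start/end of its
// byte range, so release() can later return exactly that range to the owning block.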
void* GrMemoryPool::allocate(size_t size) {
    static_assert(alignof(Header) <= kAlignment);
    SkDEBUGCODE(this->validate();)

    GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);

    // Initialize GrMemoryPool's custom header at the start of the allocation
    Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
    header->fStart = alloc.fStart;
    header->fEnd = alloc.fEnd;

    // Update live count within the block
    alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);

#ifdef SK_DEBUG
    header->fSentinel = GrBlockAllocator::kAssignedMarker;
    header->fID = []{
        static std::atomic<int> nextID{1};
        return nextID++;
    }();

    // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
    fAllocatedIDs.add(header->fID);
    fAllocationCount++;
#endif

    // User-facing pointer is after the header padding
    return alloc.fBlock->ptr(alloc.fAlignedOffset);
}

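// Each block's metadata word serves as its live-allocation count: once that count drops to zero
// the entire block is returned to the allocator.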
void GrMemoryPool::release(void* p) {
    // NOTE: if we needed it, (p - block) would equal the original alignedOffset value returned by
    // GrBlockAllocator::allocate()
    Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));
    SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);

    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);

#ifdef SK_DEBUG
    header->fSentinel = GrBlockAllocator::kFreedMarker;
    fAllocatedIDs.remove(header->fID);
    fAllocationCount--;
#endif

    int alive = block->metadata();
    if (alive == 1) {
        // This was the last allocation in the block, so remove it
        fAllocator.releaseBlock(block);
    } else {
        // Update count and release storage of the allocation itself
        block->setMetadata(alive - 1);
        block->release(header->fStart, header->fEnd);
    }
}

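// Debug-only consistency check: the per-block live counts must sum to fAllocationCount, which in
// turn must match the number of outstanding IDs.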
#ifdef SK_DEBUG
void GrMemoryPool::validate() const {
    fAllocator.validate();

    int allocCount = 0;
    for (const auto* b : fAllocator.blocks()) {
        allocCount += b->metadata();
    }
    SkASSERT(allocCount == fAllocationCount);
    SkASSERT(fAllocationCount == fAllocatedIDs.count());
    SkASSERT(allocCount > 0 || this->isEmpty());
}
#endif

///////////////////////////////////////////////////////////////////////////////////////////////

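// GrOpMemoryPool::Make mirrors GrMemoryPool::Make: it pins both size arguments to the allocator's
// supported range and placement-news the op pool at the front of a single preallocated buffer.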
std::unique_ptr<GrOpMemoryPool> GrOpMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
    static_assert(sizeof(GrOpMemoryPool) < GrMemoryPool::kMinAllocationSize);

    preallocSize = SkTPin(preallocSize, GrMemoryPool::kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);
    minAllocSize = SkTPin(minAllocSize, GrMemoryPool::kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);
    void* mem = operator new(preallocSize);
    return std::unique_ptr<GrOpMemoryPool>(new (mem) GrOpMemoryPool(preallocSize, minAllocSize));
}

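// Compiled only when GR_OP_ALLOCATE_USE_NEW is not defined: releasing an op runs its destructor
// in place and then hands its storage back to the wrapped GrMemoryPool (fPool).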
#if !defined(GR_OP_ALLOCATE_USE_NEW)
void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
    GrOp* tmp = op.release();
    SkASSERT(tmp);
    tmp->~GrOp();
    fPool.release(tmp);
}
#endif