/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "src/gpu/GrMemoryPool.h"
Michael Ludwigcd019792020-03-17 10:14:48 -04009
Mike Kleinc0bd9f92019-04-23 12:05:21 -050010#include "src/gpu/ops/GrOp.h"
Michael Ludwigcd019792020-03-17 10:14:48 -040011
Mike Klein0ec1c572018-12-04 11:52:51 -050012#ifdef SK_DEBUG
13 #include <atomic>
14#endif
Herb Derbyd7b34a52017-03-20 11:19:23 -040015
Michael Ludwigcd019792020-03-17 10:14:48 -040016///////////////////////////////////////////////////////////////////////////////////////////////////
bsalomon@google.com4da34e32012-06-19 15:40:27 +000017
Brian Salomon6986c652019-12-12 10:58:47 -050018std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
Michael Ludwigcd019792020-03-17 10:14:48 -040019 static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);
20
21 preallocSize = SkTPin(preallocSize, kMinAllocationSize,
22 (size_t) GrBlockAllocator::kMaxAllocationSize);
23 minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
24 (size_t) GrBlockAllocator::kMaxAllocationSize);
25 void* mem = operator new(preallocSize);
26 return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
Robert Phillips7c525e62018-06-12 10:11:12 -040027}
28
// Constructs a pool at the head of a 'preallocSize'-byte region (see Make()).
// The bytes of that region that come after fAllocator's own fields are handed
// to fAllocator as its preallocated first block, which is why the offsetof/
// sizeof arithmetic below subtracts everything up to and including fAllocator.
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
    // Debug-only live-allocation counter; checked against 0 in the destructor.
    SkDEBUGCODE(fAllocationCount = 0;)
}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000034
35GrMemoryPool::~GrMemoryPool() {
Brian Salomona5002c32017-03-28 16:51:02 -040036#ifdef SK_DEBUG
37 int i = 0;
38 int n = fAllocatedIDs.count();
Michael Ludwigcd019792020-03-17 10:14:48 -040039 fAllocatedIDs.foreach([&i, n] (int id) {
Brian Salomona5002c32017-03-28 16:51:02 -040040 if (++i == 1) {
Robert Phillips19f466d2020-02-26 10:27:07 -050041 SkDebugf("Leaked %d IDs (in no particular order): %d%s", n, id, (n == i) ? "\n" : "");
Brian Salomona5002c32017-03-28 16:51:02 -040042 } else if (i < 11) {
43 SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
44 } else if (i == 11) {
45 SkDebugf(", ...\n");
46 }
47 });
48#endif
Michael Ludwigcd019792020-03-17 10:14:48 -040049 SkASSERT(0 == fAllocationCount);
50 SkASSERT(this->isEmpty());
51}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000052
53void* GrMemoryPool::allocate(size_t size) {
Michael Ludwigcd019792020-03-17 10:14:48 -040054 static_assert(alignof(Header) <= kAlignment);
55 SkDEBUGCODE(this->validate();)
bsalomon@google.com4da34e32012-06-19 15:40:27 +000056
Michael Ludwigcd019792020-03-17 10:14:48 -040057 GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);
58
59 // Initialize GrMemoryPool's custom header at the start of the allocation
60 Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
61 header->fStart = alloc.fStart;
62 header->fEnd = alloc.fEnd;
63
64 // Update live count within the block
65 alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);
66
67#ifdef SK_DEBUG
68 header->fSentinel = GrBlockAllocator::kAssignedMarker;
69 header->fID = []{
70 static std::atomic<int> nextID{1};
Mike Klein0ec1c572018-12-04 11:52:51 -050071 return nextID++;
Michael Ludwigcd019792020-03-17 10:14:48 -040072 }();
73
Brian Salomona5002c32017-03-28 16:51:02 -040074 // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
Michael Ludwigcd019792020-03-17 10:14:48 -040075 fAllocatedIDs.add(header->fID);
76 fAllocationCount++;
77#endif
78
79 // User-facing pointer is after the header padding
80 return alloc.fBlock->ptr(alloc.fAlignedOffset);
bsalomon@google.com4da34e32012-06-19 15:40:27 +000081}
82
// Releases an allocation previously returned by allocate(). 'p' must be a
// live pointer from this pool; the Header written by allocate() sits
// immediately before it.
void GrMemoryPool::release(void* p) {
    // NOTE: if we needed it, (p - block) would equal the original alignedOffset value returned by
    // GrBlockAllocator::allocate()
    Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));
    // Double-free / stray-pointer check: the sentinel is only set by allocate().
    SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);

    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);

#ifdef SK_DEBUG
    // Flip the sentinel so a second release of the same pointer asserts above.
    header->fSentinel = GrBlockAllocator::kFreedMarker;
    fAllocatedIDs.remove(header->fID);
    fAllocationCount--;
#endif

    // The block's metadata holds its live-allocation count (see allocate()).
    int alive = block->metadata();
    if (alive == 1) {
        // This was last allocation in the block, so remove it
        fAllocator.releaseBlock(block);
    } else {
        // Update count and release storage of the allocation itself
        block->setMetadata(alive - 1);
        block->release(header->fStart, header->fEnd);
    }
}
Brian Salomon6986c652019-12-12 10:58:47 -0500107
#ifdef SK_DEBUG
// Debug-only invariant check: the per-block live counts must agree with the
// pool-wide allocation count and the set of outstanding IDs.
void GrMemoryPool::validate() const {
    fAllocator.validate();

    int liveCount = 0;
    for (const auto* block : fAllocator.blocks()) {
        liveCount += block->metadata();
    }
    SkASSERT(liveCount == fAllocationCount);
    SkASSERT(fAllocationCount == fAllocatedIDs.count());
    // A pool with zero live allocations must report itself empty.
    SkASSERT(liveCount > 0 || this->isEmpty());
}
#endif
Brian Salomon6986c652019-12-12 10:58:47 -0500121
Michael Ludwigcd019792020-03-17 10:14:48 -0400122///////////////////////////////////////////////////////////////////////////////////////////////////
Brian Salomon6986c652019-12-12 10:58:47 -0500123
124std::unique_ptr<GrOpMemoryPool> GrOpMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
Michael Ludwigcd019792020-03-17 10:14:48 -0400125 static_assert(sizeof(GrOpMemoryPool) < GrMemoryPool::kMinAllocationSize);
126
127 preallocSize = SkTPin(preallocSize, GrMemoryPool::kMinAllocationSize,
128 (size_t) GrBlockAllocator::kMaxAllocationSize);
129 minAllocSize = SkTPin(minAllocSize, GrMemoryPool::kMinAllocationSize,
130 (size_t) GrBlockAllocator::kMaxAllocationSize);
131 void* mem = operator new(preallocSize);
132 return std::unique_ptr<GrOpMemoryPool>(new (mem) GrOpMemoryPool(preallocSize, minAllocSize));
Brian Salomon6986c652019-12-12 10:58:47 -0500133}
134
Herb Derbyd6cfe722020-10-05 15:50:47 -0400135#if !defined(GR_OP_ALLOCATE_USE_NEW)
136 void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
137 GrOp* tmp = op.release();
138 SkASSERT(tmp);
139 tmp->~GrOp();
140 fPool.release(tmp);
141 }
142#endif