/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "src/gpu/GrMemoryPool.h"
Michael Ludwigcd019792020-03-17 10:14:48 -04009
Mike Klein8aa0edf2020-10-16 11:04:18 -050010#include "include/private/SkTPin.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050011#include "src/gpu/ops/GrOp.h"
Michael Ludwigcd019792020-03-17 10:14:48 -040012
Mike Klein0ec1c572018-12-04 11:52:51 -050013#ifdef SK_DEBUG
14 #include <atomic>
15#endif
Herb Derbyd7b34a52017-03-20 11:19:23 -040016
Michael Ludwigcd019792020-03-17 10:14:48 -040017///////////////////////////////////////////////////////////////////////////////////////////////////
bsalomon@google.com4da34e32012-06-19 15:40:27 +000018
Brian Salomon6986c652019-12-12 10:58:47 -050019std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
Michael Ludwigcd019792020-03-17 10:14:48 -040020 static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);
21
22 preallocSize = SkTPin(preallocSize, kMinAllocationSize,
23 (size_t) GrBlockAllocator::kMaxAllocationSize);
24 minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
25 (size_t) GrBlockAllocator::kMaxAllocationSize);
26 void* mem = operator new(preallocSize);
27 return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
Robert Phillips7c525e62018-06-12 10:11:12 -040028}
29
// The allocator's preallocated space is whatever remains of 'preallocSize' after the pool's
// own members: 'offsetof(GrMemoryPool, fAllocator)' accounts for members declared before the
// allocator, and 'sizeof(GrBlockAllocator)' for the allocator's own footprint, since the whole
// pool was placement-new'd into a single 'preallocSize'-byte chunk (see Make()).
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
    // Debug-only live-allocation counter; starts at zero for an empty pool.
    SkDEBUGCODE(fAllocationCount = 0;)
}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000035
GrMemoryPool::~GrMemoryPool() {
#ifdef SK_DEBUG
    // Leak report: every ID still present in fAllocatedIDs was allocate()d but never
    // release()d. Print at most the first 10 IDs, then an ellipsis.
    int i = 0;
    int n = fAllocatedIDs.count();
    fAllocatedIDs.foreach([&i, n] (int id) {
        if (++i == 1) {
            // First leaked ID: print the header line (newline only if it is also the last).
            SkDebugf("Leaked %d IDs (in no particular order): %d%s", n, id, (n == i) ? "\n" : "");
        } else if (i < 11) {
            SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
        } else if (i == 11) {
            // More than 10 leaks: truncate the list.
            SkDebugf(", ...\n");
        }
    });
#endif
    // Destroying a pool with live allocations would leave dangling pointers; assert emptiness.
    SkASSERT(0 == fAllocationCount);
    SkASSERT(this->isEmpty());
}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000053
// Allocates 'size' bytes from the pool. Each allocation is preceded by a Header (placed in
// the padding reserved before the aligned user pointer) that records the byte range needed
// to release the allocation later.
void* GrMemoryPool::allocate(size_t size) {
    // The Header is stored in the kAlignment-sized padding ahead of the user pointer, so its
    // own alignment requirement must not exceed kAlignment.
    static_assert(alignof(Header) <= kAlignment);
    SkDEBUGCODE(this->validate();)

    // Reserve sizeof(Header) bytes of padding before the aligned user region.
    GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);

    // Initialize GrMemoryPool's custom header at the start of the allocation
    Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
    header->fStart = alloc.fStart;
    header->fEnd = alloc.fEnd;

    // Update live count within the block (the block's metadata is used as a per-block count
    // of outstanding allocations; see release()).
    alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);

#ifdef SK_DEBUG
    // Sentinel detects double-frees / bad pointers; fID gives each allocation a unique,
    // process-wide identifier for leak reporting.
    header->fSentinel = GrBlockAllocator::kAssignedMarker;
    header->fID = []{
        static std::atomic<int> nextID{1};
        return nextID++;
    }();

    // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
    fAllocatedIDs.add(header->fID);
    fAllocationCount++;
#endif

    // User-facing pointer is after the header padding
    return alloc.fBlock->ptr(alloc.fAlignedOffset);
}
83
// Returns an allocation made by allocate() to the pool. 'p' must be the exact pointer that
// allocate() returned; the Header stored immediately before it locates the owning block and
// the byte range to free.
void GrMemoryPool::release(void* p) {
    // NOTE: if we needed it, (p - block) would equal the original alignedOffset value returned by
    // GrBlockAllocator::allocate()
    Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));
    // kAssignedMarker is only set by allocate(); anything else means a double-free or a
    // pointer that did not come from this pool.
    SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);

    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);

#ifdef SK_DEBUG
    header->fSentinel = GrBlockAllocator::kFreedMarker;
    fAllocatedIDs.remove(header->fID);
    fAllocationCount--;
#endif

    // The block's metadata tracks its count of live allocations (see allocate()).
    int alive = block->metadata();
    if (alive == 1) {
        // This was last allocation in the block, so remove it
        fAllocator.releaseBlock(block);
    } else {
        // Update count and release storage of the allocation itself
        block->setMetadata(alive - 1);
        block->release(header->fStart, header->fEnd);
    }
}
Brian Salomon6986c652019-12-12 10:58:47 -0500108
#ifdef SK_DEBUG
// Debug-only consistency check: the per-block live counts stored in block metadata must sum
// to the pool-wide allocation count, which in turn must match the set of outstanding IDs.
void GrMemoryPool::validate() const {
    fAllocator.validate();

    int liveCount = 0;
    for (const auto* block : fAllocator.blocks()) {
        liveCount += block->metadata();
    }
    SkASSERT(liveCount == fAllocationCount);
    SkASSERT(fAllocationCount == fAllocatedIDs.count());
    // A pool with no live allocations must report itself empty.
    SkASSERT(liveCount > 0 || this->isEmpty());
}
#endif
Brian Salomon6986c652019-12-12 10:58:47 -0500122
Michael Ludwigcd019792020-03-17 10:14:48 -0400123///////////////////////////////////////////////////////////////////////////////////////////////////
Brian Salomon6986c652019-12-12 10:58:47 -0500124
125std::unique_ptr<GrOpMemoryPool> GrOpMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
Michael Ludwigcd019792020-03-17 10:14:48 -0400126 static_assert(sizeof(GrOpMemoryPool) < GrMemoryPool::kMinAllocationSize);
127
128 preallocSize = SkTPin(preallocSize, GrMemoryPool::kMinAllocationSize,
129 (size_t) GrBlockAllocator::kMaxAllocationSize);
130 minAllocSize = SkTPin(minAllocSize, GrMemoryPool::kMinAllocationSize,
131 (size_t) GrBlockAllocator::kMaxAllocationSize);
132 void* mem = operator new(preallocSize);
133 return std::unique_ptr<GrOpMemoryPool>(new (mem) GrOpMemoryPool(preallocSize, minAllocSize));
Brian Salomon6986c652019-12-12 10:58:47 -0500134}
135
Herb Derbyd6cfe722020-10-05 15:50:47 -0400136#if !defined(GR_OP_ALLOCATE_USE_NEW)
137 void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
138 GrOp* tmp = op.release();
139 SkASSERT(tmp);
140 tmp->~GrOp();
141 fPool.release(tmp);
142 }
143#endif