blob: 2686cbf7b096ea1345f9b84c34028cd8dff34eb8 [file] [log] [blame]
bsalomon@google.com4da34e32012-06-19 15:40:27 +00001/*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "src/gpu/GrMemoryPool.h"
Michael Ludwigcd019792020-03-17 10:14:48 -04009
Mike Klein8aa0edf2020-10-16 11:04:18 -050010#include "include/private/SkTPin.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050011#include "src/gpu/ops/GrOp.h"
Michael Ludwigcd019792020-03-17 10:14:48 -040012
Mike Klein0ec1c572018-12-04 11:52:51 -050013#ifdef SK_DEBUG
14 #include <atomic>
15#endif
Herb Derbyd7b34a52017-03-20 11:19:23 -040016
Michael Ludwigcd019792020-03-17 10:14:48 -040017///////////////////////////////////////////////////////////////////////////////////////////////////
bsalomon@google.com4da34e32012-06-19 15:40:27 +000018
Brian Salomon6986c652019-12-12 10:58:47 -050019std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
Michael Ludwigcd019792020-03-17 10:14:48 -040020 static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);
21
22 preallocSize = SkTPin(preallocSize, kMinAllocationSize,
23 (size_t) GrBlockAllocator::kMaxAllocationSize);
24 minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
25 (size_t) GrBlockAllocator::kMaxAllocationSize);
26 void* mem = operator new(preallocSize);
27 return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
Robert Phillips7c525e62018-06-12 10:11:12 -040028}
29
// The embedded allocator receives whatever portion of the prealloc buffer follows this
// object's own fields: Make() placement-news the pool at the start of that buffer, so the
// usable tail is preallocSize minus everything up to and including fAllocator itself.
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
    // Debug-only live-allocation counter; checked against per-block metadata in validate().
    SkDEBUGCODE(fAllocationCount = 0;)
}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000035
GrMemoryPool::~GrMemoryPool() {
    // Print any still-live allocation IDs (debug builds) before asserting emptiness.
    this->reportLeaks();
    SkASSERT(0 == fAllocationCount);
    SkASSERT(this->isEmpty());
}
41
42void GrMemoryPool::reportLeaks() const {
Brian Salomona5002c32017-03-28 16:51:02 -040043#ifdef SK_DEBUG
44 int i = 0;
45 int n = fAllocatedIDs.count();
Michael Ludwigcd019792020-03-17 10:14:48 -040046 fAllocatedIDs.foreach([&i, n] (int id) {
Brian Salomona5002c32017-03-28 16:51:02 -040047 if (++i == 1) {
Robert Phillips19f466d2020-02-26 10:27:07 -050048 SkDebugf("Leaked %d IDs (in no particular order): %d%s", n, id, (n == i) ? "\n" : "");
Brian Salomona5002c32017-03-28 16:51:02 -040049 } else if (i < 11) {
50 SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
51 } else if (i == 11) {
52 SkDebugf(", ...\n");
53 }
54 });
55#endif
Michael Ludwigcd019792020-03-17 10:14:48 -040056}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000057
58void* GrMemoryPool::allocate(size_t size) {
Michael Ludwigcd019792020-03-17 10:14:48 -040059 static_assert(alignof(Header) <= kAlignment);
60 SkDEBUGCODE(this->validate();)
bsalomon@google.com4da34e32012-06-19 15:40:27 +000061
Michael Ludwigcd019792020-03-17 10:14:48 -040062 GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);
63
64 // Initialize GrMemoryPool's custom header at the start of the allocation
65 Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
66 header->fStart = alloc.fStart;
67 header->fEnd = alloc.fEnd;
68
69 // Update live count within the block
70 alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);
71
72#ifdef SK_DEBUG
73 header->fSentinel = GrBlockAllocator::kAssignedMarker;
74 header->fID = []{
75 static std::atomic<int> nextID{1};
Mike Klein0ec1c572018-12-04 11:52:51 -050076 return nextID++;
Michael Ludwigcd019792020-03-17 10:14:48 -040077 }();
78
Brian Salomona5002c32017-03-28 16:51:02 -040079 // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
Michael Ludwigcd019792020-03-17 10:14:48 -040080 fAllocatedIDs.add(header->fID);
81 fAllocationCount++;
82#endif
83
84 // User-facing pointer is after the header padding
85 return alloc.fBlock->ptr(alloc.fAlignedOffset);
bsalomon@google.com4da34e32012-06-19 15:40:27 +000086}
87
88void GrMemoryPool::release(void* p) {
Michael Ludwigcd019792020-03-17 10:14:48 -040089 // NOTE: if we needed it, (p - block) would equal the original alignedOffset value returned by
90 // GrBlockAllocator::allocate()
91 Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));
92 SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);
bsalomon@google.com4da34e32012-06-19 15:40:27 +000093
Michael Ludwigcd019792020-03-17 10:14:48 -040094 GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);
Brian Salomon6986c652019-12-12 10:58:47 -050095
humper@google.com0e515772013-01-07 19:54:40 +000096#ifdef SK_DEBUG
Michael Ludwigcd019792020-03-17 10:14:48 -040097 header->fSentinel = GrBlockAllocator::kFreedMarker;
98 fAllocatedIDs.remove(header->fID);
99 fAllocationCount--;
humper@google.com0e515772013-01-07 19:54:40 +0000100#endif
Michael Ludwigcd019792020-03-17 10:14:48 -0400101
102 int alive = block->metadata();
103 if (alive == 1) {
104 // This was last allocation in the block, so remove it
105 fAllocator.releaseBlock(block);
106 } else {
107 // Update count and release storage of the allocation itself
108 block->setMetadata(alive - 1);
109 block->release(header->fStart, header->fEnd);
110 }
bsalomon@google.com4da34e32012-06-19 15:40:27 +0000111}
Brian Salomon6986c652019-12-12 10:58:47 -0500112
#ifdef SK_DEBUG
// Debug-only consistency check: the per-block live counts must agree with the pool-wide
// allocation count and with the set of outstanding IDs.
void GrMemoryPool::validate() const {
    fAllocator.validate();

    // Sum every block's live-allocation metadata.
    int liveTotal = 0;
    for (const auto* block : fAllocator.blocks()) {
        liveTotal += block->metadata();
    }
    SkASSERT(liveTotal == fAllocationCount);
    SkASSERT(fAllocationCount == fAllocatedIDs.count());
    // A pool with zero live allocations must report itself empty.
    SkASSERT(liveTotal > 0 || this->isEmpty());
}
#endif
125#endif