blob: 21be212cd6b01a46f212818cffa785277cbb772e [file] [log] [blame]
bsalomon@google.com4da34e32012-06-19 15:40:27 +00001/*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "src/gpu/GrMemoryPool.h"
Michael Ludwigcd019792020-03-17 10:14:48 -04009
Mike Klein8aa0edf2020-10-16 11:04:18 -050010#include "include/private/SkTPin.h"
John Stiles5c7e1a12020-11-04 09:59:36 -050011#include "src/core/SkASAN.h"
Michael Ludwigcd019792020-03-17 10:14:48 -040012
Mike Klein0ec1c572018-12-04 11:52:51 -050013#ifdef SK_DEBUG
14 #include <atomic>
15#endif
Herb Derbyd7b34a52017-03-20 11:19:23 -040016
John Stiles7df731b2020-12-22 14:57:16 -050017#include <tuple>
18
Michael Ludwigcd019792020-03-17 10:14:48 -040019///////////////////////////////////////////////////////////////////////////////////////////////////
bsalomon@google.com4da34e32012-06-19 15:40:27 +000020
Brian Salomon6986c652019-12-12 10:58:47 -050021std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
Michael Ludwigcd019792020-03-17 10:14:48 -040022 static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);
23
24 preallocSize = SkTPin(preallocSize, kMinAllocationSize,
25 (size_t) GrBlockAllocator::kMaxAllocationSize);
26 minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
27 (size_t) GrBlockAllocator::kMaxAllocationSize);
28 void* mem = operator new(preallocSize);
29 return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
Robert Phillips7c525e62018-06-12 10:11:12 -040030}
31
// Constructs the pool inside the buffer obtained by Make(). The embedded
// GrBlockAllocator's inline head block is given whatever remains of
// preallocSize after the pool's own leading fields and the allocator object
// itself — hence the offsetof/sizeof arithmetic, which depends on fAllocator
// being the trailing member of GrMemoryPool.
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
    // Debug-only live-allocation counter, matched by allocate()/release().
    SkDEBUGCODE(fAllocationCount = 0;)
}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000037
38GrMemoryPool::~GrMemoryPool() {
John Stiles244ebf72020-10-28 11:08:06 -040039 this->reportLeaks();
40 SkASSERT(0 == fAllocationCount);
41 SkASSERT(this->isEmpty());
42}
43
44void GrMemoryPool::reportLeaks() const {
Brian Salomona5002c32017-03-28 16:51:02 -040045#ifdef SK_DEBUG
46 int i = 0;
47 int n = fAllocatedIDs.count();
John Stiles7df731b2020-12-22 14:57:16 -050048 for (int id : fAllocatedIDs) {
Brian Salomona5002c32017-03-28 16:51:02 -040049 if (++i == 1) {
Robert Phillips19f466d2020-02-26 10:27:07 -050050 SkDebugf("Leaked %d IDs (in no particular order): %d%s", n, id, (n == i) ? "\n" : "");
Brian Salomona5002c32017-03-28 16:51:02 -040051 } else if (i < 11) {
52 SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
53 } else if (i == 11) {
54 SkDebugf(", ...\n");
John Stiles7df731b2020-12-22 14:57:16 -050055 break;
Brian Salomona5002c32017-03-28 16:51:02 -040056 }
John Stiles7df731b2020-12-22 14:57:16 -050057 }
Brian Salomona5002c32017-03-28 16:51:02 -040058#endif
Michael Ludwigcd019792020-03-17 10:14:48 -040059}
bsalomon@google.com4da34e32012-06-19 15:40:27 +000060
// Returns a pointer to 'size' bytes from the pool, aligned to kAlignment.
// A Header is tucked into the alignment padding immediately before the
// returned pointer; release() reads it back to find the owning block.
void* GrMemoryPool::allocate(size_t size) {
    // The header is placed in the padding just before the user pointer, so the
    // pool's alignment guarantee must also satisfy the header's alignment.
    static_assert(alignof(Header) <= kAlignment);
    SkDEBUGCODE(this->validate();)

    // Reserve size bytes plus sizeof(Header) of leading padding.
    GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);

    // Initialize GrMemoryPool's custom header at the start of the allocation
    Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
    header->fStart = alloc.fStart;
    header->fEnd = alloc.fEnd;

    // Update live count within the block
    alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);

#if defined(SK_SANITIZE_ADDRESS)
    // Under ASAN, poison the sentinel so any stray write into the header is
    // reported immediately; release() unpoisons it before use.
    sk_asan_poison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
#elif defined(SK_DEBUG)
    // In plain debug builds a magic value in the sentinel detects
    // corruption and double-frees (checked in release()).
    header->fSentinel = GrBlockAllocator::kAssignedMarker;
#endif

#if defined(SK_DEBUG)
    // Tag the allocation with a process-unique ID for leak reporting.
    header->fID = []{
        static std::atomic<int> nextID{1};
        return nextID.fetch_add(1, std::memory_order_relaxed);
    }();

    // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
    fAllocatedIDs.add(header->fID);
    fAllocationCount++;
#endif

    // User-facing pointer is after the header padding
    return alloc.fBlock->ptr(alloc.fAlignedOffset);
}
95
// Returns an allocation previously obtained from allocate() to the pool.
// 'p' must be exactly the pointer allocate() returned; the Header written
// into the padding just before it locates the owning block and byte range.
void GrMemoryPool::release(void* p) {
    Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));

#if defined(SK_SANITIZE_ADDRESS)
    // Unpoison before touching the header; allocate() poisoned the sentinel.
    sk_asan_unpoison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
#elif defined(SK_DEBUG)
    // Sentinel must still hold the live-allocation marker; anything else
    // indicates corruption or a double free. Overwrite so a second release
    // of the same pointer trips this assert.
    SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);
    header->fSentinel = GrBlockAllocator::kFreedMarker;
#endif

#if defined(SK_DEBUG)
    // Mirror the debug bookkeeping done in allocate().
    fAllocatedIDs.remove(header->fID);
    fAllocationCount--;
#endif

    // Recover the block that owns this allocation from the stored start offset.
    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);

#if defined(SK_DEBUG)
    // (p - block) matches the original alignedOffset value from GrBlockAllocator::allocate().
    intptr_t alignedOffset = (intptr_t)p - (intptr_t)block;
    SkASSERT(p == block->ptr(alignedOffset));

    // Scrub the block contents to prevent use-after-free errors.
    memset(p, 0xDD, header->fEnd - alignedOffset);
#endif

    // The block's metadata holds its live-allocation count (see allocate()).
    int alive = block->metadata();
    if (alive == 1) {
        // This was last allocation in the block, so remove it
        fAllocator.releaseBlock(block);
    } else {
        // Update count and release storage of the allocation itself
        block->setMetadata(alive - 1);
        block->release(header->fStart, header->fEnd);
    }
}
Brian Salomon6986c652019-12-12 10:58:47 -0500132
#ifdef SK_DEBUG
void GrMemoryPool::validate() const {
    // Internal consistency of the underlying allocator first.
    fAllocator.validate();

    // Each block's metadata is its live-allocation count; the sum must agree
    // with the pool-wide debug bookkeeping maintained by allocate()/release().
    int liveAllocations = 0;
    for (const auto* block : fAllocator.blocks()) {
        liveAllocations += block->metadata();
    }
    SkASSERT(liveAllocations == fAllocationCount);
    SkASSERT(fAllocationCount == fAllocatedIDs.count());
    SkASSERT(liveAllocations > 0 || this->isEmpty());
}
#endif