/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrMemoryPool.h"

#include "include/private/SkTPin.h"
#include "src/core/SkASAN.h"
#include "src/gpu/ops/GrOp.h"

#ifdef SK_DEBUG
    #include <atomic>
#endif

#include <tuple>

///////////////////////////////////////////////////////////////////////////////////////////////////

std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
    static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);

    preallocSize = SkTPin(preallocSize, kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);
    minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);
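    // The pool is placement-newed into the front of a single over-sized heap
    // allocation; the bytes left over after GrMemoryPool's own fields become
    // fAllocator's preallocated head block (see the constructor's size math).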
    void* mem = operator new(preallocSize);
    return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
}

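// The third constructor argument sizes fAllocator's inline head block: of the
// preallocSize bytes obtained in Make(), the fields preceding fAllocator and
// the allocator object itself are already spoken for, and the remainder is
// usable storage.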
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
    SkDEBUGCODE(fAllocationCount = 0;)
}

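// A minimal usage sketch (hypothetical caller; `SomeOp` is illustrative and
// not part of this file). Objects are constructed in place and must all be
// released before the pool is destroyed, or reportLeaks() will fire:
//
//     auto pool = GrMemoryPool::Make(/*preallocSize=*/4096, /*minAllocSize=*/2048);
//     void* mem = pool->allocate(sizeof(SomeOp));
//     SomeOp* op = new (mem) SomeOp();
//     ...
//     op->~SomeOp();
//     pool->release(mem);
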
GrMemoryPool::~GrMemoryPool() {
    this->reportLeaks();
    SkASSERT(0 == fAllocationCount);
    SkASSERT(this->isEmpty());
}

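// Debug-only: prints up to the first ten leaked allocation IDs, then elides
// the rest with an ellipsis.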
void GrMemoryPool::reportLeaks() const {
#ifdef SK_DEBUG
    int i = 0;
    int n = fAllocatedIDs.count();
    for (int id : fAllocatedIDs) {
        if (++i == 1) {
            SkDebugf("Leaked %d IDs (in no particular order): %d%s", n, id, (n == i) ? "\n" : "");
        } else if (i < 11) {
            SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
        } else if (i == 11) {
            SkDebugf(", ...\n");
            break;
        }
    }
#endif
}

void* GrMemoryPool::allocate(size_t size) {
    static_assert(alignof(Header) <= kAlignment);
    SkDEBUGCODE(this->validate();)

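    // Ask the block allocator for 'size' bytes at kAlignment, with sizeof(Header)
    // padding bytes reserved immediately in front of the aligned user pointer.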
    GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);

    // Initialize GrMemoryPool's custom header at the start of the allocation
    Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
    header->fStart = alloc.fStart;
    header->fEnd = alloc.fEnd;

    // Update live count within the block
    alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);

#if defined(SK_SANITIZE_ADDRESS)
    sk_asan_poison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
#elif defined(SK_DEBUG)
    header->fSentinel = GrBlockAllocator::kAssignedMarker;
#endif

#if defined(SK_DEBUG)
    header->fID = []{
        static std::atomic<int> nextID{1};
        return nextID.fetch_add(1, std::memory_order_relaxed);
    }();

    // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
    fAllocatedIDs.add(header->fID);
    fAllocationCount++;
#endif

    // User-facing pointer is after the header padding
    return alloc.fBlock->ptr(alloc.fAlignedOffset);
}

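// Layout of one allocation inside a block (a sketch; the amount of padding
// depends on the requested alignment):
//
//   | ...padding... | Header | user storage ............ |
//   ^ fStart        ^ alignedOffset - sizeof(Header)     ^ fEnd
//
// fStart/fEnd record the full byte span reserved from the block so that
// release() can return it exactly, even when alignment padding was inserted.
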
void GrMemoryPool::release(void* p) {
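    // The user pointer was handed out sizeof(Header) bytes past the pool's
    // header, so step back to recover the bookkeeping for this allocation.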
    Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));

#if defined(SK_SANITIZE_ADDRESS)
    sk_asan_unpoison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
#elif defined(SK_DEBUG)
    SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);
    header->fSentinel = GrBlockAllocator::kFreedMarker;
#endif

#if defined(SK_DEBUG)
    fAllocatedIDs.remove(header->fID);
    fAllocationCount--;
#endif

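    // header->fStart is the allocation's offset within its block, which lets
    // the allocator map the header pointer back to the owning Block.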
    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);

#if defined(SK_DEBUG)
    // (p - block) matches the original alignedOffset value from GrBlockAllocator::allocate().
    intptr_t alignedOffset = (intptr_t)p - (intptr_t)block;
    SkASSERT(p == block->ptr(alignedOffset));

    // Scrub the block contents to prevent use-after-free errors.
    memset(p, 0xDD, header->fEnd - alignedOffset);
#endif

    int alive = block->metadata();
    if (alive == 1) {
        // This was the last allocation in the block, so remove the block entirely
        fAllocator.releaseBlock(block);
    } else {
        // Update the live count and release the storage of just this allocation
        block->setMetadata(alive - 1);
        block->release(header->fStart, header->fEnd);
    }
}

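// Debug-only invariant check: the per-block live counts must sum to the pool's
// total allocation count, which in turn must match the set of outstanding IDs.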
#ifdef SK_DEBUG
void GrMemoryPool::validate() const {
    fAllocator.validate();

    int allocCount = 0;
    for (const auto* b : fAllocator.blocks()) {
        allocCount += b->metadata();
    }
    SkASSERT(allocCount == fAllocationCount);
    SkASSERT(fAllocationCount == fAllocatedIDs.count());
    SkASSERT(allocCount > 0 || this->isEmpty());
}
#endif