/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrMemoryPool.h"

#include "include/private/SkTPin.h"
#include "src/core/SkASAN.h"
#include "src/gpu/ops/GrOp.h"

#ifdef SK_DEBUG
#include <atomic>
#endif

#include <tuple>

///////////////////////////////////////////////////////////////////////////////////////////////////

std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
    static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);

    // Clamp both sizes to the range the block allocator supports before sizing the buffer.
    preallocSize = SkTPin(preallocSize, kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);
    minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
                          (size_t) GrBlockAllocator::kMaxAllocationSize);

    // The pool is placement-new'd at the front of its own preallocated buffer; the bytes that
    // remain after the GrMemoryPool object itself become the allocator's initial block (see the
    // size math in the constructor below).
    void* mem = operator new(preallocSize);
    return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
}

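// Illustrative use of the pool (a hedged sketch, not code from this file): the pool hands back
// raw storage, so a hypothetical caller constructs and destroys its object manually and passes
// the same pointer back to release().
//
//     auto pool = GrMemoryPool::Make(/*preallocSize=*/16 * 1024, /*minAllocSize=*/4 * 1024);
//     void* storage = pool->allocate(sizeof(SomeOp));   // SomeOp is a stand-in type
//     SomeOp* op = new (storage) SomeOp(/*args*/);
//     // ... use op ...
//     op->~SomeOp();
//     pool->release(storage);
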
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
    // The allocator's initial block uses whatever remains of the prealloc buffer after the pool's
    // own fields; the offsetof/sizeof math assumes fAllocator is GrMemoryPool's last member.
    SkDEBUGCODE(fAllocationCount = 0;)
}

GrMemoryPool::~GrMemoryPool() {
    this->reportLeaks();
    SkASSERT(0 == fAllocationCount);
    SkASSERT(this->isEmpty());
}

void GrMemoryPool::reportLeaks() const {
#ifdef SK_DEBUG
    int i = 0;
    int n = fAllocatedIDs.count();
    for (int id : fAllocatedIDs) {
        if (++i == 1) {
            SkDebugf("Leaked %d IDs (in no particular order): %d%s", n, id, (n == i) ? "\n" : "");
        } else if (i < 11) {
            SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
        } else if (i == 11) {
            SkDebugf(", ...\n");
            break;
        }
    }
#endif
}

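// Each allocation made by allocate() below occupies a contiguous range of a GrBlockAllocator
// block: alignment padding, then the pool's Header, then the user bytes at the returned pointer.
// Header::fStart and fEnd are block-relative offsets spanning that whole range so release() can
// return it, and each block's metadata slot tracks how many of its allocations are still live.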
void* GrMemoryPool::allocate(size_t size) {
    static_assert(alignof(Header) <= kAlignment);
    SkDEBUGCODE(this->validate();)

    GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);

    // Initialize GrMemoryPool's custom header at the start of the allocation
    Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
    header->fStart = alloc.fStart;
    header->fEnd = alloc.fEnd;

    // Update the live allocation count within the block
    alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);

#if defined(SK_SANITIZE_ADDRESS)
    // Poison the sentinel so ASAN reports any access to it while the allocation is live.
    sk_asan_poison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
#elif defined(SK_DEBUG)
    header->fSentinel = GrBlockAllocator::kAssignedMarker;
#endif

#if defined(SK_DEBUG)
    header->fID = []{
        static std::atomic<int> nextID{1};
        return nextID.fetch_add(1, std::memory_order_relaxed);
    }();

    // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
    fAllocatedIDs.add(header->fID);
    fAllocationCount++;
#endif

    // The user-facing pointer comes after the header padding
    return alloc.fBlock->ptr(alloc.fAlignedOffset);
}

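// release() takes a pointer previously returned by allocate() on this pool. The pool never runs
// destructors, so destroying any object constructed in the allocation is the caller's
// responsibility.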
void GrMemoryPool::release(void* p) {
    Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));

#if defined(SK_SANITIZE_ADDRESS)
    sk_asan_unpoison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
#elif defined(SK_DEBUG)
    SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);
    header->fSentinel = GrBlockAllocator::kFreedMarker;
#endif

#if defined(SK_DEBUG)
    fAllocatedIDs.remove(header->fID);
    fAllocationCount--;
#endif

    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);

#if defined(SK_DEBUG)
    // (p - block) matches the original alignedOffset value from GrBlockAllocator::allocate().
    intptr_t alignedOffset = (intptr_t)p - (intptr_t)block;
    SkASSERT(p == block->ptr(alignedOffset));

    // Scrub the block contents to prevent use-after-free errors.
    memset(p, 0xDD, header->fEnd - alignedOffset);
#endif

    int alive = block->metadata();
    if (alive == 1) {
        // This was the last allocation in the block, so release the entire block
        fAllocator.releaseBlock(block);
    } else {
        // Update the live count and release only this allocation's storage
        block->setMetadata(alive - 1);
        block->release(header->fStart, header->fEnd);
    }
}

#ifdef SK_DEBUG
void GrMemoryPool::validate() const {
    fAllocator.validate();

    int allocCount = 0;
    for (const auto* b : fAllocator.blocks()) {
        allocCount += b->metadata();
    }
    SkASSERT(allocCount == fAllocationCount);
    SkASSERT(fAllocationCount == fAllocatedIDs.count());
    SkASSERT(allocCount > 0 || this->isEmpty());
}
#endif