/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/sksl/SkSLPool.h"

#include "include/private/SkMutex.h"
#include "src/sksl/ir/SkSLIRNode.h"

#define VLOG(...) // printf(__VA_ARGS__)

namespace SkSL {

static constexpr int kSmallNodeSize = 120;
static constexpr int kNodesInPool = 512;
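
// At these sizes, each pool owns 512 slots of 120 bytes apiece: 60 KB of node storage in all,
// plus a small PoolData header.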
19
namespace { struct IRNodeData {
    union {
        uint8_t fBuffer[kSmallNodeSize];
        IRNodeData* fFreeListNext;
    };
}; }

struct PoolData {
    // This holds the first free node in the pool. It will be null when the pool is exhausted.
    IRNodeData* fFreeListHead = fNodes;

    // This points to the end of our pooled data, and implies the number of nodes.
    IRNodeData* fNodesEnd = nullptr;

    // Our pooled data lives here. (We allocate lots of nodes here, not just one.)
    IRNodeData fNodes[1];

    // Accessors.
    ptrdiff_t nodeCount() { return fNodesEnd - fNodes; }

    int nodeIndex(IRNodeData* node) {
        SkASSERT(node >= fNodes);
        SkASSERT(node < fNodesEnd);
        return SkToInt(node - fNodes);
    }
};

#if defined(SK_BUILD_FOR_IOS) && \
        (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
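// Deployment targets this old don't support `thread_local`, so we emulate it with the pthread
// thread-specific-data API instead.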

#include <pthread.h>

static pthread_key_t get_pthread_key() {
    static pthread_key_t sKey = []{
        pthread_key_t key;
        int result = pthread_key_create(&key, /*destructor=*/nullptr);
        if (result != 0) {
            SK_ABORT("pthread_key_create failure: %d", result);
        }
        return key;
    }();
    return sKey;
}

static PoolData* get_thread_local_pool_data() {
    return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
}

static void set_thread_local_pool_data(PoolData* poolData) {
    pthread_setspecific(get_pthread_key(), poolData);
}

#else

static thread_local PoolData* sPoolData = nullptr;

static PoolData* get_thread_local_pool_data() {
    return sPoolData;
}

static void set_thread_local_pool_data(PoolData* poolData) {
    sPoolData = poolData;
}

#endif

static Pool* sRecycledPool; // GUARDED_BY recycled_pool_mutex
static SkMutex& recycled_pool_mutex() {
    static SkMutex* mutex = new SkMutex;
    return *mutex;
}

static PoolData* create_pool_data(int nodesInPool) {
    // Create a PoolData structure with extra space at the end for additional IRNode data.
    int numExtraIRNodes = nodesInPool - 1;
    PoolData* poolData = static_cast<PoolData*>(malloc(sizeof(PoolData) +
                                                       (sizeof(IRNodeData) * numExtraIRNodes)));
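    // Note: because the PoolData is malloc'ed rather than new'ed, its default member
    // initializers never run; fFreeListHead is assigned explicitly by Pool::Create.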

    // Initialize each pool node as a free node. The free nodes form a singly-linked list, each
    // pointing to the next free node in sequence.
    for (int index = 0; index < nodesInPool - 1; ++index) {
        poolData->fNodes[index].fFreeListNext = &poolData->fNodes[index + 1];
    }
    poolData->fNodes[nodesInPool - 1].fFreeListNext = nullptr;
    poolData->fNodesEnd = &poolData->fNodes[nodesInPool];

    return poolData;
}

Pool::~Pool() {
    if (get_thread_local_pool_data() == fData) {
        SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
        set_thread_local_pool_data(nullptr);
    }

    this->checkForLeaks();

    VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
    free(fData);
}

std::unique_ptr<Pool> Pool::Create() {
    SkAutoMutexExclusive lock(recycled_pool_mutex());
    std::unique_ptr<Pool> pool;
    if (sRecycledPool) {
        pool = std::unique_ptr<Pool>(sRecycledPool);
        sRecycledPool = nullptr;
        VLOG("REUSE  Pool:0x%016llX\n", (uint64_t)pool->fData);
    } else {
        pool = std::unique_ptr<Pool>(new Pool);
        pool->fData = create_pool_data(kNodesInPool);
        pool->fData->fFreeListHead = &pool->fData->fNodes[0];
        VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
    }
    return pool;
}
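
// Typical lifecycle, for illustration: a thread calls Pool::Create (which may return the
// recycled pool), attachToThread() before generating IR, detachFromThread() when compilation
// finishes, and finally Pool::Recycle once every node has been freed.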

void Pool::Recycle(std::unique_ptr<Pool> pool) {
    if (pool) {
        pool->checkForLeaks();
    }

    SkAutoMutexExclusive lock(recycled_pool_mutex());
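    // Only one pool is kept on standby; if a pool is already stashed here, discard it.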
    if (sRecycledPool) {
        delete sRecycledPool;
    }

    VLOG("STASH  Pool:0x%016llX\n", pool ? (uint64_t)pool->fData : 0ull);
    sRecycledPool = pool.release();
}

void Pool::attachToThread() {
    VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
    SkASSERT(get_thread_local_pool_data() == nullptr);
    set_thread_local_pool_data(fData);
}

void Pool::detachFromThread() {
    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
    SkASSERT(get_thread_local_pool_data() != nullptr);
    set_thread_local_pool_data(nullptr);
}

void* Pool::AllocIRNode(size_t size) {
    // Is a pool attached?
    PoolData* poolData = get_thread_local_pool_data();
    if (poolData) {
        // Can the requested size fit in a pool node?
        if (size <= kSmallNodeSize) {
            // Does the pool contain a free node?
            IRNodeData* node = poolData->fFreeListHead;
            if (node) {
                // Yes. Take a node from the freelist.
                poolData->fFreeListHead = node->fFreeListNext;
                VLOG("ALLOC  Pool:0x%016llX Index:%04d         0x%016llX\n",
                     (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
                return node->fBuffer;
            }
        }
    }

    // The pool can't be used for this allocation. Allocate this node using the system allocator.
    void* ptr = ::operator new(size);
    VLOG("ALLOC  Pool:0x%016llX Index:____ malloc 0x%016llX\n",
         (uint64_t)poolData, (uint64_t)ptr);
    return ptr;
}

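// No extra bookkeeping is needed to tell where an allocation came from: an address that falls
// inside the attached pool's node range was pooled, and anything else came from operator new.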
void Pool::FreeIRNode(void* node_v) {
    // Is a pool attached?
    PoolData* poolData = get_thread_local_pool_data();
    if (poolData) {
        // Did this node come from our pool?
        auto* node = static_cast<IRNodeData*>(node_v);
        if (node >= &poolData->fNodes[0] && node < poolData->fNodesEnd) {
            // Yes. Push it back onto the freelist.
            VLOG("FREE   Pool:0x%016llX Index:%04d         0x%016llX\n",
                 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
            node->fFreeListNext = poolData->fFreeListHead;
            poolData->fFreeListHead = node;
            return;
        }
    }

    // We couldn't associate this node with our pool. Free it using the system allocator.
    VLOG("FREE   Pool:0x%016llX Index:____ free   0x%016llX\n",
         (uint64_t)poolData, (uint64_t)node_v);
    ::operator delete(node_v);
}

void Pool::checkForLeaks() {
#ifdef SK_DEBUG
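    // Walk the freelist and mark each node found there; any node not reachable from the
    // freelist is still allocated, i.e. it has leaked.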
    ptrdiff_t nodeCount = fData->nodeCount();
    std::vector<bool> freed(nodeCount);
    for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
        ptrdiff_t nodeIndex = fData->nodeIndex(node);
        freed[nodeIndex] = true;
    }
    bool foundLeaks = false;
    for (int index = 0; index < nodeCount; ++index) {
        if (!freed[index]) {
            IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
            SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
            foundLeaks = true;
        }
    }
    if (foundLeaks) {
        SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
    }
#endif
}

}  // namespace SkSL