blob: d71537136545440a0bae6211ff69a02134fc57ba [file] [log] [blame]
John Stiles5c7bb322020-10-22 11:09:15 -04001/*
2 * Copyright 2020 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/sksl/SkSLPool.h"
9
John Stiles15bfe382020-10-27 19:07:05 -040010#include <bitset>
11
John Stiles2d68ea32020-10-22 15:42:27 -040012#include "include/private/SkMutex.h"
John Stilesb3cc5fd2020-10-29 14:10:31 +000013#include "src/sksl/ir/SkSLIRNode.h"
John Stiles5c7bb322020-10-22 11:09:15 -040014
15#define VLOG(...) // printf(__VA_ARGS__)
16
17namespace SkSL {
18
John Stilesb3cc5fd2020-10-29 14:10:31 +000019namespace {
20
// A fixed-capacity pool of uniformly-sized nodes, managed via an intrusive freelist.
// Free nodes reuse their own storage to hold the link to the next free node.
template <int kNodeSize, int kNumNodes>
class Subpool {
public:
    // Starts with every node free: each node links to its successor, and the last
    // node terminates the list with null.
    Subpool() {
        for (int i = 0; i < kNumNodes - 1; ++i) {
            fNodes[i].fFreeListNext = &fNodes[i + 1];
        }
        fNodes[kNumNodes - 1].fFreeListNext = nullptr;
    }

    // Address of the first node in the pool.
    void* poolBegin() {
        return &fNodes[0];
    }

    // One-past-the-end address of the pool's storage.
    void* poolEnd() {
        return &fNodes[kNumNodes];
    }

    // Pops one node off the freelist and returns its buffer; returns null when the
    // pool has been exhausted.
    void* alloc() {
        Node* node = fFreeListHead;
        if (node == nullptr) {
            return nullptr;
        }
        fFreeListHead = node->fFreeListNext;
        return node->fBuffer;
    }

    // Returns a previously-allocated node to the freelist.
    void free(void* ptr) {
        SkASSERT(this->isValidNodePtrInPool(ptr));

        Node* node = static_cast<Subpool::Node*>(ptr);
        node->fFreeListNext = fFreeListHead;
        fFreeListHead = node;
    }

    // True when `ptr` lies within this subpool's storage AND points at the start of
    // a node rather than into its interior.
    bool isValidNodePtrInPool(void* ptr) {
        if (ptr < this->poolBegin() || ptr >= this->poolEnd()) {
            return false;
        }
        intptr_t byteOffset = (intptr_t)ptr - (intptr_t)this->poolBegin();
        return (byteOffset % kNodeSize) == 0;
    }

    // Debug-only sanity check: every node should be back on the freelist by now.
    // Any node not reachable from the freelist is reported as a leak.
    void checkForLeaks() {
#ifdef SK_DEBUG
        // Mark each node reachable from the freelist.
        std::bitset<kNumNodes> freed;
        for (Node* node = fFreeListHead; node; node = node->fFreeListNext) {
            freed[this->nodeIndex(node)] = true;
        }
        // Any unmarked node is still allocated; report it.
        bool foundLeaks = false;
        for (int i = 0; i < kNumNodes; ++i) {
            if (!freed[i]) {
                SkDebugf("Node %d leaked: ", i);
                IRNode* leak = reinterpret_cast<IRNode*>(fNodes[i].fBuffer);
                SkDebugf("%s\n", leak->description().c_str());
                foundLeaks = true;
            }
        }
        if (foundLeaks) {
            SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will "
                        "likely be fatal");
        }
#endif
    }

    // Accessors.
    constexpr int nodeCount() { return kNumNodes; }

    // Converts a node pointer into its zero-based index within fNodes.
    int nodeIndex(void* ptr) {
        SkASSERT(this->isValidNodePtrInPool(ptr));
        return SkToInt(static_cast<Subpool::Node*>(ptr) - fNodes);
    }

private:
    struct Node {
        union {
            uint8_t fBuffer[kNodeSize];   // payload storage while allocated
            Node* fFreeListNext;          // freelist link while free
        };
    };

    // First free node; null once the pool is exhausted.
    Node* fFreeListHead = fNodes;

    // Our pooled data lives here.
    Node fNodes[kNumNodes];
};
123
// Sizing for the two subpools. Allocations of up to kSmallNodeSize bytes come from the
// small pool; anything up to kLargeNodeSize comes from the large pool (see AllocIRNode).
// The specific sizes/counts are presumably tuned to typical IRNode sizes — TODO confirm.
static constexpr int kSmallNodeSize = 120;
static constexpr int kNumSmallNodes = 480;
using SmallSubpool = Subpool<kSmallNodeSize, kNumSmallNodes>;

static constexpr int kLargeNodeSize = 240;
static constexpr int kNumLargeNodes = 20;
using LargeSubpool = Subpool<kLargeNodeSize, kNumLargeNodes>;
131
132} // namespace
133
// The backing storage for one Pool: a small-node subpool followed by a large-node
// subpool. FreeIRNode's range checks rely on fSmall preceding fLarge in memory
// (a pointer at or past fSmall's begin but below fLarge's end is assumed poolable),
// so do not reorder these members.
struct PoolData {
    SmallSubpool fSmall;
    LargeSubpool fLarge;
};
138
// Thread-local storage for the currently-attached PoolData. On iOS deployment targets
// older than 9.0 we go through pthread-specific storage instead of `thread_local` —
// presumably because C++ `thread_local` is not usable there; TODO confirm.
#if defined(SK_BUILD_FOR_IOS) && \
    (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)

#include <pthread.h>

// Lazily creates (exactly once, thread-safely via static-local init) the process-wide
// pthread key used to store the per-thread pool pointer. Aborts if creation fails.
static pthread_key_t get_pthread_key() {
    static pthread_key_t sKey = []{
        pthread_key_t key;
        int result = pthread_key_create(&key, /*destructor=*/nullptr);
        if (result != 0) {
            SK_ABORT("pthread_key_create failure: %d", result);
        }
        return key;
    }();
    return sKey;
}

// Returns the PoolData attached to the calling thread, or null if none is attached.
static PoolData* get_thread_local_pool_data() {
    return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
}

// Attaches `poolData` to the calling thread (pass null to detach).
static void set_thread_local_pool_data(PoolData* poolData) {
    pthread_setspecific(get_pthread_key(), poolData);
}

#else

// One pool pointer per thread; null when no pool is attached.
static thread_local PoolData* sPoolData = nullptr;

// Returns the PoolData attached to the calling thread, or null if none is attached.
static PoolData* get_thread_local_pool_data() {
    return sPoolData;
}

// Attaches `poolData` to the calling thread (pass null to detach).
static void set_thread_local_pool_data(PoolData* poolData) {
    sPoolData = poolData;
}

#endif
177
// A single retired Pool kept around for reuse by the next Create() call.
static Pool* sRecycledPool; // GUARDED_BY recycled_pool_mutex
// Returns the mutex guarding sRecycledPool. The mutex is heap-allocated and never
// deleted — presumably to sidestep static-destruction-order issues at exit; TODO confirm.
static SkMutex& recycled_pool_mutex() {
    static SkMutex* mutex = new SkMutex;
    return *mutex;
}
183
// Destroys the pool and its backing PoolData. The pool should have been detached from
// its thread first; if it is still attached, this asserts (debug) and force-detaches.
Pool::~Pool() {
    if (get_thread_local_pool_data() == fData) {
        SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
        set_thread_local_pool_data(nullptr);
    }

    // Debug builds report any nodes that were allocated but never returned to the pool.
    fData->fSmall.checkForLeaks();
    fData->fLarge.checkForLeaks();

    VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
    delete fData;
}
196
John Stiles2d68ea32020-10-22 15:42:27 -0400197std::unique_ptr<Pool> Pool::Create() {
John Stiles2d68ea32020-10-22 15:42:27 -0400198 SkAutoMutexExclusive lock(recycled_pool_mutex());
199 std::unique_ptr<Pool> pool;
200 if (sRecycledPool) {
201 pool = std::unique_ptr<Pool>(sRecycledPool);
202 sRecycledPool = nullptr;
John Stilesb3cc5fd2020-10-29 14:10:31 +0000203 VLOG("REUSE Pool:0x%016llX\n", (uint64_t)pool->fData);
John Stiles2d68ea32020-10-22 15:42:27 -0400204 } else {
205 pool = std::unique_ptr<Pool>(new Pool);
John Stilesb3cc5fd2020-10-29 14:10:31 +0000206 pool->fData = new PoolData;
207 VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
John Stiles2d68ea32020-10-22 15:42:27 -0400208 }
John Stiles5c7bb322020-10-22 11:09:15 -0400209 return pool;
210}
211
John Stiles2d68ea32020-10-22 15:42:27 -0400212void Pool::Recycle(std::unique_ptr<Pool> pool) {
213 if (pool) {
John Stilesb3cc5fd2020-10-29 14:10:31 +0000214 pool->fData->fSmall.checkForLeaks();
215 pool->fData->fLarge.checkForLeaks();
John Stiles2d68ea32020-10-22 15:42:27 -0400216 }
217
218 SkAutoMutexExclusive lock(recycled_pool_mutex());
219 if (sRecycledPool) {
220 delete sRecycledPool;
221 }
222
John Stilesb3cc5fd2020-10-29 14:10:31 +0000223 VLOG("STASH Pool:0x%016llX\n", pool ? (uint64_t)pool->fData : 0ull);
John Stiles2d68ea32020-10-22 15:42:27 -0400224 sRecycledPool = pool.release();
John Stiles5c7bb322020-10-22 11:09:15 -0400225}
226
// Makes this pool the calling thread's active pool, so AllocIRNode/FreeIRNode on this
// thread draw from it. Asserts (debug) that no other pool is already attached.
void Pool::attachToThread() {
    VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
    SkASSERT(get_thread_local_pool_data() == nullptr);
    set_thread_local_pool_data(fData);
}
232
// Clears the calling thread's active pool. Asserts (debug) that a pool was attached.
void Pool::detachFromThread() {
    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
    SkASSERT(get_thread_local_pool_data() != nullptr);
    set_thread_local_pool_data(nullptr);
}
238
John Stiles270b5c02020-10-27 17:49:37 -0400239void* Pool::AllocIRNode(size_t size) {
John Stiles3898bb52020-10-27 17:03:14 +0000240 // Is a pool attached?
John Stilesb3cc5fd2020-10-29 14:10:31 +0000241 PoolData* poolData = get_thread_local_pool_data();
242 if (poolData) {
243 if (size <= kSmallNodeSize) {
244 // The node will fit in the small pool.
245 auto* node = poolData->fSmall.alloc();
246 if (node) {
247 VLOG("ALLOC Pool:0x%016llX Index:S%03d 0x%016llX\n",
248 (uint64_t)poolData, poolData->fSmall.nodeIndex(node), (uint64_t)node);
249 return node;
250 }
251 } else if (size <= kLargeNodeSize) {
252 // Try to allocate a large node.
253 auto* node = poolData->fLarge.alloc();
254 if (node) {
255 VLOG("ALLOC Pool:0x%016llX Index:L%03d 0x%016llX\n",
256 (uint64_t)poolData, poolData->fLarge.nodeIndex(node), (uint64_t)node);
257 return node;
258 }
259 }
John Stiles5c7bb322020-10-22 11:09:15 -0400260 }
261
John Stilesb3cc5fd2020-10-29 14:10:31 +0000262 // The pool can't be used for this allocation. Allocate nodes using the system allocator.
263 void* ptr = ::operator new(size);
264 VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
265 (uint64_t)poolData, (uint64_t)ptr);
266 return ptr;
John Stiles5c7bb322020-10-22 11:09:15 -0400267}
268
// Releases memory previously obtained from AllocIRNode. If the pointer belongs to the
// thread's attached pool, the node is returned to the matching subpool; otherwise it is
// handed to the global operator delete (matching AllocIRNode's fallback path).
void Pool::FreeIRNode(void* node) {
    // Is a pool attached?
    PoolData* poolData = get_thread_local_pool_data();
    if (poolData) {
        // Did this node come from either of our pools? These range checks rely on
        // fSmall preceding fLarge within PoolData: a pointer at or past the small
        // pool's begin but below the large pool's end must belong to one of the two.
        if (node >= poolData->fSmall.poolBegin()) {
            if (node < poolData->fSmall.poolEnd()) {
                poolData->fSmall.free(node);
                VLOG("FREE Pool:0x%016llX Index:S%03d 0x%016llX\n",
                     (uint64_t)poolData, poolData->fSmall.nodeIndex(node), (uint64_t)node);
                return;
            } else if (node < poolData->fLarge.poolEnd()) {
                poolData->fLarge.free(node);
                VLOG("FREE Pool:0x%016llX Index:L%03d 0x%016llX\n",
                     (uint64_t)poolData, poolData->fLarge.nodeIndex(node), (uint64_t)node);
                return;
            }
        }
    }

    // We couldn't associate this node with our pool. Free it using the system allocator.
    VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
         (uint64_t)poolData, (uint64_t)node);
    ::operator delete(node);
}
294
John Stilesb3cc5fd2020-10-29 14:10:31 +0000295
John Stiles5c7bb322020-10-22 11:09:15 -0400296} // namespace SkSL