blob: b8c42a67d176daf99ea4c79b4a16bdd17cd23ce5 [file] [log] [blame]
John Stiles5c7bb322020-10-22 11:09:15 -04001/*
2 * Copyright 2020 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/sksl/SkSLPool.h"
9
John Stiles2d68ea32020-10-22 15:42:27 -040010#include "include/private/SkMutex.h"
John Stiles5c7bb322020-10-22 11:09:15 -040011#include "src/sksl/ir/SkSLIRNode.h"
12
13#define VLOG(...) // printf(__VA_ARGS__)
14
15namespace SkSL {
16
John Stiles5c7bb322020-10-22 11:09:15 -040017namespace { struct IRNodeData {
18 union {
19 uint8_t fBuffer[sizeof(IRNode)];
20 IRNodeData* fFreeListNext;
21 };
22}; }
23
24struct PoolData {
25 // This holds the first free node in the pool. It will be null when the pool is exhausted.
26 IRNodeData* fFreeListHead = fNodes;
27
28 // This points to end of our pooled data, and implies the number of nodes.
29 IRNodeData* fNodesEnd = nullptr;
30
31 // Our pooled data lives here. (We allocate lots of nodes here, not just one.)
32 IRNodeData fNodes[1];
33
34 // Accessors.
35 ptrdiff_t nodeCount() { return fNodesEnd - fNodes; }
36
John Stiles0bb9ec52020-10-22 11:35:18 -040037 int nodeIndex(IRNodeData* node) {
John Stiles5c7bb322020-10-22 11:09:15 -040038 SkASSERT(node >= fNodes);
39 SkASSERT(node < fNodesEnd);
John Stiles0bb9ec52020-10-22 11:35:18 -040040 return SkToInt(node - fNodes);
John Stiles5c7bb322020-10-22 11:09:15 -040041 }
42};
43
// Thread-local storage for the currently-attached PoolData. Two implementations are provided:
// older iOS deployment targets get a pthread-specific-key version, everything else uses C++
// `thread_local`. NOTE(review): the #if presumably exists because `thread_local` is unavailable
// before the iOS 9.0 runtime — confirm before changing the version check.
#if defined(SK_BUILD_FOR_IOS) && \
        (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)

#include <pthread.h>

// Lazily creates (once, via a function-local static) the process-wide pthread key used to
// store the per-thread PoolData pointer. Aborts on failure; no key destructor is installed.
static pthread_key_t get_pthread_key() {
    static pthread_key_t sKey = []{
        pthread_key_t key;
        int result = pthread_key_create(&key, /*destructor=*/nullptr);
        if (result != 0) {
            SK_ABORT("pthread_key_create failure: %d", result);
        }
        return key;
    }();
    return sKey;
}

// Returns this thread's attached PoolData, or null if no pool is attached.
static PoolData* get_thread_local_pool_data() {
    return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
}

// Attaches (or, with null, detaches) a PoolData to this thread.
// NOTE(review): the pthread_setspecific return value is ignored here — a failure would be silent.
static void set_thread_local_pool_data(PoolData* poolData) {
    pthread_setspecific(get_pthread_key(), poolData);
}

#else

static thread_local PoolData* sPoolData = nullptr;

// Returns this thread's attached PoolData, or null if no pool is attached.
static PoolData* get_thread_local_pool_data() {
    return sPoolData;
}

// Attaches (or, with null, detaches) a PoolData to this thread.
static void set_thread_local_pool_data(PoolData* poolData) {
    sPoolData = poolData;
}

#endif
82
John Stiles2d68ea32020-10-22 15:42:27 -040083static Pool* sRecycledPool; // GUARDED_BY recycled_pool_mutex
84static SkMutex& recycled_pool_mutex() {
85 static SkMutex* mutex = new SkMutex;
86 return *mutex;
87}
88
John Stiles5c7bb322020-10-22 11:09:15 -040089static PoolData* create_pool_data(int nodesInPool) {
90 // Create a PoolData structure with extra space at the end for additional IRNode data.
91 int numExtraIRNodes = nodesInPool - 1;
92 PoolData* poolData = static_cast<PoolData*>(malloc(sizeof(PoolData) +
93 (sizeof(IRNodeData) * numExtraIRNodes)));
94
95 // Initialize each pool node as a free node. The free nodes form a singly-linked list, each
96 // pointing to the next free node in sequence.
97 for (int index = 0; index < nodesInPool - 1; ++index) {
98 poolData->fNodes[index].fFreeListNext = &poolData->fNodes[index + 1];
99 }
100 poolData->fNodes[nodesInPool - 1].fFreeListNext = nullptr;
101 poolData->fNodesEnd = &poolData->fNodes[nodesInPool];
102
103 return poolData;
104}
105
Pool::~Pool() {
    // A pool should be detached before it is destroyed. If the caller forgot, detach it here
    // so the thread isn't left holding a dangling PoolData pointer after the free() below.
    if (get_thread_local_pool_data() == fData) {
        SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
        set_thread_local_pool_data(nullptr);
    }

    // In debug builds, report any nodes that were allocated from this pool but never freed.
    this->checkForLeaks();

    VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
    free(fData);
}
117
John Stiles2d68ea32020-10-22 15:42:27 -0400118std::unique_ptr<Pool> Pool::Create() {
119 constexpr int kNodesInPool = 2000;
120
121 SkAutoMutexExclusive lock(recycled_pool_mutex());
122 std::unique_ptr<Pool> pool;
123 if (sRecycledPool) {
124 pool = std::unique_ptr<Pool>(sRecycledPool);
125 sRecycledPool = nullptr;
126 VLOG("REUSE Pool:0x%016llX\n", (uint64_t)pool->fData);
127 } else {
128 pool = std::unique_ptr<Pool>(new Pool);
129 pool->fData = create_pool_data(kNodesInPool);
130 pool->fData->fFreeListHead = &pool->fData->fNodes[0];
131 VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
132 }
John Stiles5c7bb322020-10-22 11:09:15 -0400133 return pool;
134}
135
John Stiles2d68ea32020-10-22 15:42:27 -0400136void Pool::Recycle(std::unique_ptr<Pool> pool) {
137 if (pool) {
138 pool->checkForLeaks();
139 }
140
141 SkAutoMutexExclusive lock(recycled_pool_mutex());
142 if (sRecycledPool) {
143 delete sRecycledPool;
144 }
145
146 VLOG("STASH Pool:0x%016llX\n", pool ? (uint64_t)pool->fData : 0ull);
147 sRecycledPool = pool.release();
John Stiles5c7bb322020-10-22 11:09:15 -0400148}
149
150void Pool::attachToThread() {
151 VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
John Stiles0bb9ec52020-10-22 11:35:18 -0400152 SkASSERT(get_thread_local_pool_data() == nullptr);
153 set_thread_local_pool_data(fData);
John Stiles5c7bb322020-10-22 11:09:15 -0400154}
155
John Stiles2d68ea32020-10-22 15:42:27 -0400156void Pool::detachFromThread() {
157 VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
158 SkASSERT(get_thread_local_pool_data() != nullptr);
159 set_thread_local_pool_data(nullptr);
160}
161
John Stiles5c7bb322020-10-22 11:09:15 -0400162void* Pool::AllocIRNode() {
163 // Is a pool attached?
John Stiles0bb9ec52020-10-22 11:35:18 -0400164 PoolData* poolData = get_thread_local_pool_data();
165 if (poolData) {
John Stiles5c7bb322020-10-22 11:09:15 -0400166 // Does the pool contain a free node?
John Stiles0bb9ec52020-10-22 11:35:18 -0400167 IRNodeData* node = poolData->fFreeListHead;
John Stiles5c7bb322020-10-22 11:09:15 -0400168 if (node) {
169 // Yes. Take a node from the freelist.
John Stiles0bb9ec52020-10-22 11:35:18 -0400170 poolData->fFreeListHead = node->fFreeListNext;
John Stiles5c7bb322020-10-22 11:09:15 -0400171 VLOG("ALLOC Pool:0x%016llX Index:%04d 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400172 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
John Stiles5c7bb322020-10-22 11:09:15 -0400173 return node->fBuffer;
174 }
175 }
176
177 // The pool is detached or full; allocate nodes using malloc.
178 void* ptr = ::operator new(sizeof(IRNode));
179 VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400180 (uint64_t)poolData, (uint64_t)ptr);
John Stiles5c7bb322020-10-22 11:09:15 -0400181 return ptr;
182}
183
184void Pool::FreeIRNode(void* node_v) {
185 // Is a pool attached?
John Stiles0bb9ec52020-10-22 11:35:18 -0400186 PoolData* poolData = get_thread_local_pool_data();
187 if (poolData) {
John Stiles5c7bb322020-10-22 11:09:15 -0400188 // Did this node come from our pool?
189 auto* node = static_cast<IRNodeData*>(node_v);
John Stiles0bb9ec52020-10-22 11:35:18 -0400190 if (node >= &poolData->fNodes[0] && node < poolData->fNodesEnd) {
John Stiles5c7bb322020-10-22 11:09:15 -0400191 // Yes. Push it back onto the freelist.
192 VLOG("FREE Pool:0x%016llX Index:%04d 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400193 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
194 node->fFreeListNext = poolData->fFreeListHead;
195 poolData->fFreeListHead = node;
John Stiles5c7bb322020-10-22 11:09:15 -0400196 return;
197 }
198 }
199
200 // No pool is attached or the node was malloced; it must be freed.
201 VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400202 (uint64_t)poolData, (uint64_t)node_v);
John Stiles5c7bb322020-10-22 11:09:15 -0400203 ::operator delete(node_v);
204}
205
John Stiles2d68ea32020-10-22 15:42:27 -0400206void Pool::checkForLeaks() {
207#ifdef SK_DEBUG
208 ptrdiff_t nodeCount = fData->nodeCount();
209 std::vector<bool> freed(nodeCount);
210 for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
211 ptrdiff_t nodeIndex = fData->nodeIndex(node);
212 freed[nodeIndex] = true;
213 }
214 bool foundLeaks = false;
215 for (int index = 0; index < nodeCount; ++index) {
216 if (!freed[index]) {
217 IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
218 SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
219 foundLeaks = true;
220 }
221 }
222 if (foundLeaks) {
223 SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
224 }
225#endif
226}
227
John Stiles5c7bb322020-10-22 11:09:15 -0400228} // namespace SkSL