blob: ac9524d4fc7fd0cbbfe1a08c4f9a3b32fcef42eb [file] [log] [blame]
/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#include "src/sksl/SkSLPool.h"
9
10#include "src/sksl/ir/SkSLIRNode.h"
11
12#define VLOG(...) // printf(__VA_ARGS__)
13
14namespace SkSL {
15
// One slot in the pool. A slot is either occupied by a live IRNode (fBuffer)
// or sits on the free list (fFreeListNext); the union overlays the two states
// in the same storage.
namespace { struct IRNodeData {
    union {
        // Raw storage large enough to hold one IRNode, while the slot is in use.
        uint8_t fBuffer[sizeof(IRNode)];
        // While the slot is free, links to the next free slot (null = end of list).
        IRNodeData* fFreeListNext;
    };
}; }
22
struct PoolData {
    // This holds the first free node in the pool. It will be null when the pool is exhausted.
    // NOTE(review): this default member initializer only runs under ordinary construction;
    // create_pool_data obtains PoolData via malloc, which performs no initialization.
    IRNodeData* fFreeListHead = fNodes;

    // This points to end of our pooled data, and implies the number of nodes.
    IRNodeData* fNodesEnd = nullptr;

    // Our pooled data lives here. (We allocate lots of nodes here, not just one.)
    // One-element "flexible array" idiom: the allocation is oversized so that
    // additional IRNodeData slots follow this member contiguously in memory.
    IRNodeData fNodes[1];

    // Accessors.
    // Total number of node slots in this pool, derived from the end pointer.
    ptrdiff_t nodeCount() { return fNodesEnd - fNodes; }

    // Index of `node` within fNodes; asserts the pointer actually lies in-pool.
    int nodeIndex(IRNodeData* node) {
        SkASSERT(node >= fNodes);
        SkASSERT(node < fNodesEnd);
        return SkToInt(node - fNodes);
    }
};
42
// Thread-local storage for the active pool. Old iOS deployment targets (pre-9.0)
// lack `thread_local` support, so that configuration falls back to pthread
// thread-specific data; everywhere else a plain thread_local pointer is used.
#if defined(SK_BUILD_FOR_IOS) && \
    (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)

#include <pthread.h>

// Lazily creates (once per process, via a function-local static) and returns the
// pthread key used to hold the per-thread PoolData pointer. Aborts on failure.
static pthread_key_t get_pthread_key() {
    static pthread_key_t sKey = []{
        pthread_key_t key;
        int result = pthread_key_create(&key, /*destructor=*/nullptr);
        if (result != 0) {
            SK_ABORT("pthread_key_create failure: %d", result);
        }
        return key;
    }();
    return sKey;
}

// Returns the pool attached to the current thread, or null if none is attached.
static PoolData* get_thread_local_pool_data() {
    return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
}

// Attaches `poolData` (which may be null, to detach) to the current thread.
static void set_thread_local_pool_data(PoolData* poolData) {
    pthread_setspecific(get_pthread_key(), poolData);
}

#else

static thread_local PoolData* sPoolData = nullptr;

// Returns the pool attached to the current thread, or null if none is attached.
static PoolData* get_thread_local_pool_data() {
    return sPoolData;
}

// Attaches `poolData` (which may be null, to detach) to the current thread.
static void set_thread_local_pool_data(PoolData* poolData) {
    sPoolData = poolData;
}

#endif
81
John Stiles5c7bb322020-10-22 11:09:15 -040082static PoolData* create_pool_data(int nodesInPool) {
83 // Create a PoolData structure with extra space at the end for additional IRNode data.
84 int numExtraIRNodes = nodesInPool - 1;
85 PoolData* poolData = static_cast<PoolData*>(malloc(sizeof(PoolData) +
86 (sizeof(IRNodeData) * numExtraIRNodes)));
87
88 // Initialize each pool node as a free node. The free nodes form a singly-linked list, each
89 // pointing to the next free node in sequence.
90 for (int index = 0; index < nodesInPool - 1; ++index) {
91 poolData->fNodes[index].fFreeListNext = &poolData->fNodes[index + 1];
92 }
93 poolData->fNodes[nodesInPool - 1].fFreeListNext = nullptr;
94 poolData->fNodesEnd = &poolData->fNodes[nodesInPool];
95
96 return poolData;
97}
98
Pool::~Pool() {
    // If this pool is still bound to the current thread, complain (debug builds) and
    // detach it, so we never leave a dangling thread-local pointer behind.
    if (get_thread_local_pool_data() == fData) {
        SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
        set_thread_local_pool_data(nullptr);
    }

    // In debug mode, report any leaked nodes.
#ifdef SK_DEBUG
    ptrdiff_t nodeCount = fData->nodeCount();
    // Walk the free list and mark each reachable slot; any slot left unmarked is
    // still occupied by a live IRNode, i.e. a leak.
    std::vector<bool> freed(nodeCount);
    for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
        ptrdiff_t nodeIndex = fData->nodeIndex(node);
        freed[nodeIndex] = true;
    }
    bool foundLeaks = false;
    for (int index = 0; index < nodeCount; ++index) {
        if (!freed[index]) {
            // The slot's buffer still holds a live IRNode; describe it for the log.
            IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
            SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
            foundLeaks = true;
        }
    }
    if (foundLeaks) {
        SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
    }
#endif

    VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
    // The PoolData came from malloc (see create_pool_data), so release it with free.
    free(fData);
}
129
130std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
131 auto pool = std::unique_ptr<Pool>(new Pool);
132 pool->fData = create_pool_data(nodesInPool);
133 pool->fData->fFreeListHead = &pool->fData->fNodes[0];
134 VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
135 pool->attachToThread();
136 return pool;
137}
138
139void Pool::detachFromThread() {
John Stiles0bb9ec52020-10-22 11:35:18 -0400140 VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
141 SkASSERT(get_thread_local_pool_data() != nullptr);
142 set_thread_local_pool_data(nullptr);
John Stiles5c7bb322020-10-22 11:09:15 -0400143}
144
145void Pool::attachToThread() {
146 VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
John Stiles0bb9ec52020-10-22 11:35:18 -0400147 SkASSERT(get_thread_local_pool_data() == nullptr);
148 set_thread_local_pool_data(fData);
John Stiles5c7bb322020-10-22 11:09:15 -0400149}
150
151void* Pool::AllocIRNode() {
152 // Is a pool attached?
John Stiles0bb9ec52020-10-22 11:35:18 -0400153 PoolData* poolData = get_thread_local_pool_data();
154 if (poolData) {
John Stiles5c7bb322020-10-22 11:09:15 -0400155 // Does the pool contain a free node?
John Stiles0bb9ec52020-10-22 11:35:18 -0400156 IRNodeData* node = poolData->fFreeListHead;
John Stiles5c7bb322020-10-22 11:09:15 -0400157 if (node) {
158 // Yes. Take a node from the freelist.
John Stiles0bb9ec52020-10-22 11:35:18 -0400159 poolData->fFreeListHead = node->fFreeListNext;
John Stiles5c7bb322020-10-22 11:09:15 -0400160 VLOG("ALLOC Pool:0x%016llX Index:%04d 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400161 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
John Stiles5c7bb322020-10-22 11:09:15 -0400162 return node->fBuffer;
163 }
164 }
165
166 // The pool is detached or full; allocate nodes using malloc.
167 void* ptr = ::operator new(sizeof(IRNode));
168 VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400169 (uint64_t)poolData, (uint64_t)ptr);
John Stiles5c7bb322020-10-22 11:09:15 -0400170 return ptr;
171}
172
173void Pool::FreeIRNode(void* node_v) {
174 // Is a pool attached?
John Stiles0bb9ec52020-10-22 11:35:18 -0400175 PoolData* poolData = get_thread_local_pool_data();
176 if (poolData) {
John Stiles5c7bb322020-10-22 11:09:15 -0400177 // Did this node come from our pool?
178 auto* node = static_cast<IRNodeData*>(node_v);
John Stiles0bb9ec52020-10-22 11:35:18 -0400179 if (node >= &poolData->fNodes[0] && node < poolData->fNodesEnd) {
John Stiles5c7bb322020-10-22 11:09:15 -0400180 // Yes. Push it back onto the freelist.
181 VLOG("FREE Pool:0x%016llX Index:%04d 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400182 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
183 node->fFreeListNext = poolData->fFreeListHead;
184 poolData->fFreeListHead = node;
John Stiles5c7bb322020-10-22 11:09:15 -0400185 return;
186 }
187 }
188
189 // No pool is attached or the node was malloced; it must be freed.
190 VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
John Stiles0bb9ec52020-10-22 11:35:18 -0400191 (uint64_t)poolData, (uint64_t)node_v);
John Stiles5c7bb322020-10-22 11:09:15 -0400192 ::operator delete(node_v);
193}
194
John Stiles5c7bb322020-10-22 11:09:15 -0400195} // namespace SkSL