/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/sksl/SkSLPool.h"

#include "src/sksl/ir/SkSLIRNode.h"

#define VLOG(...) // printf(__VA_ARGS__)

namespace SkSL {

#if defined(SK_BUILD_FOR_IOS) && \
    (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)

// iOS did not support C++11 `thread_local` variables until iOS 9.
// Pooling is not supported here; we allocate all nodes directly.
struct PoolData {};

Pool::~Pool() {}
std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
    auto pool = std::unique_ptr<Pool>(new Pool);
    pool->fData = nullptr;
    return pool;
}
void Pool::detachFromThread() {}
void Pool::attachToThread() {}
void* Pool::AllocIRNode() { return ::operator new(sizeof(IRNode)); }
void Pool::FreeIRNode(void* node) { ::operator delete(node); }

#else // !defined(SK_BUILD_FOR_IOS)...

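// Each slot in the pool is either raw storage for a live IRNode or, while unused, a link in the
// freelist of available slots; the union lets a single allocation serve both roles.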
namespace { struct IRNodeData {
    union {
        uint8_t fBuffer[sizeof(IRNode)];
        IRNodeData* fFreeListNext;
    };
}; }

struct PoolData {
    // This holds the first free node in the pool. It will be null when the pool is exhausted.
    IRNodeData* fFreeListHead = fNodes;

    // This points to the end of our pooled data, and implies the number of nodes.
    IRNodeData* fNodesEnd = nullptr;

    // Our pooled data lives here. (We allocate lots of nodes here, not just one.)
    IRNodeData fNodes[1];

    // Accessors.
    ptrdiff_t nodeCount() { return fNodesEnd - fNodes; }

    ptrdiff_t nodeIndex(IRNodeData* node) {
        SkASSERT(node >= fNodes);
        SkASSERT(node < fNodesEnd);
        return node - fNodes;
    }
};
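
// Conceptual memory layout of a PoolData created for N nodes (see create_pool_data below):
//
//     +---------------+-----------+-----------+-----------+-----+-------------+
//     | fFreeListHead | fNodesEnd | fNodes[0] | fNodes[1] | ... | fNodes[N-1] |
//     +---------------+-----------+-----------+-----------+-----+-------------+
//     \_________ sizeof(PoolData) ___________/ \___ (N-1) extra IRNodeData ___/
//
// fNodes is declared with a single element; the allocation below over-allocates so that
// indices 1 through N-1 are usable as well.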

static thread_local PoolData* sPoolData = nullptr;

static PoolData* create_pool_data(int nodesInPool) {
    // Create a PoolData structure with extra space at the end for additional IRNode data.
    int numExtraIRNodes = nodesInPool - 1;
    PoolData* poolData = static_cast<PoolData*>(malloc(sizeof(PoolData) +
                                                       (sizeof(IRNodeData) * numExtraIRNodes)));

    // Initialize each pool node as a free node. The free nodes form a singly-linked list, each
    // pointing to the next free node in sequence.
    for (int index = 0; index < nodesInPool - 1; ++index) {
        poolData->fNodes[index].fFreeListNext = &poolData->fNodes[index + 1];
    }
    poolData->fNodes[nodesInPool - 1].fFreeListNext = nullptr;
    poolData->fNodesEnd = &poolData->fNodes[nodesInPool];

    return poolData;
}

Pool::~Pool() {
    if (sPoolData == fData) {
        SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
        sPoolData = nullptr;
    }

    // In debug mode, report any leaked nodes.
#ifdef SK_DEBUG
    ptrdiff_t nodeCount = fData->nodeCount();
    std::vector<bool> freed(nodeCount);
    for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
        ptrdiff_t nodeIndex = fData->nodeIndex(node);
        freed[nodeIndex] = true;
    }
    bool foundLeaks = false;
    for (int index = 0; index < nodeCount; ++index) {
        if (!freed[index]) {
            IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
            SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
            foundLeaks = true;
        }
    }
    if (foundLeaks) {
        SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
    }
#endif

    VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
    free(fData);
}

std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
    auto pool = std::unique_ptr<Pool>(new Pool);
    pool->fData = create_pool_data(nodesInPool);
    pool->fData->fFreeListHead = &pool->fData->fNodes[0];
    VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
    pool->attachToThread();
    return pool;
}
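
// A rough usage sketch (hypothetical caller; the real call sites live elsewhere in SkSL, and the
// node count of 2000 is illustrative only):
//
//     std::unique_ptr<Pool> pool = Pool::CreatePoolOnThread(2000);  // pool is now attached
//     ... allocate IRNodes; their storage is served from the pool via AllocIRNode() ...
//     pool->detachFromThread();   // e.g. before handing the result to another thread
//     ...
//     pool->attachToThread();     // re-attach before freeing or creating more nodes
//     ... free every IRNode ...
//     pool.reset();               // all nodes must be freed before the pool is destroyed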

void Pool::detachFromThread() {
    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)sPoolData);
    SkASSERT(sPoolData != nullptr);
    sPoolData = nullptr;
}

void Pool::attachToThread() {
    VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
    SkASSERT(sPoolData == nullptr);
    sPoolData = fData;
}
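
// Because sPoolData is thread_local, attach/detach only affect the calling thread, and the
// asserts above enforce that at most one pool is attached to a given thread at any time.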

void* Pool::AllocIRNode() {
    // Is a pool attached?
    if (sPoolData) {
        // Does the pool contain a free node?
        IRNodeData* node = sPoolData->fFreeListHead;
        if (node) {
            // Yes. Take a node from the freelist.
            sPoolData->fFreeListHead = node->fFreeListNext;
            VLOG("ALLOC Pool:0x%016llX Index:%04d 0x%016llX\n",
                 (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
            return node->fBuffer;
        }
    }

    // No pool is attached, or the pool is full; allocate the node on the heap with operator new.
    void* ptr = ::operator new(sizeof(IRNode));
    VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
         (uint64_t)sPoolData, (uint64_t)ptr);
    return ptr;
}
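
// Note: nodes handed out by the heap fallback above are not part of any pool; FreeIRNode() below
// detects them with a pointer-range check and releases them with operator delete.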

void Pool::FreeIRNode(void* node_v) {
    // Is a pool attached?
    if (sPoolData) {
        // Did this node come from our pool?
        auto* node = static_cast<IRNodeData*>(node_v);
        if (node >= &sPoolData->fNodes[0] && node < sPoolData->fNodesEnd) {
            // Yes. Push it back onto the freelist.
            VLOG("FREE Pool:0x%016llX Index:%04d 0x%016llX\n",
                 (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
            node->fFreeListNext = sPoolData->fFreeListHead;
            sPoolData->fFreeListHead = node;
            return;
        }
    }

    // No pool is attached, or the node came from the heap fallback; release it with operator delete.
    VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
         (uint64_t)sPoolData, (uint64_t)node_v);
    ::operator delete(node_v);
}

#endif // !defined(SK_BUILD_FOR_IOS)...

} // namespace SkSL