Allow SkSL compilers to reuse SkSL Pools without reallocating.

When a Program is freed, rather than immediately disposing of its Pool,
it now passes the Pool to Pool::Recycle, which holds onto it. The next
call to Pool::Create hands back the recycled pool instead of allocating
a new one. Only one pool is kept in recycle storage at a time; if
several pools are recycled in a row, all but the most recently recycled
one are freed. To avoid holding onto pool memory indefinitely, the
recycle storage is emptied whenever a Compiler is destroyed.
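
A rough sketch of the intended calling pattern follows. Pool::Create,
attachToThread, detachFromThread, and Pool::Recycle are the entry
points touched by this change; the CompileOnce wrapper and the rest of
the sketch are illustrative stand-ins for the real Compiler/Program
plumbing.

    #include "src/sksl/SkSLPool.h"

    #include <memory>
    #include <utility>

    // Hypothetical caller, for illustration only.
    void CompileOnce() {
        // Hands back the stashed pool if one exists; otherwise allocates fresh.
        std::unique_ptr<SkSL::Pool> pool = SkSL::Pool::Create();
        pool->attachToThread();    // IRNode allocations on this thread now use the pool

        // ... build and use the Program's IR here ...

        pool->detachFromThread();  // stop routing IRNode allocations into the pool
        SkSL::Pool::Recycle(std::move(pool));  // stash the pool for the next Create()
    }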

Change-Id: I21c1ccde84507e344102d05506d869e62ca095a6
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329175
Reviewed-by: Brian Osman <brianosman@google.com>
Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
Commit-Queue: John Stiles <johnstiles@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
diff --git a/src/sksl/SkSLPool.cpp b/src/sksl/SkSLPool.cpp
index ac9524d..b8c42a6 100644
--- a/src/sksl/SkSLPool.cpp
+++ b/src/sksl/SkSLPool.cpp
@@ -7,6 +7,7 @@
 
 #include "src/sksl/SkSLPool.h"
 
+#include "include/private/SkMutex.h"
 #include "src/sksl/ir/SkSLIRNode.h"
 
 #define VLOG(...) // printf(__VA_ARGS__)
@@ -79,6 +80,12 @@
 
 #endif
 
+static Pool* sRecycledPool; // GUARDED_BY recycled_pool_mutex
+static SkMutex& recycled_pool_mutex() {
+    static SkMutex* mutex = new SkMutex;
+    return *mutex;
+}
+
 static PoolData* create_pool_data(int nodesInPool) {
     // Create a PoolData structure with extra space at the end for additional IRNode data.
     int numExtraIRNodes = nodesInPool - 1;
@@ -102,44 +109,42 @@
         set_thread_local_pool_data(nullptr);
     }
 
-    // In debug mode, report any leaked nodes.
-#ifdef SK_DEBUG
-    ptrdiff_t nodeCount = fData->nodeCount();
-    std::vector<bool> freed(nodeCount);
-    for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
-        ptrdiff_t nodeIndex = fData->nodeIndex(node);
-        freed[nodeIndex] = true;
-    }
-    bool foundLeaks = false;
-    for (int index = 0; index < nodeCount; ++index) {
-        if (!freed[index]) {
-            IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
-            SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
-            foundLeaks = true;
-        }
-    }
-    if (foundLeaks) {
-        SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
-    }
-#endif
+    this->checkForLeaks();
 
     VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
     free(fData);
 }
 
-std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
-    auto pool = std::unique_ptr<Pool>(new Pool);
-    pool->fData = create_pool_data(nodesInPool);
-    pool->fData->fFreeListHead = &pool->fData->fNodes[0];
-    VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
-    pool->attachToThread();
+std::unique_ptr<Pool> Pool::Create() {
+    constexpr int kNodesInPool = 2000;
+
+    SkAutoMutexExclusive lock(recycled_pool_mutex());
+    std::unique_ptr<Pool> pool;
+    if (sRecycledPool) {
+        pool = std::unique_ptr<Pool>(sRecycledPool);
+        sRecycledPool = nullptr;
+        VLOG("REUSE  Pool:0x%016llX\n", (uint64_t)pool->fData);
+    } else {
+        pool = std::unique_ptr<Pool>(new Pool);
+        pool->fData = create_pool_data(kNodesInPool);
+        pool->fData->fFreeListHead = &pool->fData->fNodes[0];
+        VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
+    }
     return pool;
 }
 
-void Pool::detachFromThread() {
-    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
-    SkASSERT(get_thread_local_pool_data() != nullptr);
-    set_thread_local_pool_data(nullptr);
+void Pool::Recycle(std::unique_ptr<Pool> pool) {
+    if (pool) {
+        pool->checkForLeaks();
+    }
+
+    SkAutoMutexExclusive lock(recycled_pool_mutex());
+    if (sRecycledPool) {
+        delete sRecycledPool;
+    }
+
+    VLOG("STASH  Pool:0x%016llX\n", pool ? (uint64_t)pool->fData : 0ull);
+    sRecycledPool = pool.release();
 }
 
 void Pool::attachToThread() {
@@ -148,6 +153,12 @@
     set_thread_local_pool_data(fData);
 }
 
+void Pool::detachFromThread() {
+    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
+    SkASSERT(get_thread_local_pool_data() != nullptr);
+    set_thread_local_pool_data(nullptr);
+}
+
 void* Pool::AllocIRNode() {
     // Is a pool attached?
     PoolData* poolData = get_thread_local_pool_data();
@@ -192,4 +203,26 @@
     ::operator delete(node_v);
 }
 
+void Pool::checkForLeaks() {
+#ifdef SK_DEBUG
+    ptrdiff_t nodeCount = fData->nodeCount();
+    std::vector<bool> freed(nodeCount);
+    for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
+        ptrdiff_t nodeIndex = fData->nodeIndex(node);
+        freed[nodeIndex] = true;
+    }
+    bool foundLeaks = false;
+    for (int index = 0; index < nodeCount; ++index) {
+        if (!freed[index]) {
+            IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
+            SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
+            foundLeaks = true;
+        }
+    }
+    if (foundLeaks) {
+        SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
+    }
+#endif
+}
+
 }  // namespace SkSL