Move GrBlockAllocator and GrTBlockList to src/core/

src/gpu/GrBlockAllocator -> src/core/SkBlockAllocator
src/gpu/GrTBlockList -> src/core/SkTBlockList

Tests and all references are renamed to match.
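
For reference, a minimal sketch of how call sites spell the renamed types
(it mirrors the usage modes documented in SkBlockAllocator.h; the function and
variable names below are illustrative only, not part of this CL):

  #include "src/core/SkBlockAllocator.h"
  #include "src/core/SkTBlockList.h"

  void example() {
      // Usage mode #1 from the header docs: stack/member allocator.
      SkBlockAllocator allocator(SkBlockAllocator::GrowthPolicy::kFixed, 2048);
      auto br = allocator.allocate<4>(16);
      void* p = br.fBlock->ptr(br.fAlignedOffset);  // caller constructs its object at p

      // Usage mode #3: inline preallocated storage via the renamed wrapper.
      SkSBlockAllocator<1024> pool;
      pool->allocate<alignof(int)>(sizeof(int));

      // Typed collection built on the allocator, under its new name.
      SkTBlockList<int, 16> list;
      list.push_back(42);
  }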

Bug: skia:12330
Change-Id: I5fad05faa3dcecd89a0a478dcf30c090ea7589f5
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/441477
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Michael Ludwig <michaelludwig@google.com>
diff --git a/BUILD.gn b/BUILD.gn
index 7e82e9e..91e649e 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -618,6 +618,7 @@
       "SK_ENABLE_SPIRV_VALIDATION",
     ]
     sources = [
+      "src/core/SkBlockAllocator.cpp",
       "src/core/SkCpu.cpp",
       "src/core/SkData.cpp",
       "src/core/SkHalf.cpp",
@@ -632,7 +633,6 @@
       "src/core/SkThreadID.cpp",
       "src/core/SkUtils.cpp",
       "src/core/SkVM.cpp",
-      "src/gpu/GrBlockAllocator.cpp",
       "src/gpu/GrMemoryPool.cpp",
       "src/gpu/GrShaderUtils.cpp",
       "src/ports/SkMemory_malloc.cpp",
diff --git a/gn/core.gni b/gn/core.gni
index 789a42c..54873b3 100644
--- a/gn/core.gni
+++ b/gn/core.gni
@@ -136,6 +136,8 @@
   "$_src/core/SkBlitter_ARGB32.cpp",
   "$_src/core/SkBlitter_RGB565.cpp",
   "$_src/core/SkBlitter_Sprite.cpp",
+  "$_src/core/SkBlockAllocator.cpp",
+  "$_src/core/SkBlockAllocator.h",
   "$_src/core/SkBlurMF.cpp",
   "$_src/core/SkBlurMask.cpp",
   "$_src/core/SkBlurMask.h",
@@ -374,6 +376,7 @@
   "$_src/core/SkSurfaceCharacterization.cpp",
   "$_src/core/SkSurfacePriv.h",
   "$_src/core/SkSwizzle.cpp",
+  "$_src/core/SkTBlockList.h",
   "$_src/core/SkTDPQueue.h",
   "$_src/core/SkTDynamicHash.h",
   "$_src/core/SkTInternalLList.h",
diff --git a/gn/gpu.gni b/gn/gpu.gni
index b471360..727df61 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -48,8 +48,6 @@
   "$_src/gpu/GrBackendUtils.h",
   "$_src/gpu/GrBaseContextPriv.h",
   "$_src/gpu/GrBlend.h",
-  "$_src/gpu/GrBlockAllocator.cpp",
-  "$_src/gpu/GrBlockAllocator.h",
   "$_src/gpu/GrBuffer.h",
   "$_src/gpu/GrBufferAllocPool.cpp",
   "$_src/gpu/GrBufferAllocPool.h",
@@ -210,7 +208,6 @@
   "$_src/gpu/GrSurfaceProxyView.h",
   "$_src/gpu/GrSwizzle.cpp",
   "$_src/gpu/GrSwizzle.h",
-  "$_src/gpu/GrTBlockList.h",
   "$_src/gpu/GrTTopoSort.h",
   "$_src/gpu/GrTestUtils.cpp",
   "$_src/gpu/GrTestUtils.h",
diff --git a/gn/tests.gni b/gn/tests.gni
index 75c158d..42946ae 100644
--- a/gn/tests.gni
+++ b/gn/tests.gni
@@ -91,7 +91,6 @@
   "$_tests/GpuDrawPathTest.cpp",
   "$_tests/GpuRectanizerTest.cpp",
   "$_tests/GrAHardwareBufferTest.cpp",
-  "$_tests/GrBlockAllocatorTest.cpp",
   "$_tests/GrContextAbandonTest.cpp",
   "$_tests/GrContextFactoryTest.cpp",
   "$_tests/GrContextOOM.cpp",
@@ -107,7 +106,6 @@
   "$_tests/GrStyledShapeTest.cpp",
   "$_tests/GrSubmittedFlushTest.cpp",
   "$_tests/GrSurfaceTest.cpp",
-  "$_tests/GrTBlockListTest.cpp",
   "$_tests/GrTextBlobTest.cpp",
   "$_tests/GrTextureMipMapInvalidationTest.cpp",
   "$_tests/GrVxTest.cpp",
@@ -224,6 +222,7 @@
   "$_tests/ShaperTest.cpp",
   "$_tests/SizeTest.cpp",
   "$_tests/SkBase64Test.cpp",
+  "$_tests/SkBlockAllocatorTest.cpp",
   "$_tests/SkColor4fTest.cpp",
   "$_tests/SkColorSpaceXformStepsTest.cpp",
   "$_tests/SkDOMTest.cpp",
@@ -254,6 +253,7 @@
   "$_tests/SkSharedMutexTest.cpp",
   "$_tests/SkStrikeCacheTest.cpp",
   "$_tests/SkStringViewTest.cpp",
+  "$_tests/SkTBlockListTest.cpp",
   "$_tests/SkTOptionalTest.cpp",
   "$_tests/SkUTFTest.cpp",
   "$_tests/SkVMTest.cpp",
diff --git a/src/gpu/GrBlockAllocator.cpp b/src/core/SkBlockAllocator.cpp
similarity index 92%
rename from src/gpu/GrBlockAllocator.cpp
rename to src/core/SkBlockAllocator.cpp
index bac879d..4c32476 100644
--- a/src/gpu/GrBlockAllocator.cpp
+++ b/src/core/SkBlockAllocator.cpp
@@ -5,13 +5,13 @@
  * found in the LICENSE file.
  */
 
-#include "src/gpu/GrBlockAllocator.h"
+#include "src/core/SkBlockAllocator.h"
 
 #ifdef SK_DEBUG
 #include <vector>
 #endif
 
-GrBlockAllocator::GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
+SkBlockAllocator::SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                    size_t additionalPreallocBytes)
         : fTail(&fHead)
         // Round up to the nearest max-aligned value, and then divide so that fBlockSizeIncrement
@@ -21,14 +21,14 @@
         , fGrowthPolicy(static_cast<uint64_t>(policy))
         , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
         , fN1(1)
-        // The head block always fills remaining space from GrBlockAllocator's size, because it's
+        // The head block always fills remaining space from SkBlockAllocator's size, because it's
         // inline, but can take over the specified number of bytes immediately after it.
         , fHead(/*prev=*/nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
     SkASSERT(fBlockIncrement >= 1);
     SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
 }
 
-GrBlockAllocator::Block::Block(Block* prev, int allocationSize)
+SkBlockAllocator::Block::Block(Block* prev, int allocationSize)
          : fNext(nullptr)
          , fPrev(prev)
          , fSize(allocationSize)
@@ -41,16 +41,16 @@
     this->poisonRange(kDataStart, fSize);
 }
 
-GrBlockAllocator::Block::~Block() {
+SkBlockAllocator::Block::~Block() {
     this->unpoisonRange(kDataStart, fSize);
 
     SkASSERT(fSentinel == kAssignedMarker);
     SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
 }
 
-size_t GrBlockAllocator::totalSize() const {
+size_t SkBlockAllocator::totalSize() const {
     // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
-    size_t size = offsetof(GrBlockAllocator, fHead) + this->scratchBlockSize();
+    size_t size = offsetof(SkBlockAllocator, fHead) + this->scratchBlockSize();
     for (const Block* b : this->blocks()) {
         size += b->fSize;
     }
@@ -58,7 +58,7 @@
     return size;
 }
 
-size_t GrBlockAllocator::totalUsableSpace() const {
+size_t SkBlockAllocator::totalUsableSpace() const {
     size_t size = this->scratchBlockSize();
     if (size > 0) {
         size -= kDataStart; // scratchBlockSize reports total block size, not usable size
@@ -70,7 +70,7 @@
     return size;
 }
 
-size_t GrBlockAllocator::totalSpaceInUse() const {
+size_t SkBlockAllocator::totalSpaceInUse() const {
     size_t size = 0;
     for (const Block* b : this->blocks()) {
         size += (b->fCursor - kDataStart);
@@ -79,7 +79,7 @@
     return size;
 }
 
-GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
+SkBlockAllocator::Block* SkBlockAllocator::findOwningBlock(const void* p) {
     // When in doubt, search in reverse to find an overlapping block.
     uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
     for (Block* b : this->rblocks()) {
@@ -93,7 +93,7 @@
     return nullptr;
 }
 
-void GrBlockAllocator::releaseBlock(Block* block) {
+void SkBlockAllocator::releaseBlock(Block* block) {
      if (block == &fHead) {
         // Reset the cursor of the head block so that it can be reused if it becomes the new tail
         block->fCursor = kDataStart;
@@ -147,7 +147,7 @@
     SkASSERT(fN1 >= 1 && fN0 >= 0);
 }
 
-void GrBlockAllocator::stealHeapBlocks(GrBlockAllocator* other) {
+void SkBlockAllocator::stealHeapBlocks(SkBlockAllocator* other) {
     Block* toSteal = other->fHead.fNext;
     if (toSteal) {
         // The other's next block connects back to this allocator's current tail, and its new tail
@@ -162,7 +162,7 @@
     } // else no block to steal
 }
 
-void GrBlockAllocator::reset() {
+void SkBlockAllocator::reset() {
     for (Block* b : this->rblocks()) {
         if (b == &fHead) {
             // Reset metadata and cursor, tail points to the head block again
@@ -187,14 +187,14 @@
     fN1 = 1;
 }
 
-void GrBlockAllocator::resetScratchSpace() {
+void SkBlockAllocator::resetScratchSpace() {
     if (fHead.fPrev) {
         delete fHead.fPrev;
         fHead.fPrev = nullptr;
     }
 }
 
-void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
+void SkBlockAllocator::addBlock(int minimumSize, int maxSize) {
     SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);
 
     // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
@@ -259,7 +259,7 @@
 }
 
 #ifdef SK_DEBUG
-void GrBlockAllocator::validate() const {
+void SkBlockAllocator::validate() const {
     std::vector<const Block*> blocks;
     const Block* prev = nullptr;
     for (const Block* block : this->blocks()) {
diff --git a/src/gpu/GrBlockAllocator.h b/src/core/SkBlockAllocator.h
similarity index 87%
rename from src/gpu/GrBlockAllocator.h
rename to src/core/SkBlockAllocator.h
index 35b8447..c147558 100644
--- a/src/gpu/GrBlockAllocator.h
+++ b/src/core/SkBlockAllocator.h
@@ -5,8 +5,8 @@
  * found in the LICENSE file.
  */
 
-#ifndef GrBlockAllocator_DEFINED
-#define GrBlockAllocator_DEFINED
+#ifndef SkBlockAllocator_DEFINED
+#define SkBlockAllocator_DEFINED
 
 #include "include/private/GrTypesPriv.h"
 #include "include/private/SkNoncopyable.h"
@@ -16,34 +16,36 @@
 #include <cstddef> // max_align_t
 
 /**
- * GrBlockAllocator provides low-level support for a block allocated arena with a dynamic tail that
+ * SkBlockAllocator provides low-level support for a block allocated arena with a dynamic tail that
  * tracks space reservations within each block. Its APIs provide the ability to reserve space,
  * resize reservations, and release reservations. It will automatically create new blocks if needed
  * and destroy all remaining blocks when it is destructed. It assumes that anything allocated within
- * its blocks has its destructors called externally. It is recommended that GrBlockAllocator is
+ * its blocks has its destructors called externally. It is recommended that SkBlockAllocator is
  * wrapped by a higher-level allocator that uses the low-level APIs to implement a simpler,
  * purpose-focused API w/o having to worry as much about byte-level concerns.
  *
- * GrBlockAllocator has no limit to its total size, but each allocation is limited to 512MB (which
- * should be sufficient for Ganesh's use cases). This upper allocation limit allows all internal
+ * SkBlockAllocator has no limit to its total size, but each allocation is limited to 512MB (which
+ * should be sufficient for Skia's use cases). This upper allocation limit allows all internal
  * operations to be performed using 'int' and avoid many overflow checks. Static asserts are used
  * to ensure that those operations would not overflow when using the largest possible values.
  *
  * Possible use modes:
  * 1. No upfront allocation, either on the stack or as a field
- *    GrBlockAllocator allocator(policy, heapAllocSize);
+ *    SkBlockAllocator allocator(policy, heapAllocSize);
  *
  * 2. In-place new'd
  *    void* mem = operator new(totalSize);
- *    GrBlockAllocator* allocator = new (mem) GrBlockAllocator(policy, heapAllocSize,
- *                                                             totalSize- sizeof(GrBlockAllocator));
+ *    SkBlockAllocator* allocator = new (mem) SkBlockAllocator(policy, heapAllocSize,
+ *                                                             totalSize- sizeof(SkBlockAllocator));
  *    delete allocator;
  *
- * 3. Use GrSBlockAllocator to increase the preallocation size
- *    GrSBlockAllocator<1024> allocator(policy, heapAllocSize);
+ * 3. Use SkSBlockAllocator to increase the preallocation size
+ *    SkSBlockAllocator<1024> allocator(policy, heapAllocSize);
  *    sizeof(allocator) == 1024;
  */
-class GrBlockAllocator final : SkNoncopyable {
+// TODO(michaelludwig) - While the API is different, this shares similarities with
+// SkArenaAlloc and SkFibBlockSizes, so we should work to integrate them.
+class SkBlockAllocator final : SkNoncopyable {
 public:
     // Largest size that can be requested from allocate(), chosen because it's the largest pow-2
     // that is less than int32_t::max()/2.
@@ -122,7 +124,7 @@
         inline bool resize(int start, int end, int deltaBytes);
 
     private:
-        friend class GrBlockAllocator;
+        friend class SkBlockAllocator;
 
         Block(Block* prev, int allocationSize);
 
@@ -173,17 +175,17 @@
     // after the allocator can be used by its inline head block. This is useful when the allocator
     // is in-place new'ed into a larger block of memory, but it should remain set to 0 if stack
     // allocated or if the class layout does not guarantee that space is present.
-    GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
+    SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                      size_t additionalPreallocBytes = 0);
 
-    ~GrBlockAllocator() { this->reset(); }
+    ~SkBlockAllocator() { this->reset(); }
     void operator delete(void* p) { ::operator delete(p); }
 
     /**
      * Helper to calculate the minimum number of bytes needed for heap block size, under the
      * assumption that Align will be the requested alignment of the first call to allocate().
      * Ex. To store N instances of T in a heap block, the 'blockIncrementBytes' should be set to
-     *   BlockOverhead<alignof(T)>() + N * sizeof(T) when making the GrBlockAllocator.
+     *   BlockOverhead<alignof(T)>() + N * sizeof(T) when making the SkBlockAllocator.
      */
     template<size_t Align = 1, size_t Padding = 0>
     static constexpr size_t BlockOverhead();
@@ -191,7 +193,7 @@
     /**
      * Helper to calculate the minimum number of bytes needed for a preallocation, under the
      * assumption that Align will be the requested alignment of the first call to allocate().
-     * Ex. To preallocate a GrSBlockAllocator to hold N instances of T, its arge should be
+     * Ex. To preallocate a SkSBlockAllocator to hold N instances of T, its arg should be
      *   Overhead<alignof(T)>() + N * sizeof(T)
      */
     template<size_t Align = 1, size_t Padding = 0>
@@ -215,13 +217,13 @@
     size_t totalSpaceInUse() const;
 
     /**
-     * Return the total number of bytes that were pre-allocated for the GrBlockAllocator. This will
+     * Return the total number of bytes that were pre-allocated for the SkBlockAllocator. This will
      * include 'additionalPreallocBytes' passed to the constructor, and represents what the total
      * size would become after a call to reset().
      */
     size_t preallocSize() const {
-        // Don't double count fHead's Block overhead in both sizeof(GrBlockAllocator) and fSize.
-        return sizeof(GrBlockAllocator) + fHead.fSize - BaseHeadBlockSize();
+        // Don't double count fHead's Block overhead in both sizeof(SkBlockAllocator) and fSize.
+        return sizeof(SkBlockAllocator) + fHead.fSize - BaseHeadBlockSize();
     }
     /**
      * Return the usable size of the inline head block; this will be equal to
@@ -235,7 +237,7 @@
     /**
      * Get the current value of the allocator-level metadata (a user-oriented slot). This is
      * separate from any block-level metadata, but can serve a similar purpose to compactly support
-     * data collections on top of GrBlockAllocator.
+     * data collections on top of SkBlockAllocator.
      */
     int metadata() const { return fHead.fAllocatorMetadata; }
 
@@ -276,7 +278,7 @@
 
     enum ReserveFlags : unsigned {
         // If provided to reserve(), the input 'size' will be rounded up to the next size determined
-        // by the growth policy of the GrBlockAllocator. If not, 'size' will be aligned to max_align
+        // by the growth policy of the SkBlockAllocator. If not, 'size' will be aligned to max_align
         kIgnoreGrowthPolicy_Flag  = 0b01,
         // If provided to reserve(), the number of available bytes of the current block  will not
         // be used to satisfy the reservation (assuming the contiguous range was long enough to
@@ -327,7 +329,7 @@
 
     template <size_t Align, size_t Padding = 0>
     const Block* owningBlock(const void* ptr, int start) const {
-        return const_cast<GrBlockAllocator*>(this)->owningBlock<Align, Padding>(ptr, start);
+        return const_cast<SkBlockAllocator*>(this)->owningBlock<Align, Padding>(ptr, start);
     }
 
     /**
@@ -336,12 +338,12 @@
      */
     Block* findOwningBlock(const void* ptr);
     const Block* findOwningBlock(const void* ptr) const {
-        return const_cast<GrBlockAllocator*>(this)->findOwningBlock(ptr);
+        return const_cast<SkBlockAllocator*>(this)->findOwningBlock(ptr);
     }
 
     /**
      * Explicitly free an entire block, invalidating any remaining allocations from the block.
-     * GrBlockAllocator will release all alive blocks automatically when it is destroyed, but this
+     * SkBlockAllocator will release all alive blocks automatically when it is destroyed, but this
      * function can be used to reclaim memory over the lifetime of the allocator. The provided
      * 'block' pointer must have previously come from a call to currentBlock() or allocate().
      *
@@ -358,14 +360,14 @@
      * Detach every heap-allocated block owned by 'other' and concatenate them to this allocator's
      * list of blocks. This memory is now managed by this allocator. Since this only transfers
      * ownership of a Block, and a Block itself does not move, any previous allocations remain
-     * valid and associated with their original Block instances. GrBlockAllocator-level functions
+     * valid and associated with their original Block instances. SkBlockAllocator-level functions
      * that accept allocated pointers (e.g. findOwningBlock), must now use this allocator and not
      * 'other' for these allocations.
      *
      * The head block of 'other' cannot be stolen, so higher-level allocators and memory structures
      * must handle that data differently.
      */
-    void stealHeapBlocks(GrBlockAllocator* other);
+    void stealHeapBlocks(SkBlockAllocator* other);
 
     /**
      * Explicitly free all blocks (invalidating all allocations), and resets the head block to its
@@ -381,7 +383,7 @@
     template <bool Forward, bool Const> class BlockIter;
 
     /**
-     * Clients can iterate over all active Blocks in the GrBlockAllocator using for loops:
+     * Clients can iterate over all active Blocks in the SkBlockAllocator using for loops:
      *
      * Forward iteration from head to tail block (or non-const variant):
      *   for (const Block* b : this->blocks()) { }
@@ -402,18 +404,17 @@
     void validate() const;
 #endif
 
-#if GR_TEST_UTILS
-    int testingOnly_scratchBlockSize() const { return this->scratchBlockSize(); }
-#endif
-
 private:
+    friend class BlockAllocatorTestAccess;
+    friend class TBlockListTestAccess;
+
     static constexpr int kDataStart = sizeof(Block);
     #ifdef SK_FORCE_8_BYTE_ALIGNMENT
         // This is an issue for WASM builds using emscripten, which had std::max_align_t = 16, but
         // was returning pointers only aligned to 8 bytes.
         // https://github.com/emscripten-core/emscripten/issues/10072
         //
-        // Setting this to 8 will let GrBlockAllocator properly correct for the pointer address if
+        // Setting this to 8 will let SkBlockAllocator properly correct for the pointer address if
         // a 16-byte aligned allocation is requested in wasm (unlikely since we don't use long
         // doubles).
         static constexpr size_t kAddressAlign = 8;
@@ -429,19 +430,19 @@
     static constexpr size_t MaxBlockSize();
 
     static constexpr int BaseHeadBlockSize() {
-        return sizeof(GrBlockAllocator) - offsetof(GrBlockAllocator, fHead);
+        return sizeof(SkBlockAllocator) - offsetof(SkBlockAllocator, fHead);
     }
 
     // Append a new block to the end of the block linked list, updating fTail. 'minSize' must
     // have enough room for sizeof(Block). 'maxSize' is the upper limit of fSize for the new block
-    // that will preserve the static guarantees GrBlockAllocator makes.
+    // that will preserve the static guarantees SkBlockAllocator makes.
     void addBlock(int minSize, int maxSize);
 
     int scratchBlockSize() const { return fHead.fPrev ? fHead.fPrev->fSize : 0; }
 
     Block* fTail; // All non-head blocks are heap allocated; tail will never be null.
 
-    // All remaining state is packed into 64 bits to keep GrBlockAllocator at 16 bytes + head block
+    // All remaining state is packed into 64 bits to keep SkBlockAllocator at 16 bytes + head block
     // (on a 64-bit system).
 
     // Growth of the block size is controlled by four factors: BlockIncrement, N0 and N1, and a
@@ -450,7 +451,7 @@
     // N0' = N1' (exponential). The size of the new block is N1' * BlockIncrement * MaxAlign,
     // after which fN0 and fN1 store N0' and N1' clamped into 23 bits. With current bit allocations,
     // N1' is limited to 2^24, and assuming MaxAlign=16, then BlockIncrement must be '2' in order to
-    // eventually reach the hard 2^29 size limit of GrBlockAllocator.
+    // eventually reach the hard 2^29 size limit of SkBlockAllocator.
 
     // Next heap block size = (fBlockIncrement * alignof(std::max_align_t) * (fN0 + fN1))
     uint64_t fBlockIncrement : 16;
@@ -468,64 +469,64 @@
     static_assert(kGrowthPolicyCount <= 4);
 };
 
-// A wrapper around GrBlockAllocator that includes preallocated storage for the head block.
+// A wrapper around SkBlockAllocator that includes preallocated storage for the head block.
 // N will be the preallocSize() reported by the allocator.
 template<size_t N>
-class GrSBlockAllocator : SkNoncopyable {
+class SkSBlockAllocator : SkNoncopyable {
 public:
-    using GrowthPolicy = GrBlockAllocator::GrowthPolicy;
+    using GrowthPolicy = SkBlockAllocator::GrowthPolicy;
 
-    GrSBlockAllocator() {
-        new (fStorage) GrBlockAllocator(GrowthPolicy::kFixed, N, N - sizeof(GrBlockAllocator));
+    SkSBlockAllocator() {
+        new (fStorage) SkBlockAllocator(GrowthPolicy::kFixed, N, N - sizeof(SkBlockAllocator));
     }
-    explicit GrSBlockAllocator(GrowthPolicy policy) {
-        new (fStorage) GrBlockAllocator(policy, N, N - sizeof(GrBlockAllocator));
+    explicit SkSBlockAllocator(GrowthPolicy policy) {
+        new (fStorage) SkBlockAllocator(policy, N, N - sizeof(SkBlockAllocator));
     }
 
-    GrSBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes) {
-        new (fStorage) GrBlockAllocator(policy, blockIncrementBytes, N - sizeof(GrBlockAllocator));
+    SkSBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes) {
+        new (fStorage) SkBlockAllocator(policy, blockIncrementBytes, N - sizeof(SkBlockAllocator));
     }
 
-    ~GrSBlockAllocator() {
-        this->allocator()->~GrBlockAllocator();
+    ~SkSBlockAllocator() {
+        this->allocator()->~SkBlockAllocator();
     }
 
-    GrBlockAllocator* operator->() { return this->allocator(); }
-    const GrBlockAllocator* operator->() const { return this->allocator(); }
+    SkBlockAllocator* operator->() { return this->allocator(); }
+    const SkBlockAllocator* operator->() const { return this->allocator(); }
 
-    GrBlockAllocator* allocator() { return reinterpret_cast<GrBlockAllocator*>(fStorage); }
-    const GrBlockAllocator* allocator() const {
-        return reinterpret_cast<const GrBlockAllocator*>(fStorage);
+    SkBlockAllocator* allocator() { return reinterpret_cast<SkBlockAllocator*>(fStorage); }
+    const SkBlockAllocator* allocator() const {
+        return reinterpret_cast<const SkBlockAllocator*>(fStorage);
     }
 
 private:
-    static_assert(N >= sizeof(GrBlockAllocator));
+    static_assert(N >= sizeof(SkBlockAllocator));
 
     // Will be used to placement new the allocator
-    alignas(GrBlockAllocator) char fStorage[N];
+    alignas(SkBlockAllocator) char fStorage[N];
 };
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 // Template and inline implementations
 
-GR_MAKE_BITFIELD_OPS(GrBlockAllocator::ReserveFlags)
+GR_MAKE_BITFIELD_OPS(SkBlockAllocator::ReserveFlags)
 
 template<size_t Align, size_t Padding>
-constexpr size_t GrBlockAllocator::BlockOverhead() {
+constexpr size_t SkBlockAllocator::BlockOverhead() {
     static_assert(GrAlignTo(kDataStart + Padding, Align) >= sizeof(Block));
     return GrAlignTo(kDataStart + Padding, Align);
 }
 
 template<size_t Align, size_t Padding>
-constexpr size_t GrBlockAllocator::Overhead() {
-    // NOTE: On most platforms, GrBlockAllocator is packed; this is not the case on debug builds
+constexpr size_t SkBlockAllocator::Overhead() {
+    // NOTE: On most platforms, SkBlockAllocator is packed; this is not the case on debug builds
     // due to extra fields, or on WASM due to 4byte pointers but 16byte max align.
-    return std::max(sizeof(GrBlockAllocator),
-                    offsetof(GrBlockAllocator, fHead) + BlockOverhead<Align, Padding>());
+    return std::max(sizeof(SkBlockAllocator),
+                    offsetof(SkBlockAllocator, fHead) + BlockOverhead<Align, Padding>());
 }
 
 template<size_t Align, size_t Padding>
-constexpr size_t GrBlockAllocator::MaxBlockSize() {
+constexpr size_t SkBlockAllocator::MaxBlockSize() {
     // Without loss of generality, assumes 'align' will be the largest encountered alignment for the
     // allocator (if it's not, the largest align will be encountered by the compiler and pass/fail
     // the same set of static asserts).
@@ -533,7 +534,7 @@
 }
 
 template<size_t Align, size_t Padding>
-void GrBlockAllocator::reserve(size_t size, ReserveFlags flags) {
+void SkBlockAllocator::reserve(size_t size, ReserveFlags flags) {
     if (size > kMaxAllocationSize) {
         SK_ABORT("Allocation too large (%zu bytes requested)", size);
     }
@@ -556,7 +557,7 @@
 }
 
 template <size_t Align, size_t Padding>
-GrBlockAllocator::ByteRange GrBlockAllocator::allocate(size_t size) {
+SkBlockAllocator::ByteRange SkBlockAllocator::allocate(size_t size) {
     // Amount of extra space for a new block to make sure the allocation can succeed.
     static constexpr int kBlockOverhead = (int) BlockOverhead<Align, Padding>();
 
@@ -596,7 +597,7 @@
 }
 
 template <size_t Align, size_t Padding>
-GrBlockAllocator::Block* GrBlockAllocator::owningBlock(const void* p, int start) {
+SkBlockAllocator::Block* SkBlockAllocator::owningBlock(const void* p, int start) {
     // 'p' was originally formed by aligning 'block + start + Padding', producing the inequality:
     //     block + start + Padding <= p <= block + start + Padding + Align-1
     // Rearranging this yields:
@@ -617,7 +618,7 @@
 }
 
 template <size_t Align, size_t Padding>
-int GrBlockAllocator::Block::alignedOffset(int offset) const {
+int SkBlockAllocator::Block::alignedOffset(int offset) const {
     static_assert(SkIsPow2(Align));
     // Aligning adds (Padding + Align - 1) as an intermediate step, so ensure that can't overflow
     static_assert(MaxBlockSize<Align, Padding>() + Padding + Align - 1
@@ -636,7 +637,7 @@
     }
 }
 
-bool GrBlockAllocator::Block::resize(int start, int end, int deltaBytes) {
+bool SkBlockAllocator::Block::resize(int start, int end, int deltaBytes) {
     SkASSERT(fSentinel == kAssignedMarker);
     SkASSERT(start >= kDataStart && end <= fSize && start < end);
 
@@ -667,7 +668,7 @@
 // NOTE: release is equivalent to resize(start, end, start - end), and the compiler can optimize
 // most of the operations away, but it wasn't able to remove the unnecessary branch comparing the
 // new cursor to the block size or old start, so release() gets a specialization.
-bool GrBlockAllocator::Block::release(int start, int end) {
+bool SkBlockAllocator::Block::release(int start, int end) {
     SkASSERT(fSentinel == kAssignedMarker);
     SkASSERT(start >= kDataStart && end <= fSize && start < end);
 
@@ -683,11 +684,11 @@
 
 ///////// Block iteration
 template <bool Forward, bool Const>
-class GrBlockAllocator::BlockIter {
+class SkBlockAllocator::BlockIter {
 private:
     using BlockT = typename std::conditional<Const, const Block, Block>::type;
     using AllocatorT =
-            typename std::conditional<Const, const GrBlockAllocator, GrBlockAllocator>::type;
+            typename std::conditional<Const, const SkBlockAllocator, SkBlockAllocator>::type;
 
 public:
     BlockIter(AllocatorT* allocator) : fAllocator(allocator) {}
@@ -731,17 +732,17 @@
     AllocatorT* fAllocator;
 };
 
-GrBlockAllocator::BlockIter<true, false> GrBlockAllocator::blocks() {
+SkBlockAllocator::BlockIter<true, false> SkBlockAllocator::blocks() {
     return BlockIter<true, false>(this);
 }
-GrBlockAllocator::BlockIter<true, true> GrBlockAllocator::blocks() const {
+SkBlockAllocator::BlockIter<true, true> SkBlockAllocator::blocks() const {
     return BlockIter<true, true>(this);
 }
-GrBlockAllocator::BlockIter<false, false> GrBlockAllocator::rblocks() {
+SkBlockAllocator::BlockIter<false, false> SkBlockAllocator::rblocks() {
     return BlockIter<false, false>(this);
 }
-GrBlockAllocator::BlockIter<false, true> GrBlockAllocator::rblocks() const {
+SkBlockAllocator::BlockIter<false, true> SkBlockAllocator::rblocks() const {
     return BlockIter<false, true>(this);
 }
 
-#endif // GrBlockAllocator_DEFINED
+#endif // SkBlockAllocator_DEFINED
diff --git a/src/gpu/GrTBlockList.h b/src/core/SkTBlockList.h
similarity index 85%
rename from src/gpu/GrTBlockList.h
rename to src/core/SkTBlockList.h
index ec8c355..0959d25 100644
--- a/src/gpu/GrTBlockList.h
+++ b/src/core/SkTBlockList.h
@@ -5,27 +5,27 @@
  * found in the LICENSE file.
  */
 
-#ifndef GrTBlockList_DEFINED
-#define GrTBlockList_DEFINED
+#ifndef SkTBlockList_DEFINED
+#define SkTBlockList_DEFINED
 
-#include "src/gpu/GrBlockAllocator.h"
+#include "src/core/SkBlockAllocator.h"
 
 #include <type_traits>
 
-// Forward declarations for the iterators used by GrTBlockList
-using IndexFn = int (*)(const GrBlockAllocator::Block*);
-using NextFn = int (*)(const GrBlockAllocator::Block*, int);
+// Forward declarations for the iterators used by SkTBlockList
+using IndexFn = int (*)(const SkBlockAllocator::Block*);
+using NextFn = int (*)(const SkBlockAllocator::Block*, int);
 template<typename T, typename B> using ItemFn = T (*)(B*, int);
 template <typename T, bool Forward, bool Const, IndexFn Start, IndexFn End, NextFn Next,
-          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
-                                                     GrBlockAllocator::Block>::type> Resolve>
+          ItemFn<T, typename std::conditional<Const, const SkBlockAllocator::Block,
+                                                     SkBlockAllocator::Block>::type> Resolve>
 class BlockIndexIterator;
 
 /**
- * GrTBlockList manages dynamic storage for instances of T, reserving fixed blocks such that
+ * SkTBlockList manages dynamic storage for instances of T, reserving fixed blocks such that
  * allocation is amortized across every N instances. In this way it is a hybrid of an array-based
  * vector and a linked-list. T can be any type and non-trivial destructors are automatically
- * invoked when the GrTBlockList is destructed. The addresses of instances are guaranteed
+ * invoked when the SkTBlockList is destructed. The addresses of instances are guaranteed
  * not to move except when a list is concatenated to another.
  *
  * The collection supports storing a templated number of elements inline before heap-allocated
@@ -47,25 +47,25 @@
  * acting as a stack, or simply using it as a typed allocator.
  */
 template <typename T, int StartingItems = 1>
-class GrTBlockList {
+class SkTBlockList {
 public:
     /**
      * Create an allocator that defaults to using StartingItems as heap increment.
      */
-    GrTBlockList() : GrTBlockList(StartingItems) {}
+    SkTBlockList() : SkTBlockList(StartingItems) {}
 
     /**
      * Create an allocator
      *
      * @param   itemsPerBlock   the number of items to allocate at once
      */
-    explicit GrTBlockList(int itemsPerBlock,
-                          GrBlockAllocator::GrowthPolicy policy =
-                                  GrBlockAllocator::GrowthPolicy::kFixed)
+    explicit SkTBlockList(int itemsPerBlock,
+                          SkBlockAllocator::GrowthPolicy policy =
+                                  SkBlockAllocator::GrowthPolicy::kFixed)
             : fAllocator(policy,
-                         GrBlockAllocator::BlockOverhead<alignof(T)>() + sizeof(T)*itemsPerBlock) {}
+                         SkBlockAllocator::BlockOverhead<alignof(T)>() + sizeof(T)*itemsPerBlock) {}
 
-    ~GrTBlockList() { this->reset(); }
+    ~SkTBlockList() { this->reset(); }
 
     /**
      * Adds an item and returns it.
@@ -94,7 +94,7 @@
      * this is O(StartingItems) and not O(N). All other items are concatenated in O(1).
      */
     template <int SI>
-    void concat(GrTBlockList<T, SI>&& other);
+    void concat(SkTBlockList<T, SI>&& other);
 
     /**
      * Allocate, if needed, space to hold N more Ts before another malloc will occur.
@@ -105,7 +105,7 @@
             int reserved = n - avail;
             // Don't consider existing bytes since we've already determined how to split the N items
             fAllocator->template reserve<alignof(T)>(
-                    reserved * sizeof(T), GrBlockAllocator::kIgnoreExistingBytes_Flag);
+                    reserved * sizeof(T), SkBlockAllocator::kIgnoreExistingBytes_Flag);
         }
     }
 
@@ -115,7 +115,7 @@
     void pop_back() {
         SkASSERT(this->count() > 0);
 
-        GrBlockAllocator::Block* block = fAllocator->currentBlock();
+        SkBlockAllocator::Block* block = fAllocator->currentBlock();
 
         // Run dtor for the popped item
         int releaseIndex = Last(block);
@@ -220,33 +220,34 @@
         SkUNREACHABLE;
     }
     const T& item(int i) const {
-        return const_cast<GrTBlockList*>(this)->item(i);
+        return const_cast<SkTBlockList*>(this)->item(i);
     }
 
 private:
-    // Let other GrTBlockLists have access (only ever used when T and S are the same but you
+    // Let other SkTBlockLists have access (only ever used when T and S are the same but you
     // cannot have partial specializations declared as a friend...)
-    template<typename S, int N> friend class GrTBlockList;
+    template<typename S, int N> friend class SkTBlockList;
+    friend class TBlockListTestAccess;  // for fAllocator
 
     static constexpr size_t StartingSize =
-            GrBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);
+            SkBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);
 
-    static T& GetItem(GrBlockAllocator::Block* block, int index) {
+    static T& GetItem(SkBlockAllocator::Block* block, int index) {
         return *static_cast<T*>(block->ptr(index));
     }
-    static const T& GetItem(const GrBlockAllocator::Block* block, int index) {
+    static const T& GetItem(const SkBlockAllocator::Block* block, int index) {
         return *static_cast<const T*>(block->ptr(index));
     }
-    static int First(const GrBlockAllocator::Block* b) {
+    static int First(const SkBlockAllocator::Block* b) {
         return b->firstAlignedOffset<alignof(T)>();
     }
-    static int Last(const GrBlockAllocator::Block* b) {
+    static int Last(const SkBlockAllocator::Block* b) {
         return b->metadata();
     }
-    static int Increment(const GrBlockAllocator::Block* b, int index) {
+    static int Increment(const SkBlockAllocator::Block* b, int index) {
         return index + sizeof(T);
     }
-    static int Decrement(const GrBlockAllocator::Block* b, int index) {
+    static int Decrement(const SkBlockAllocator::Block* b, int index) {
         return index - sizeof(T);
     }
 
@@ -260,12 +261,12 @@
         return br.fBlock->ptr(br.fAlignedOffset);
     }
 
-    // N represents the number of items, whereas GrSBlockAllocator takes total bytes, so must
+    // N represents the number of items, whereas SkSBlockAllocator takes total bytes, so must
     // account for the block allocator's size too.
     //
-    // This class uses the GrBlockAllocator's metadata to track total count of items, and per-block
+    // This class uses the SkBlockAllocator's metadata to track total count of items, and per-block
     // metadata to track the index of the last allocated item within each block.
-    GrSBlockAllocator<StartingSize> fAllocator;
+    SkSBlockAllocator<StartingSize> fAllocator;
 
 public:
     using Iter   = BlockIndexIterator<T&,       true,  false, &First, &Last,  &Increment, &GetItem>;
@@ -284,16 +285,11 @@
     // Iterate from newest to oldest using a for-range loop.
     RIter  ritems() { return RIter(fAllocator.allocator()); }
     CRIter ritems() const { return CRIter(fAllocator.allocator()); }
-
-#if GR_TEST_UTILS
-    // For introspection
-    const GrBlockAllocator* allocator() const { return fAllocator.allocator(); }
-#endif
 };
 
 template <typename T, int SI1>
 template <int SI2>
-void GrTBlockList<T, SI1>::concat(GrTBlockList<T, SI2>&& other) {
+void SkTBlockList<T, SI1>::concat(SkTBlockList<T, SI2>&& other) {
     // Optimize the common case where the list to append only has a single item
     if (other.empty()) {
         return;
@@ -306,7 +302,7 @@
     // Manually move all items in other's head block into this list; all heap blocks from 'other'
     // will be appended to the block linked list (no per-item moves needed then).
     int headItemCount = 0;
-    GrBlockAllocator::Block* headBlock = other.fAllocator->headBlock();
+    SkBlockAllocator::Block* headBlock = other.fAllocator->headBlock();
     SkDEBUGCODE(int oldCount = this->count();)
     if (headBlock->metadata() > 0) {
         int headStart = First(headBlock);
@@ -318,14 +314,14 @@
             // kIgnoreGrowthPolicy_Flag to make this reservation as tight as possible since
             // 'other's heap blocks will be appended after it and any extra space is wasted.
             fAllocator->template reserve<alignof(T)>((headItemCount - avail) * sizeof(T),
-                                                     GrBlockAllocator::kIgnoreExistingBytes_Flag |
-                                                     GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
+                                                     SkBlockAllocator::kIgnoreExistingBytes_Flag |
+                                                     SkBlockAllocator::kIgnoreGrowthPolicy_Flag);
         }
 
         if constexpr (std::is_trivially_copy_constructible<T>::value) {
             // memcpy all items at once (or twice between current and reserved space).
             SkASSERT(std::is_trivially_destructible<T>::value);
-            auto copy = [](GrBlockAllocator::Block* src, int start, GrBlockAllocator* dst, int n) {
+            auto copy = [](SkBlockAllocator::Block* src, int start, SkBlockAllocator* dst, int n) {
                 auto target = dst->template allocate<alignof(T)>(n * sizeof(T));
                 memcpy(target.fBlock->ptr(target.fAlignedOffset), src->ptr(start), n * sizeof(T));
                 target.fBlock->setMetadata(target.fAlignedOffset + (n - 1) * sizeof(T));
@@ -366,7 +362,7 @@
 
 /**
  * BlockIndexIterator provides a reusable iterator template for collections built on top of a
- * GrBlockAllocator, where each item is of the same type, and the index to an item can be iterated
+ * SkBlockAllocator, where each item is of the same type, and the index to an item can be iterated
  * over in a known manner. It supports const and non-const, and forward and reverse, assuming it's
  * provided with proper functions for starting, ending, and advancing.
  */
@@ -376,10 +372,10 @@
           IndexFn Start, // Returns the index of the first valid item in a block
           IndexFn End,   // Returns the index of the last valid item (so it is inclusive)
           NextFn Next,   // Returns the next index given the current index
-          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
-                                                     GrBlockAllocator::Block>::type> Resolve>
+          ItemFn<T, typename std::conditional<Const, const SkBlockAllocator::Block,
+                                                     SkBlockAllocator::Block>::type> Resolve>
 class BlockIndexIterator {
-    using BlockIter = typename GrBlockAllocator::BlockIter<Forward, Const>;
+    using BlockIter = typename SkBlockAllocator::BlockIter<Forward, Const>;
 public:
     BlockIndexIterator(BlockIter iter) : fBlockIter(iter) {}
 
diff --git a/src/gpu/GrMemoryPool.cpp b/src/gpu/GrMemoryPool.cpp
index 21be212..8cc227c 100644
--- a/src/gpu/GrMemoryPool.cpp
+++ b/src/gpu/GrMemoryPool.cpp
@@ -22,16 +22,16 @@
     static_assert(sizeof(GrMemoryPool) < GrMemoryPool::kMinAllocationSize);
 
     preallocSize = SkTPin(preallocSize, kMinAllocationSize,
-                          (size_t) GrBlockAllocator::kMaxAllocationSize);
+                          (size_t) SkBlockAllocator::kMaxAllocationSize);
     minAllocSize = SkTPin(minAllocSize, kMinAllocationSize,
-                          (size_t) GrBlockAllocator::kMaxAllocationSize);
+                          (size_t) SkBlockAllocator::kMaxAllocationSize);
     void* mem = operator new(preallocSize);
     return std::unique_ptr<GrMemoryPool>(new (mem) GrMemoryPool(preallocSize, minAllocSize));
 }
 
 GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize)
-        : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
-                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(GrBlockAllocator)) {
+        : fAllocator(SkBlockAllocator::GrowthPolicy::kFixed, minAllocSize,
+                     preallocSize - offsetof(GrMemoryPool, fAllocator) - sizeof(SkBlockAllocator)) {
     SkDEBUGCODE(fAllocationCount = 0;)
 }
 
@@ -62,7 +62,7 @@
     static_assert(alignof(Header) <= kAlignment);
     SkDEBUGCODE(this->validate();)
 
-    GrBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);
+    SkBlockAllocator::ByteRange alloc = fAllocator.allocate<kAlignment, sizeof(Header)>(size);
 
     // Initialize GrMemoryPool's custom header at the start of the allocation
     Header* header = static_cast<Header*>(alloc.fBlock->ptr(alloc.fAlignedOffset - sizeof(Header)));
@@ -75,7 +75,7 @@
 #if defined(SK_SANITIZE_ADDRESS)
     sk_asan_poison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
 #elif defined(SK_DEBUG)
-    header->fSentinel = GrBlockAllocator::kAssignedMarker;
+    header->fSentinel = SkBlockAllocator::kAssignedMarker;
 #endif
 
 #if defined(SK_DEBUG)
@@ -99,8 +99,8 @@
 #if defined(SK_SANITIZE_ADDRESS)
     sk_asan_unpoison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
 #elif defined(SK_DEBUG)
-    SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);
-    header->fSentinel = GrBlockAllocator::kFreedMarker;
+    SkASSERT(SkBlockAllocator::kAssignedMarker == header->fSentinel);
+    header->fSentinel = SkBlockAllocator::kFreedMarker;
 #endif
 
 #if defined(SK_DEBUG)
@@ -108,10 +108,10 @@
     fAllocationCount--;
 #endif
 
-    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);
+    SkBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);
 
 #if defined(SK_DEBUG)
-    // (p - block) matches the original alignedOffset value from GrBlockAllocator::allocate().
+    // (p - block) matches the original alignedOffset value from SkBlockAllocator::allocate().
     intptr_t alignedOffset = (intptr_t)p - (intptr_t)block;
     SkASSERT(p == block->ptr(alignedOffset));
 
diff --git a/src/gpu/GrMemoryPool.h b/src/gpu/GrMemoryPool.h
index 9ce408f..962e591 100644
--- a/src/gpu/GrMemoryPool.h
+++ b/src/gpu/GrMemoryPool.h
@@ -8,7 +8,7 @@
 #ifndef GrMemoryPool_DEFINED
 #define GrMemoryPool_DEFINED
 
-#include "src/gpu/GrBlockAllocator.h"
+#include "src/core/SkBlockAllocator.h"
 
 #ifdef SK_DEBUG
 #include "include/private/SkTHash.h"
@@ -42,7 +42,7 @@
      * time and keep around until pool destruction. The min alloc size is
      * the smallest allowed size of additional allocations. Both sizes are
      * adjusted to ensure that they are at least as large as kMinAllocationSize
-     * and less than GrBlockAllocator::kMaxAllocationSize.
+     * and less than SkBlockAllocator::kMaxAllocationSize.
      *
      * Both sizes are what the pool will end up allocating from the system, and
      * portions of the allocated memory is used for internal bookkeeping.
@@ -123,6 +123,6 @@
     int              fAllocationCount;
 #endif
 
-    GrBlockAllocator fAllocator; // Must be the last field, in order to use extra allocated space
+    SkBlockAllocator fAllocator; // Must be the last field, in order to use extra allocated space
 };
 #endif
diff --git a/src/gpu/GrSPIRVUniformHandler.h b/src/gpu/GrSPIRVUniformHandler.h
index 7ba8468..5cb1aaa 100644
--- a/src/gpu/GrSPIRVUniformHandler.h
+++ b/src/gpu/GrSPIRVUniformHandler.h
@@ -8,7 +8,7 @@
 #ifndef GrSPIRVUniformHandler_DEFINED
 #define GrSPIRVUniformHandler_DEFINED
 
-#include "src/gpu/GrTBlockList.h"
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
 
 /*
@@ -27,7 +27,7 @@
     struct SPIRVUniformInfo : public UniformInfo {
         int fUBOOffset;
     };
-    typedef GrTBlockList<SPIRVUniformInfo> UniformInfoArray;
+    typedef SkTBlockList<SPIRVUniformInfo> UniformInfoArray;
     enum {
         kUniformBinding = 0,
         kUniformDescriptorSet = 0,
diff --git a/src/gpu/gl/GrGLProgramDataManager.h b/src/gpu/gl/GrGLProgramDataManager.h
index 1d96968..c01006c 100644
--- a/src/gpu/gl/GrGLProgramDataManager.h
+++ b/src/gpu/gl/GrGLProgramDataManager.h
@@ -9,8 +9,8 @@
 #define GrGLProgramDataManager_DEFINED
 
 #include "include/gpu/gl/GrGLTypes.h"
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/GrShaderVar.h"
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/glsl/GrGLSLProgramDataManager.h"
 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
 
@@ -35,11 +35,11 @@
         GrGLint     fLocation;
     };
 
-    // This uses a GrTBlockList rather than SkTArray/std::vector so that the GrShaderVars
+    // This uses a SkTBlockList rather than SkTArray/std::vector so that the GrShaderVars
     // don't move in memory after they are inserted. Users of GrGLShaderBuilder get refs to the vars
     // and ptrs to their name strings. Otherwise, we'd have to hand out copies.
-    typedef GrTBlockList<GLUniformInfo> UniformInfoArray;
-    typedef GrTBlockList<VaryingInfo>   VaryingInfoArray;
+    typedef SkTBlockList<GLUniformInfo> UniformInfoArray;
+    typedef SkTBlockList<VaryingInfo>   VaryingInfoArray;
 
     GrGLProgramDataManager(GrGLGpu*, const UniformInfoArray&);
 
diff --git a/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h b/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h
index bfa053a..aff0bc5 100644
--- a/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h
+++ b/src/gpu/glsl/GrGLSLFragmentShaderBuilder.h
@@ -46,7 +46,7 @@
 
 private:
     // WARNING: Like GrRenderTargetProxy, changes to this can cause issues in ASAN. This is caused
-    // by GrGLSLProgramBuilder's GrTBlockLists requiring 16 byte alignment, but since
+    // by GrGLSLProgramBuilder's SkTBlockLists requiring 16 byte alignment, but since
     // GrGLSLFragmentShaderBuilder has a virtual diamond hierarchy, ASAN requires all this pointers
     // to start aligned, even though clang is already correctly offsetting the individual fields
     // that require the larger alignment. In the current world, this extra padding is sufficient to
diff --git a/src/gpu/glsl/GrGLSLShaderBuilder.h b/src/gpu/glsl/GrGLSLShaderBuilder.h
index 71be3ba..b1f6fbc 100644
--- a/src/gpu/glsl/GrGLSLShaderBuilder.h
+++ b/src/gpu/glsl/GrGLSLShaderBuilder.h
@@ -12,8 +12,8 @@
 #include "include/private/SkSLStatement.h"
 #include "include/private/SkSLString.h"
 #include "include/private/SkTDArray.h"
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/GrShaderVar.h"
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
 
 #include <stdarg.h>
@@ -179,7 +179,7 @@
     };
 
 protected:
-    typedef GrTBlockList<GrShaderVar> VarArray;
+    typedef SkTBlockList<GrShaderVar> VarArray;
     void appendDecls(const VarArray& vars, SkString* out) const;
 
     void appendFunctionDecl(GrSLType returnType,
diff --git a/src/gpu/glsl/GrGLSLVarying.h b/src/gpu/glsl/GrGLSLVarying.h
index 148a651..80df23c 100644
--- a/src/gpu/glsl/GrGLSLVarying.h
+++ b/src/gpu/glsl/GrGLSLVarying.h
@@ -9,8 +9,8 @@
 #define GrGLSLVarying_DEFINED
 
 #include "include/private/GrTypesPriv.h"
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/GrShaderVar.h"
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/glsl/GrGLSLProgramDataManager.h"
 
 class GrGeometryProcessor;
@@ -159,8 +159,8 @@
         GrShaderFlags    fVisibility;
     };
 
-    typedef GrTBlockList<VaryingInfo> VaryingList;
-    typedef GrTBlockList<GrShaderVar> VarArray;
+    typedef SkTBlockList<VaryingInfo> VaryingList;
+    typedef SkTBlockList<GrShaderVar> VarArray;
 
     VaryingList    fVaryings;
     VarArray       fVertexInputs;
diff --git a/src/gpu/mtl/GrMtlUniformHandler.h b/src/gpu/mtl/GrMtlUniformHandler.h
index 89bf4e7..88e2b08 100644
--- a/src/gpu/mtl/GrMtlUniformHandler.h
+++ b/src/gpu/mtl/GrMtlUniformHandler.h
@@ -8,8 +8,8 @@
 #ifndef GrMtlUniformHandler_DEFINED
 #define GrMtlUniformHandler_DEFINED
 
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/GrShaderVar.h"
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
 
 #include <vector>
@@ -32,7 +32,7 @@
     struct MtlUniformInfo : public UniformInfo {
         uint32_t fUBOffset;
     };
-    typedef GrTBlockList<MtlUniformInfo> UniformInfoArray;
+    typedef SkTBlockList<MtlUniformInfo> UniformInfoArray;
 
     const GrShaderVar& getUniformVariable(UniformHandle u) const override {
         return fUniforms.item(u.toIndex()).fVariable;
diff --git a/src/gpu/ops/GrAtlasTextOp.h b/src/gpu/ops/GrAtlasTextOp.h
index cb81221..4c4a7bb 100644
--- a/src/gpu/ops/GrAtlasTextOp.h
+++ b/src/gpu/ops/GrAtlasTextOp.h
@@ -8,7 +8,6 @@
 #ifndef GrAtlasTextOp_DEFINED
 #define GrAtlasTextOp_DEFINED
 
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/effects/GrDistanceFieldGeoProc.h"
 #include "src/gpu/ops/GrMeshDrawOp.h"
 #include "src/gpu/text/GrTextBlob.h"
diff --git a/src/gpu/tessellate/GrAtlasRenderTask.h b/src/gpu/tessellate/GrAtlasRenderTask.h
index 1074a9f..cfd79ea 100644
--- a/src/gpu/tessellate/GrAtlasRenderTask.h
+++ b/src/gpu/tessellate/GrAtlasRenderTask.h
@@ -9,9 +9,9 @@
 #define GrAtlasRenderTask_DEFINED
 
 #include "include/core/SkPath.h"
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/GrDynamicAtlas.h"
 #include "src/gpu/GrOpsTask.h"
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/tessellate/GrPathTessellator.h"
 
 struct SkIPoint16;
@@ -59,8 +59,8 @@
     const std::unique_ptr<GrDynamicAtlas> fDynamicAtlas;
 
     // Allocate enough inline entries for 16 atlas path draws, then spill to the heap.
-    using PathDrawAllocator = GrTBlockList<GrPathTessellator::PathDrawList, 16>;
-    PathDrawAllocator fPathDrawAllocator{64, GrBlockAllocator::GrowthPolicy::kFibonacci};
+    using PathDrawAllocator = SkTBlockList<GrPathTessellator::PathDrawList, 16>;
+    PathDrawAllocator fPathDrawAllocator{64, SkBlockAllocator::GrowthPolicy::kFibonacci};
 
     class AtlasPathList : SkNoncopyable {
     public:
diff --git a/src/gpu/v1/ClipStack.h b/src/gpu/v1/ClipStack.h
index 26aa857..94887f7 100644
--- a/src/gpu/v1/ClipStack.h
+++ b/src/gpu/v1/ClipStack.h
@@ -12,9 +12,9 @@
 #include "include/core/SkMatrix.h"
 #include "include/core/SkShader.h"
 #include "include/private/GrResourceKey.h"
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/GrClip.h"
 #include "src/gpu/GrSurfaceProxyView.h"
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/geometry/GrShape.h"
 
 class GrAppliedClip;
@@ -100,7 +100,7 @@
     // Wraps the geometric Element data with logic for containment and bounds testing.
     class RawElement : private Element {
     public:
-        using Stack = GrTBlockList<RawElement, 1>;
+        using Stack = SkTBlockList<RawElement, 1>;
 
         RawElement(const SkMatrix& localToDevice, const GrShape& shape, GrAA aa, SkClipOp op);
 
@@ -167,7 +167,7 @@
     // owned by the ClipStack. Once SW masks are no longer needed, this can go away.
     class Mask {
     public:
-        using Stack = GrTBlockList<Mask, 1>;
+        using Stack = SkTBlockList<Mask, 1>;
 
         Mask(const SaveRecord& current, const SkIRect& bounds);
 
@@ -201,7 +201,7 @@
     // given a draw query.
     class SaveRecord {
     public:
-        using Stack = GrTBlockList<SaveRecord, 2>;
+        using Stack = SkTBlockList<SaveRecord, 2>;
 
         explicit SaveRecord(const SkIRect& deviceBounds);
 
diff --git a/src/gpu/vk/GrVkUniformHandler.h b/src/gpu/vk/GrVkUniformHandler.h
index 738db57..3328780 100644
--- a/src/gpu/vk/GrVkUniformHandler.h
+++ b/src/gpu/vk/GrVkUniformHandler.h
@@ -9,9 +9,9 @@
 #define GrVkUniformHandler_DEFINED
 
 #include "include/gpu/vk/GrVkTypes.h"
+#include "src/core/SkTBlockList.h"
 #include "src/gpu/GrSamplerState.h"
 #include "src/gpu/GrShaderVar.h"
-#include "src/gpu/GrTBlockList.h"
 #include "src/gpu/glsl/GrGLSLProgramBuilder.h"
 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
 #include "src/gpu/vk/GrVkSampler.h"
@@ -60,7 +60,7 @@
         // fImmutableSampler is used for sampling an image with a ycbcr conversion.
         const GrVkSampler*      fImmutableSampler = nullptr;
     };
-    typedef GrTBlockList<VkUniformInfo> UniformInfoArray;
+    typedef SkTBlockList<VkUniformInfo> UniformInfoArray;
 
     ~GrVkUniformHandler() override;
 
diff --git a/tests/GrBlockAllocatorTest.cpp b/tests/SkBlockAllocatorTest.cpp
similarity index 84%
rename from tests/GrBlockAllocatorTest.cpp
rename to tests/SkBlockAllocatorTest.cpp
index 0ebe164..4b87606 100644
--- a/tests/GrBlockAllocatorTest.cpp
+++ b/tests/SkBlockAllocatorTest.cpp
@@ -5,17 +5,25 @@
  * found in the LICENSE file.
  */
 
-#include "src/gpu/GrBlockAllocator.h"
+#include "src/core/SkBlockAllocator.h"
 #include "tests/Test.h"
 
 #include <cstring>
 
-using Block = GrBlockAllocator::Block;
-using GrowthPolicy = GrBlockAllocator::GrowthPolicy;
+using Block = SkBlockAllocator::Block;
+using GrowthPolicy = SkBlockAllocator::GrowthPolicy;
+
+class BlockAllocatorTestAccess {
+public:
+    template<size_t N>
+    static size_t ScratchBlockSize(SkSBlockAllocator<N>& pool) {
+        return (size_t) pool->scratchBlockSize();
+    }
+};
 
 // Helper functions for modifying the allocator in a controlled manner
 template<size_t N>
-static int block_count(const GrSBlockAllocator<N>& pool) {
+static int block_count(const SkSBlockAllocator<N>& pool) {
     int ct = 0;
     for (const Block* b : pool->blocks()) {
         (void) b;
@@ -25,7 +33,7 @@
 }
 
 template<size_t N>
-static Block* get_block(GrSBlockAllocator<N>& pool, int blockIndex) {
+static Block* get_block(SkSBlockAllocator<N>& pool, int blockIndex) {
     Block* found = nullptr;
     int i = 0;
     for (Block* b: pool->blocks()) {
@@ -40,18 +48,18 @@
     return found;
 }
 
-// GrBlockAllocator holds on to the largest last-released block to reuse for new allocations,
+// SkBlockAllocator holds on to the largest last-released block to reuse for new allocations,
 // and this is still counted in its totalSize(). However, it's easier to reason about size - scratch
 // in many of these tests.
 template<size_t N>
-static size_t total_size(GrSBlockAllocator<N>& pool) {
-    return pool->totalSize() - pool->testingOnly_scratchBlockSize();
+static size_t total_size(SkSBlockAllocator<N>& pool) {
+    return pool->totalSize() - BlockAllocatorTestAccess::ScratchBlockSize(pool);
 }
 
 template<size_t N>
-static size_t add_block(GrSBlockAllocator<N>& pool) {
+static size_t add_block(SkSBlockAllocator<N>& pool) {
     size_t currentSize = total_size(pool);
-    GrBlockAllocator::Block* current = pool->currentBlock();
+    SkBlockAllocator::Block* current = pool->currentBlock();
     while(pool->currentBlock() == current) {
         pool->template allocate<4>(pool->preallocSize() / 2);
     }
@@ -59,44 +67,44 @@
 }
 
 template<size_t N>
-static void* alloc_byte(GrSBlockAllocator<N>& pool) {
+static void* alloc_byte(SkSBlockAllocator<N>& pool) {
     auto br = pool->template allocate<1>(1);
     return br.fBlock->ptr(br.fAlignedOffset);
 }
 
-DEF_TEST(GrBlockAllocatorPreallocSize, r) {
+DEF_TEST(SkBlockAllocatorPreallocSize, r) {
     // Tests stack/member initialization, option #1 described in doc
-    GrBlockAllocator stack{GrowthPolicy::kFixed, 2048};
+    SkBlockAllocator stack{GrowthPolicy::kFixed, 2048};
     SkDEBUGCODE(stack.validate();)
 
-    REPORTER_ASSERT(r, stack.preallocSize() == sizeof(GrBlockAllocator));
+    REPORTER_ASSERT(r, stack.preallocSize() == sizeof(SkBlockAllocator));
     REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());
 
     // Tests placement new initialization to increase head block size, option #2
     void* mem = operator new(1024);
-    GrBlockAllocator* placement = new (mem) GrBlockAllocator(GrowthPolicy::kLinear, 1024,
-                                                             1024 - sizeof(GrBlockAllocator));
+    SkBlockAllocator* placement = new (mem) SkBlockAllocator(GrowthPolicy::kLinear, 1024,
+                                                             1024 - sizeof(SkBlockAllocator));
     REPORTER_ASSERT(r, placement->preallocSize() == 1024);
     REPORTER_ASSERT(r, placement->preallocUsableSpace() < 1024 &&
-                       placement->preallocUsableSpace() >= (1024 - sizeof(GrBlockAllocator)));
+                       placement->preallocUsableSpace() >= (1024 - sizeof(SkBlockAllocator)));
     delete placement;
 
     // Tests inline increased preallocation, option #3
-    GrSBlockAllocator<2048> inlined{};
+    SkSBlockAllocator<2048> inlined{};
     SkDEBUGCODE(inlined->validate();)
     REPORTER_ASSERT(r, inlined->preallocSize() == 2048);
     REPORTER_ASSERT(r, inlined->preallocUsableSpace() < 2048 &&
-                       inlined->preallocUsableSpace() >= (2048 - sizeof(GrBlockAllocator)));
+                       inlined->preallocUsableSpace() >= (2048 - sizeof(SkBlockAllocator)));
 }
 
-DEF_TEST(GrBlockAllocatorAlloc, r) {
-    GrSBlockAllocator<1024> pool{};
+DEF_TEST(SkBlockAllocatorAlloc, r) {
+    SkSBlockAllocator<1024> pool{};
     SkDEBUGCODE(pool->validate();)
 
     // Assumes the previous pointer was in the same block
     auto validate_ptr = [&](int align, int size,
-                            GrBlockAllocator::ByteRange br,
-                            GrBlockAllocator::ByteRange* prevBR) {
+                            SkBlockAllocator::ByteRange br,
+                            SkBlockAllocator::ByteRange* prevBR) {
         uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
         // Matches the requested align
         REPORTER_ASSERT(r, pt % align == 0);
@@ -170,12 +178,12 @@
     SkDEBUGCODE(pool->validate();)
 }
 
-DEF_TEST(GrBlockAllocatorResize, r) {
-    GrSBlockAllocator<1024> pool{};
+DEF_TEST(SkBlockAllocatorResize, r) {
+    SkSBlockAllocator<1024> pool{};
     SkDEBUGCODE(pool->validate();)
 
     // Fixed resize from 16 to 32
-    GrBlockAllocator::ByteRange p = pool->allocate<4>(16);
+    SkBlockAllocator::ByteRange p = pool->allocate<4>(16);
     REPORTER_ASSERT(r, p.fBlock->avail<4>() > 16);
     REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, 16));
     p.fEnd += 16;
@@ -223,8 +231,8 @@
     REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
 }
 
-DEF_TEST(GrBlockAllocatorRelease, r) {
-    GrSBlockAllocator<1024> pool{};
+DEF_TEST(SkBlockAllocatorRelease, r) {
+    SkSBlockAllocator<1024> pool{};
     SkDEBUGCODE(pool->validate();)
 
     // Successful allocate and release
@@ -255,13 +263,13 @@
     SkDEBUGCODE(pool->validate();)
 }
 
-DEF_TEST(GrBlockAllocatorRewind, r) {
+DEF_TEST(SkBlockAllocatorRewind, r) {
     // Confirm that a bunch of allocations and then releases in stack order fully goes back to the
     // start of the block (i.e. unwinds the entire stack, and not just the last cursor position)
-    GrSBlockAllocator<1024> pool{};
+    SkSBlockAllocator<1024> pool{};
     SkDEBUGCODE(pool->validate();)
 
-    std::vector<GrBlockAllocator::ByteRange> ptrs;
+    std::vector<SkBlockAllocator::ByteRange> ptrs;
     for (int i = 0; i < 32; ++i) {
         ptrs.push_back(pool->allocate<4>(16));
     }
@@ -279,10 +287,10 @@
     REPORTER_ASSERT(r, pool->allocate<4>(16).fStart == ptrs[0].fStart);
 }
 
-DEF_TEST(GrBlockAllocatorGrowthPolicy, r) {
+DEF_TEST(SkBlockAllocatorGrowthPolicy, r) {
     static constexpr int kInitSize = 128;
     static constexpr int kBlockCount = 5;
-    static constexpr size_t kExpectedSizes[GrBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
+    static constexpr size_t kExpectedSizes[SkBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
         // kFixed -> kInitSize per block
         { kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
         // kLinear -> (block ct + 1) * kInitSize for next block
@@ -293,8 +301,8 @@
         { kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
     };
 
-    for (int gp = 0; gp < GrBlockAllocator::kGrowthPolicyCount; ++gp) {
-        GrSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
+    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
+        SkSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
         SkDEBUGCODE(pool->validate();)
 
         REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
@@ -306,10 +314,10 @@
     }
 }
 
-DEF_TEST(GrBlockAllocatorReset, r) {
+DEF_TEST(SkBlockAllocatorReset, r) {
     static constexpr int kBlockIncrement = 1024;
 
-    GrSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
+    SkSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
     SkDEBUGCODE(pool->validate();)
 
     void* firstAlloc = alloc_byte(pool);
@@ -339,11 +347,11 @@
     SkDEBUGCODE(pool->validate();)
 }
 
-DEF_TEST(GrBlockAllocatorReleaseBlock, r) {
+DEF_TEST(SkBlockAllocatorReleaseBlock, r) {
     // This loops over all growth policies to make sure that the incremental releases update the
     // sequence correctly for each policy.
-    for (int gp = 0; gp < GrBlockAllocator::kGrowthPolicyCount; ++gp) {
-        GrSBlockAllocator<1024> pool{(GrowthPolicy) gp};
+    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
+        SkSBlockAllocator<1024> pool{(GrowthPolicy) gp};
         SkDEBUGCODE(pool->validate();)
 
         void* firstAlloc = alloc_byte(pool);
@@ -400,8 +408,8 @@
     }
 }
 
-DEF_TEST(GrBlockAllocatorIterateAndRelease, r) {
-    GrSBlockAllocator<256> pool;
+DEF_TEST(SkBlockAllocatorIterateAndRelease, r) {
+    SkSBlockAllocator<256> pool;
 
     pool->headBlock()->setMetadata(1);
     add_block(pool);
@@ -437,21 +445,21 @@
     REPORTER_ASSERT(r, block_count(pool) == 1);
 }
 
-DEF_TEST(GrBlockAllocatorScratchBlockReserve, r) {
-    GrSBlockAllocator<256> pool;
+DEF_TEST(SkBlockAllocatorScratchBlockReserve, r) {
+    SkSBlockAllocator<256> pool;
 
     size_t added = add_block(pool);
-    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
     size_t total = pool->totalSize();
     pool->releaseBlock(pool->currentBlock());
 
     // Total size shouldn't have changed; the released block should become scratch
     REPORTER_ASSERT(r, pool->totalSize() == total);
-    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == added);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == added);
 
     // But a reset definitely deletes any scratch block
     pool->reset();
-    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
 
     // Reserving more than what's available adds a scratch block, and current block remains avail.
     size_t avail = pool->currentBlock()->avail();
@@ -459,50 +467,50 @@
     pool->reserve(reserve);
     REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
     // And rounds up to the fixed size of this pool's growth policy
-    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= reserve &&
-                       pool->testingOnly_scratchBlockSize() % 256 == 0);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= reserve &&
+                       BlockAllocatorTestAccess::ScratchBlockSize(pool) % 256 == 0);
 
     // Allocating more than avail activates the scratch block (so totalSize doesn't change)
     size_t preAllocTotalSize = pool->totalSize();
     pool->allocate<1>(avail + 1);
-    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == 0);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
     REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);
 
     // When reserving less than what's still available in the current block, no scratch block is
     // added.
     pool->reserve(pool->currentBlock()->avail());
-    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
 
     // Unless checking available bytes is disabled
-    pool->reserve(pool->currentBlock()->avail(), GrBlockAllocator::kIgnoreExistingBytes_Flag);
-    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0);
+    pool->reserve(pool->currentBlock()->avail(), SkBlockAllocator::kIgnoreExistingBytes_Flag);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0);
 
     // If kIgnoreGrowthPolicy is specified, the new scratch block should not have been updated to
     // follow the size (which in this case is a fixed 256 bytes per block).
     pool->resetScratchSpace();
-    pool->reserve(32, GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
-    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0 &&
-                       pool->testingOnly_scratchBlockSize() < 256);
+    pool->reserve(32, SkBlockAllocator::kIgnoreGrowthPolicy_Flag);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0 &&
+                       BlockAllocatorTestAccess::ScratchBlockSize(pool) < 256);
 
     // When requesting an allocation larger than the current block and the scratch block, a new
     // block is added, and the scratch block remains scratch.
-    GrBlockAllocator::Block* oldTail = pool->currentBlock();
+    SkBlockAllocator::Block* oldTail = pool->currentBlock();
     avail = oldTail->avail();
     size_t scratchAvail = 2 * avail;
     pool->reserve(scratchAvail);
-    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= scratchAvail);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= scratchAvail);
 
     // This allocation request is higher than oldTail's available space and the scratch size, so
     // we should add a new block and the scratch size should stay the same.
-    scratchAvail = pool->testingOnly_scratchBlockSize();
+    scratchAvail = BlockAllocatorTestAccess::ScratchBlockSize(pool);
     pool->allocate<1>(scratchAvail + 1);
     REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
-    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == scratchAvail);
+    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == scratchAvail);
 }
 
-DEF_TEST(GrBlockAllocatorStealBlocks, r) {
-    GrSBlockAllocator<256> poolA;
-    GrSBlockAllocator<128> poolB;
+DEF_TEST(SkBlockAllocatorStealBlocks, r) {
+    SkSBlockAllocator<256> poolA;
+    SkSBlockAllocator<128> poolB;
 
     add_block(poolA);
     add_block(poolA);
@@ -514,7 +522,7 @@
     char* bAlloc = (char*) alloc_byte(poolB);
     *bAlloc = 't';
 
-    const GrBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);
+    const SkBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);
 
     REPORTER_ASSERT(r, block_count(poolA) == 4);
     REPORTER_ASSERT(r, block_count(poolB) == 3);
@@ -550,8 +558,8 @@
     int fX2;
 };
 
-DEF_TEST(GrBlockAllocatorMetadata, r) {
-    GrSBlockAllocator<1024> pool{};
+DEF_TEST(SkBlockAllocatorMetadata, r) {
+    SkSBlockAllocator<1024> pool{};
     SkDEBUGCODE(pool->validate();)
 
     // Allocation where alignment of user data > alignment of metadata
@@ -588,8 +596,8 @@
     REPORTER_ASSERT(r, metaBig->fX1 == 3 && metaBig->fX2 == 6);
 }
 
-DEF_TEST(GrBlockAllocatorAllocatorMetadata, r) {
-    GrSBlockAllocator<256> pool{};
+DEF_TEST(SkBlockAllocatorAllocatorMetadata, r) {
+    SkSBlockAllocator<256> pool{};
     SkDEBUGCODE(pool->validate();)
 
     REPORTER_ASSERT(r, pool->metadata() == 0); // initial value
@@ -608,7 +616,7 @@
 }
 
 template<size_t Align, size_t Padding>
-static void run_owning_block_test(skiatest::Reporter* r, GrBlockAllocator* pool) {
+static void run_owning_block_test(skiatest::Reporter* r, SkBlockAllocator* pool) {
     auto br = pool->allocate<Align, Padding>(1);
 
     void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
@@ -625,7 +633,7 @@
 }
 
 template<size_t Padding>
-static void run_owning_block_tests(skiatest::Reporter* r, GrBlockAllocator* pool) {
+static void run_owning_block_tests(skiatest::Reporter* r, SkBlockAllocator* pool) {
     run_owning_block_test<1, Padding>(r, pool);
     run_owning_block_test<2, Padding>(r, pool);
     run_owning_block_test<4, Padding>(r, pool);
@@ -636,8 +644,8 @@
     run_owning_block_test<128, Padding>(r, pool);
 }
 
-DEF_TEST(GrBlockAllocatorOwningBlock, r) {
-    GrSBlockAllocator<1024> pool{};
+DEF_TEST(SkBlockAllocatorOwningBlock, r) {
+    SkSBlockAllocator<1024> pool{};
     SkDEBUGCODE(pool->validate();)
 
     run_owning_block_tests<1>(r, pool.allocator());
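The BlockAllocatorTestAccess shim above replaces the old public testingOnly_scratchBlockSize() accessor: the test forwards through a small class that reaches the allocator's private scratchBlockSize(), presumably via a friend declaration added to SkBlockAllocator.h outside this excerpt. A minimal, self-contained sketch of the pattern, using hypothetical Pool/PoolTestAccess names rather than the real Skia types:

    #include <cstddef>

    // The production class keeps its internals private and befriends a test-only
    // shim, so no testingOnly_ accessors need to ship in the public API.
    class Pool {
    public:
        size_t totalSize() const { return fTotal; }
    private:
        size_t scratchSize() const { return fScratch; }  // internal detail under test
        size_t fTotal = 256;
        size_t fScratch = 64;
        friend class PoolTestAccess;
    };

    // Lives only in test code; forwards to the private member.
    class PoolTestAccess {
    public:
        static size_t ScratchSize(const Pool& p) { return p.scratchSize(); }
    };

    // In a test body: REPORTER_ASSERT(r, PoolTestAccess::ScratchSize(pool) == 64);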
diff --git a/tests/GrTBlockListTest.cpp b/tests/SkTBlockListTest.cpp
similarity index 84%
rename from tests/GrTBlockListTest.cpp
rename to tests/SkTBlockListTest.cpp
index aa16338..0032453 100644
--- a/tests/GrTBlockListTest.cpp
+++ b/tests/SkTBlockListTest.cpp
@@ -5,7 +5,7 @@
  * found in the LICENSE file.
  */
 
-#include "src/gpu/GrTBlockList.h"
+#include "src/core/SkTBlockList.h"
 #include "tests/Test.h"
 
 namespace {
@@ -22,7 +22,7 @@
 
     int fID;
 
-    // Under the hood, GrTBlockList and GrBlockAllocator round up to max_align_t. If 'C' was
+    // Under the hood, SkTBlockList and SkBlockAllocator round up to max_align_t. If 'C' was
     // just 4 bytes, that often means the internal blocks can squeeze a few extra instances in. This
     // is fine, but makes predicting a little trickier, so make sure C is a bit bigger.
     int fPadding[4];
@@ -37,10 +37,23 @@
 
 }  // namespace
 
+class TBlockListTestAccess {
+public:
+    template<int N>
+    static size_t ScratchBlockSize(SkTBlockList<C, N>& list) {
+        return (size_t) list.fAllocator->scratchBlockSize();
+    }
+
+    template<int N>
+    static size_t TotalSize(SkTBlockList<C, N>& list) {
+        return list.fAllocator->totalSize();
+    }
+};
+
 // Checks that the allocator has the correct count, etc and that the element IDs are correct.
 // Then pops popCnt items and checks again.
 template<int N>
-static void check_allocator_helper(GrTBlockList<C, N>* allocator, int cnt, int popCnt,
+static void check_allocator_helper(SkTBlockList<C, N>* allocator, int cnt, int popCnt,
                                    skiatest::Reporter* reporter) {
     REPORTER_ASSERT(reporter, (0 == cnt) == allocator->empty());
     REPORTER_ASSERT(reporter, cnt == allocator->count());
@@ -67,10 +80,10 @@
 }
 
 template<int N>
-static void check_iterator_helper(GrTBlockList<C, N>* allocator,
+static void check_iterator_helper(SkTBlockList<C, N>* allocator,
                                   const std::vector<C*>& expected,
                                   skiatest::Reporter* reporter) {
-    const GrTBlockList<C, N>* cAlloc = allocator;
+    const SkTBlockList<C, N>* cAlloc = allocator;
     REPORTER_ASSERT(reporter, (size_t) allocator->count() == expected.size());
     // Forward+const
     int i = 0;
@@ -114,7 +127,7 @@
 // Adds cnt items to the allocator, tests the cnts and iterators, pops popCnt items and checks
 // again. Finally it resets the allocator and checks again.
 template<int N>
-static void check_allocator(GrTBlockList<C, N>* allocator, int cnt, int popCnt,
+static void check_allocator(SkTBlockList<C, N>* allocator, int cnt, int popCnt,
                             skiatest::Reporter* reporter) {
     enum ItemInitializer : int {
         kCopyCtor,
@@ -156,7 +169,7 @@
 }
 
 template<int N>
-static void run_allocator_test(GrTBlockList<C, N>* allocator, skiatest::Reporter* reporter) {
+static void run_allocator_test(SkTBlockList<C, N>* allocator, skiatest::Reporter* reporter) {
     check_allocator(allocator, 0, 0, reporter);
     check_allocator(allocator, 1, 1, reporter);
     check_allocator(allocator, 2, 2, reporter);
@@ -169,8 +182,8 @@
 template<int N1, int N2>
 static void run_concat_test(skiatest::Reporter* reporter, int aCount, int bCount) {
 
-    GrTBlockList<C, N1> listA;
-    GrTBlockList<C, N2> listB;
+    SkTBlockList<C, N1> listA;
+    SkTBlockList<C, N2> listB;
 
     for (int i = 0; i < aCount; ++i) {
         listA.emplace_back(i);
@@ -185,7 +198,7 @@
     // Concatenate B into A and verify.
     listA.concat(std::move(listB));
     REPORTER_ASSERT(reporter, listA.count() == aCount + bCount);
-    // GrTBlockList guarantees the moved list is empty, but clang-tidy doesn't know about it;
+    // SkTBlockList guarantees the moved list is empty, but clang-tidy doesn't know about it;
     // in practice we won't really be using moved lists so this won't pollute our main code base
     // with lots of warning disables.
     REPORTER_ASSERT(reporter, listB.count() == 0); // NOLINT(bugprone-use-after-move)
@@ -206,8 +219,8 @@
 
     // This is similar to run_concat_test(), except since D is trivial we can't verify the
     // instance counts that are tracked via ctor/dtor.
-    GrTBlockList<D, N1> listA;
-    GrTBlockList<D, N2> listB;
+    SkTBlockList<D, N1> listA;
+    SkTBlockList<D, N2> listB;
 
     for (int i = 0; i < aCount; ++i) {
         listA.push_back({i});
@@ -235,24 +248,24 @@
 static void run_reserve_test(skiatest::Reporter* reporter) {
     constexpr int kItemsPerBlock = N + 4; // Make this a number > 1, even if N starting items == 1
 
-    GrTBlockList<C, N> list(kItemsPerBlock);
-    size_t initialSize = list.allocator()->totalSize();
+    SkTBlockList<C, N> list(kItemsPerBlock);
+    size_t initialSize = TBlockListTestAccess::TotalSize(list);
     // Should be able to add N instances of T w/o changing size from initialSize
     for (int i = 0; i < N; ++i) {
         list.push_back(C(i));
     }
-    REPORTER_ASSERT(reporter, initialSize == list.allocator()->totalSize());
+    REPORTER_ASSERT(reporter, initialSize == TBlockListTestAccess::TotalSize(list));
 
     // Reserve room for 2*kItemsPerBlock items
     list.reserve(2 * kItemsPerBlock);
     REPORTER_ASSERT(reporter, list.count() == N); // count shouldn't change though
 
-    size_t reservedSize = list.allocator()->totalSize();
+    size_t reservedSize = TBlockListTestAccess::TotalSize(list);
     REPORTER_ASSERT(reporter, reservedSize >= initialSize + 2 * kItemsPerBlock * sizeof(C));
     for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
         list.push_back(C(i));
     }
-    REPORTER_ASSERT(reporter, reservedSize == list.allocator()->totalSize());
+    REPORTER_ASSERT(reporter, reservedSize == TBlockListTestAccess::TotalSize(list));
 
     // Make the next block partially full (N > 0 but < kItemsPerBlock)
     for (int i = 0; i < N; ++i) {
@@ -263,20 +276,20 @@
     // (kItemsPerBlock-N) that are still available in the active block
     list.reserve(2 * kItemsPerBlock);
     int extraReservedCount = kItemsPerBlock + N;
-    // Because GrTBlockList normally allocates blocks in fixed sizes, and extraReservedCount >
+    // Because SkTBlockList normally allocates blocks in fixed sizes, and extraReservedCount >
     // items-per-block, it will always use that size and not that of the growth policy.
-    REPORTER_ASSERT(reporter, (size_t) list.allocator()->testingOnly_scratchBlockSize() >=
+    REPORTER_ASSERT(reporter, TBlockListTestAccess::ScratchBlockSize(list) >=
                                        extraReservedCount * sizeof(C));
 
-    reservedSize = list.allocator()->totalSize();
+    reservedSize = TBlockListTestAccess::TotalSize(list);
     for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
         list.push_back(C(i));
     }
-    REPORTER_ASSERT(reporter, reservedSize == list.allocator()->totalSize());
+    REPORTER_ASSERT(reporter, reservedSize == TBlockListTestAccess::TotalSize(list));
 
     // If we reserve a count < items-per-block, it will use the fixed size from the growth policy.
     list.reserve(2);
-    REPORTER_ASSERT(reporter, (size_t) list.allocator()->testingOnly_scratchBlockSize() >=
+    REPORTER_ASSERT(reporter, TBlockListTestAccess::ScratchBlockSize(list) >=
                                        kItemsPerBlock * sizeof(C));
 
     // Ensure the reservations didn't initialize any more C's than anticipated
@@ -287,24 +300,24 @@
     REPORTER_ASSERT(reporter, 0 == C::gInstCnt);
 }
 
-DEF_TEST(GrTBlockList, reporter) {
+DEF_TEST(SkTBlockList, reporter) {
     // Test combinations of allocators with and without stack storage and with different block sizes
-    GrTBlockList<C> a1(1);
+    SkTBlockList<C> a1(1);
     run_allocator_test(&a1, reporter);
 
-    GrTBlockList<C> a2(2);
+    SkTBlockList<C> a2(2);
     run_allocator_test(&a2, reporter);
 
-    GrTBlockList<C> a5(5);
+    SkTBlockList<C> a5(5);
     run_allocator_test(&a5, reporter);
 
-    GrTBlockList<C, 1> sa1;
+    SkTBlockList<C, 1> sa1;
     run_allocator_test(&sa1, reporter);
 
-    GrTBlockList<C, 3> sa3;
+    SkTBlockList<C, 3> sa3;
     run_allocator_test(&sa3, reporter);
 
-    GrTBlockList<C, 4> sa4;
+    SkTBlockList<C, 4> sa4;
     run_allocator_test(&sa4, reporter);
 
     run_reserve_test<1>(reporter);
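For orientation, a minimal usage sketch of the renamed container follows. It assumes compilation inside the Skia tree (so the src/core/SkTBlockList.h include resolves); the Item type, demo() function, and counts are illustrative, while the calls (push_back, emplace_back, reserve, concat, count) mirror those exercised by the tests above.

    #include "src/core/SkTBlockList.h"

    #include <utility>

    struct Item {
        explicit Item(int id) : fId(id) {}
        int fId;
    };

    static int demo() {
        SkTBlockList<Item> list(4);       // roughly four items per allocated block
        list.reserve(10);                 // grow capacity without constructing items
        for (int i = 0; i < 10; ++i) {
            list.push_back(Item(i));      // append
        }
        list.emplace_back(10);            // or construct in place

        SkTBlockList<Item> extra(2);
        extra.emplace_back(11);
        list.concat(std::move(extra));    // splice extra's items onto list; extra is left empty

        return list.count();              // 12
    }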