Add GrThreadSafePipelineBuilder

GrThreadSafePipelineBuilder is the generic base object that the
GrContextThreadSafeProxy will hold. Each backend will create a
backend-specific version that is shared between the direct context
and the (possibly many) utility contexts.
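
A rough sketch of the intended layering (GrMockPipelineBuilder and the
fPipelineBuilder member below are illustrative only, not part of this
change):

    // A backend subclasses the new base and hangs its pipeline/program
    // caching state off the subclass.
    class GrMockPipelineBuilder : public GrThreadSafePipelineBuilder {
        // backend-specific pipeline-creation state would live here
    };

    // GrContextThreadSafeProxy holds the generic base by ref; the direct
    // context and any utility contexts made from the proxy share it.
    sk_sp<GrThreadSafePipelineBuilder> fPipelineBuilder =
            sk_make_sp<GrMockPipelineBuilder>();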

Right now GrThreadSafePipelineBuilder just holds the pipeline-creation
stats. Relatedly, only GrGLGpu::ProgramCache and
GrVkResourceProvider::PipelineStateCache currently derive from the
new class, since GL and Vulkan are the only backends that generate
pipeline stats.
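
For illustration only, a backend cache deriving from the new class
could feed the shared stats roughly as follows (the class and method
below are made up; only the Stats calls come from this CL):

    class HypotheticalProgramCache : public GrThreadSafePipelineBuilder {
    public:
        void noteInlineLookup(bool hit, bool foundPrecompiled, bool compiledOK) {
            using Result = Stats::ProgramCacheResult;
            if (hit) {
                fStats.incNumInlineProgramCacheResult(Result::kHit);
                return;
            }
            // Either a persistent-cache (precompiled) entry was finished
            // off, or the program had to be compiled from scratch.
            fStats.incNumInlineProgramCacheResult(
                    foundPrecompiled ? Result::kPartial : Result::kMiss);
            fStats.incShaderCompilations();
            if (compiledOK) {
                fStats.incNumCompilationSuccesses();
            } else {
                fStats.incNumCompilationFailures();
            }
        }
    };

Because the counters are std::atomic, one Stats instance can be bumped
from the direct context and any utility contexts without extra locking.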

Change-Id: I58f441c5c2b870bb5970c29cba19d1775864d52e
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/378320
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
diff --git a/src/gpu/GrThreadSafePipelineBuilder.h b/src/gpu/GrThreadSafePipelineBuilder.h
new file mode 100644
index 0000000..6d6b3a1
--- /dev/null
+++ b/src/gpu/GrThreadSafePipelineBuilder.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrThreadSafePipelineBuilder_DEFINED
+#define GrThreadSafePipelineBuilder_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrConfig.h"
+
+#if GR_TEST_UTILS
+#include "include/private/SkTArray.h"
+class SkString;
+#endif
+
+class GrThreadSafePipelineBuilder : public SkRefCnt {
+public:
+    GrThreadSafePipelineBuilder() = default;
+
+    class Stats {
+    public:
+        enum class ProgramCacheResult {
+            kHit,       // the program was found in the cache
+            kMiss,      // the program was not found in the cache (and was, thus, compiled)
+            kPartial,   // a precompiled version was found in the persistent cache
+
+            kLast = kPartial
+        };
+
+#if GR_GPU_STATS
+        static const int kNumProgramCacheResults = (int)ProgramCacheResult::kLast + 1;
+
+        Stats() = default;
+
+        int shaderCompilations() const { return fShaderCompilations; }
+        void incShaderCompilations() { fShaderCompilations++; }
+
+        int numInlineCompilationFailures() const { return fNumInlineCompilationFailures; }
+        void incNumInlineCompilationFailures() { ++fNumInlineCompilationFailures; }
+
+        int numInlineProgramCacheResult(ProgramCacheResult stat) const {
+            return fInlineProgramCacheStats[(int) stat];
+        }
+        void incNumInlineProgramCacheResult(ProgramCacheResult stat) {
+            ++fInlineProgramCacheStats[(int) stat];
+        }
+
+        int numPreCompilationFailures() const { return fNumPreCompilationFailures; }
+        void incNumPreCompilationFailures() { ++fNumPreCompilationFailures; }
+
+        int numPreProgramCacheResult(ProgramCacheResult stat) const {
+            return fPreProgramCacheStats[(int) stat];
+        }
+        void incNumPreProgramCacheResult(ProgramCacheResult stat) {
+            ++fPreProgramCacheStats[(int) stat];
+        }
+
+        int numCompilationFailures() const { return fNumCompilationFailures; }
+        void incNumCompilationFailures() { ++fNumCompilationFailures; }
+
+        int numPartialCompilationSuccesses() const { return fNumPartialCompilationSuccesses; }
+        void incNumPartialCompilationSuccesses() { ++fNumPartialCompilationSuccesses; }
+
+        int numCompilationSuccesses() const { return fNumCompilationSuccesses; }
+        void incNumCompilationSuccesses() { ++fNumCompilationSuccesses; }
+
+#if GR_TEST_UTILS
+        void dump(SkString*);
+        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
+#endif
+
+    private:
+        std::atomic<int> fShaderCompilations{0};
+
+        std::atomic<int> fNumInlineCompilationFailures{0};
+        std::atomic<int> fInlineProgramCacheStats[kNumProgramCacheResults]{0};
+
+        std::atomic<int> fNumPreCompilationFailures{0};
+        std::atomic<int> fPreProgramCacheStats[kNumProgramCacheResults]{0};
+
+        std::atomic<int> fNumCompilationFailures{0};
+        std::atomic<int> fNumPartialCompilationSuccesses{0};
+        std::atomic<int> fNumCompilationSuccesses{0};
+
+#else
+        void incShaderCompilations() {}
+        void incNumInlineCompilationFailures() {}
+        void incNumInlineProgramCacheResult(ProgramCacheResult stat) {}
+        void incNumPreCompilationFailures() {}
+        void incNumPreProgramCacheResult(ProgramCacheResult stat) {}
+        void incNumCompilationFailures() {}
+        void incNumPartialCompilationSuccesses() {}
+        void incNumCompilationSuccesses() {}
+
+#if GR_TEST_UTILS
+        void dump(SkString*) {}
+        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
+#endif
+
+#endif // GR_GPU_STATS
+    };
+
+    Stats* stats() { return &fStats; }
+
+protected:
+    Stats fStats;
+};
+
+#endif