Modernize SkSpinlock.

 - Use std::atomic directly.
 - No more need for SkPODSpinlock or SK_DECLARE_STATIC_SPINLOCK.

Now simple code like this works as you'd hope:
    static SkSpinlock gLock;

That is, it starts unlocked and there's no static initializer.
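
For concreteness, a minimal sketch of the shape this moves toward; the
acquire()/release() bodies below are illustrative rather than a copy of
SkSpinlock.h:

    #include <atomic>

    class SkSpinlock {
    public:
        void acquire() {
            // Fast path: one exchange; fall back to the spinning path on contention.
            if (fLocked.exchange(true, std::memory_order_acquire)) {
                this->contendedAcquire();
            }
        }
        void release() {
            // Pairs with the acquire barrier taken when the lock was grabbed.
            fLocked.store(false, std::memory_order_release);
        }

    private:
        void contendedAcquire();            // out-of-line, see SkSpinlock.cpp below

        std::atomic<bool> fLocked{false};   // constexpr member initializer
    };

    static SkSpinlock gLock;                // constant-initialized at load time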

std::atomic_flag would make this terser and standard-guaranteed, but
ATOMIC_FLAG_INIT caused not-yet-implemented errors on MSVC 2013.
The generated code for the std::atomic<bool> approach used here is identical.
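
For reference, a sketch of the rejected std::atomic_flag variant (the class
name is made up for illustration):

    #include <atomic>

    class FlagSpinlock {                              // hypothetical name
    public:
        void acquire() {
            // test_and_set() returns the previous value: spin while it was already set.
            while (fLocked.test_and_set(std::memory_order_acquire)) { /*spin*/ }
        }
        void release() {
            fLocked.clear(std::memory_order_release);
        }

    private:
        std::atomic_flag fLocked = ATOMIC_FLAG_INIT;  // the macro MSVC 2013 chokes on
    };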

It appears the implicitly-defined default constructor is constexpr when all
of the member initializers are.  I'm hoping this way of producing constexpr
constructors without typing "constexpr" gives us a way to eliminate more
SkFoo / SkBaseFoo distinctions and SK_DECLARE_STATIC_FOO macros.  This was
certainly the easiest case to convert.
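
To show how the pattern could generalize, here is a sketch with a
hypothetical SkFoo; only the pattern, not the type, comes from this CL:

    #include <atomic>
    #include <cstdint>

    struct SkFoo {                        // hypothetical type
        std::atomic<int32_t> fCount{0};   // constexpr-constructible member
        bool                 fDirty = false;
        // No hand-written constructor: the implicit one is constexpr, so a
        // file-scope SkFoo is constant-initialized with no static initializer.
    };

    static SkFoo gFoo;                    // no SK_DECLARE_STATIC_FOO needed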

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1734383002

Review URL: https://codereview.chromium.org/1734383002
diff --git a/src/core/SkSpinlock.cpp b/src/core/SkSpinlock.cpp
index 0f76427..eb9d633 100644
--- a/src/core/SkSpinlock.cpp
+++ b/src/core/SkSpinlock.cpp
@@ -7,7 +7,9 @@
 
 #include "SkSpinlock.h"
 
-void SkPODSpinlock::contendedAcquire() {
-    // To act as a mutex, we need an acquire barrier when we take the lock.
-    while(sk_atomic_exchange(&fLocked, true, sk_memory_order_acquire)) { /*spin*/ }
+void SkSpinlock::contendedAcquire() {
+    // To act as a mutex, we need an acquire barrier when we acquire the lock.
+    while (fLocked.exchange(true, std::memory_order_acquire)) {
+        /*spin*/
+    }
 }
diff --git a/src/gpu/GrProcessor.cpp b/src/gpu/GrProcessor.cpp
index 15206c2..aef5190 100644
--- a/src/gpu/GrProcessor.cpp
+++ b/src/gpu/GrProcessor.cpp
@@ -81,7 +81,7 @@
 // memory barrier between accesses of a context on different threads. Also, there may be multiple
 // GrContexts and those contexts may be in use concurrently on different threads.
 namespace {
-SK_DECLARE_STATIC_SPINLOCK(gProcessorSpinlock);
+static SkSpinlock gProcessorSpinlock;
 class MemoryPoolAccessor {
 public:
     MemoryPoolAccessor() { gProcessorSpinlock.acquire(); }
diff --git a/src/gpu/batches/GrBatch.cpp b/src/gpu/batches/GrBatch.cpp
index 19c19ff..8af1c1e 100644
--- a/src/gpu/batches/GrBatch.cpp
+++ b/src/gpu/batches/GrBatch.cpp
@@ -20,7 +20,7 @@
 // memory barrier between accesses of a context on different threads. Also, there may be multiple
 // GrContexts and those contexts may be in use concurrently on different threads.
 namespace {
-SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock);
+static SkSpinlock gBatchSpinlock;
 class MemoryPoolAccessor {
 public:
     MemoryPoolAccessor() { gBatchSpinlock.acquire(); }