some SkAtomics cleanup

 - Replace sk_memory_order with std::memory_order.
 - Remove SkAtomic<T>.
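
Callers of the shim functions update mechanically; e.g. in
SkPicture::uniqueID() below:

    -    uint32_t id = sk_atomic_load(&fUniqueID, sk_memory_order_relaxed);
    +    uint32_t id = sk_atomic_load(&fUniqueID, std::memory_order_relaxed);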

SkPath was the only user of SkAtomic<T>, for its fConvexity and
fFirstDirection fields.  I've replaced them with std::atomic types,
funneling all access through methods that enforce the same relaxed
memory order that SkAtomic<T> did.
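
In sketch form (simplified from the SkPath.h hunk below):

    // Before: the field type baked in the default memory order.
    mutable SkAtomic<Convexity, sk_memory_order_relaxed> fConvexity;

    // After: a plain std::atomic, with the relaxed order enforced
    // at each access point instead.
    mutable std::atomic<Convexity> fConvexity;
    Convexity getConvexityOrUnknown() const {
        return fConvexity.load(std::memory_order_relaxed);
    }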

For fConvexity, we can use the existing setConvexity() and
getConvexityOrUnknown() methods, adding a private const overload of
setConvexity() so that const methods can update the cached convexity.
For fFirstDirection I've added private setFirstDirection() and
getFirstDirection() methods.
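
The const setters are safe because both fields are mutable atomics,
as in this snippet from the SkPath.cpp hunk below:

    void SkPath::setConvexity(Convexity c) const {
        fConvexity.store(c, std::memory_order_relaxed);  // fConvexity is mutable
    }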

Removing SkAtomic<T> means SkAtomics.h no longer needs
SkNoncopyable.h.  I've had to update a bunch of other headers that
had been relying on that transitive include.
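
Each fix is just the now-explicit include, e.g. in SkReader32.h:

    #include "SkNoncopyable.h"  // previously pulled in via SkAtomics.h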

Change-Id: Ib238be71a121519db6e970a9a8955834e1298c87
Reviewed-on: https://skia-review.googlesource.com/c/174220
Commit-Queue: Brian Salomon <bsalomon@google.com>
Auto-Submit: Mike Klein <mtklein@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
diff --git a/experimental/svg/model/SkSVGAttributeParser.h b/experimental/svg/model/SkSVGAttributeParser.h
index 7d329c7..517f1ca 100644
--- a/experimental/svg/model/SkSVGAttributeParser.h
+++ b/experimental/svg/model/SkSVGAttributeParser.h
@@ -8,6 +8,7 @@
 #ifndef SkSVGAttributeParser_DEFINED
 #define SkSVGAttributeParser_DEFINED
 
+#include "SkNoncopyable.h"
 #include "SkSVGTypes.h"
 
 class SkSVGAttributeParser : public SkNoncopyable {
diff --git a/experimental/svg/model/SkSVGValue.h b/experimental/svg/model/SkSVGValue.h
index d994f64..d58d845 100644
--- a/experimental/svg/model/SkSVGValue.h
+++ b/experimental/svg/model/SkSVGValue.h
@@ -10,6 +10,7 @@
 
 #include "SkColor.h"
 #include "SkMatrix.h"
+#include "SkNoncopyable.h"
 #include "SkPath.h"
 #include "SkSVGTypes.h"
 #include "SkTypes.h"
diff --git a/include/core/SkPath.h b/include/core/SkPath.h
index 9192419..d463956 100644
--- a/include/core/SkPath.h
+++ b/include/core/SkPath.h
@@ -233,7 +233,8 @@
         @return  computed or stored SkPath::Convexity
     */
     Convexity getConvexity() const {
-        for (Convexity convexity = fConvexity.load(); kUnknown_Convexity != convexity; ) {
+        Convexity convexity = this->getConvexityOrUnknown();
+        if (convexity != kUnknown_Convexity) {
             return convexity;
         }
         return this->internalGetConvexity();
@@ -244,7 +245,7 @@
 
         @return  stored SkPath::Convexity
     */
-    Convexity getConvexityOrUnknown() const { return (Convexity)fConvexity; }
+    Convexity getConvexityOrUnknown() const { return fConvexity.load(std::memory_order_relaxed); }
 
     /** Stores convexity so that it is later returned by getConvexity() or getConvexityOrUnknown().
         convexity may differ from getConvexity(), although setting an incorrect value may
@@ -1687,13 +1688,13 @@
 #endif
 
 private:
-    sk_sp<SkPathRef>                                     fPathRef;
-    int                                                  fLastMoveToIndex;
-    mutable SkAtomic<Convexity, sk_memory_order_relaxed> fConvexity;       // SkPath::Convexity
-    mutable SkAtomic<uint8_t, sk_memory_order_relaxed>   fFirstDirection;  // SkPathPriv::FirstDirection
-    uint8_t                                              fFillType    : 2;
-    uint8_t                                              fIsVolatile  : 1;
-    uint8_t                                              fIsBadForDAA : 1;
+    sk_sp<SkPathRef>               fPathRef;
+    int                            fLastMoveToIndex;
+    mutable std::atomic<Convexity> fConvexity;
+    mutable std::atomic<uint8_t>   fFirstDirection; // really an SkPathPriv::FirstDirection
+    uint8_t                        fFillType    : 2;
+    uint8_t                        fIsVolatile  : 1;
+    uint8_t                        fIsBadForDAA : 1;
 
     /** Resets all fields other than fPathRef to their initial 'empty' values.
      *  Assumes the caller has already emptied fPathRef.
@@ -1765,6 +1766,12 @@
 
     void setPt(int index, SkScalar x, SkScalar y);
 
+    // Bottlenecks for working with fConvexity and fFirstDirection.
+    // Notice the setters are const... these are mutable atomic fields.
+    void    setConvexity(Convexity) const;
+    void    setFirstDirection(uint8_t) const;
+    uint8_t getFirstDirection() const;
+
     friend class SkAutoPathBoundsUpdate;
     friend class SkAutoDisableOvalCheck;
     friend class SkAutoDisableDirectionCheck;
diff --git a/include/core/SkPathMeasure.h b/include/core/SkPathMeasure.h
index e506c42..ae4c4e6 100644
--- a/include/core/SkPathMeasure.h
+++ b/include/core/SkPathMeasure.h
@@ -8,6 +8,7 @@
 #ifndef SkPathMeasure_DEFINED
 #define SkPathMeasure_DEFINED
 
+#include "../private/SkNoncopyable.h"
 #include "../private/SkTDArray.h"
 #include "SkPath.h"
 
diff --git a/include/private/SkAtomics.h b/include/private/SkAtomics.h
index 349bdc4..d062246 100644
--- a/include/private/SkAtomics.h
+++ b/include/private/SkAtomics.h
@@ -8,117 +8,59 @@
 #ifndef SkAtomics_DEFINED
 #define SkAtomics_DEFINED
 
-// This file is not part of the public Skia API.
-#include "../private/SkNoncopyable.h"
 #include "SkTypes.h"
 #include <atomic>
 
-// ~~~~~~~~ APIs ~~~~~~~~~
-
-enum sk_memory_order {
-    sk_memory_order_relaxed,
-    sk_memory_order_consume,
-    sk_memory_order_acquire,
-    sk_memory_order_release,
-    sk_memory_order_acq_rel,
-    sk_memory_order_seq_cst,
-};
+// ~~~~~~~~ Legacy APIs ~~~~~~~~~
+//
+// Please use types from <atomic> for any new code.
+// That's all this file ends up doing under the hood.
 
 template <typename T>
-T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);
-
-template <typename T>
-void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);
-
-template <typename T>
-T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);
-
-template <typename T>
-bool sk_atomic_compare_exchange(T*, T* expected, T desired,
-                                sk_memory_order success = sk_memory_order_seq_cst,
-                                sk_memory_order failure = sk_memory_order_seq_cst);
-
-// A little wrapper class for small T (think, builtins: int, float, void*) to
-// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
-// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
-template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
-class SkAtomic : SkNoncopyable {
-public:
-    SkAtomic() {}
-    explicit SkAtomic(const T& val) : fVal(val) {}
-
-    // It is essential we return by value rather than by const&.  fVal may change at any time.
-    T load(sk_memory_order mo = default_memory_order) const {
-        return sk_atomic_load(&fVal, mo);
-    }
-
-    void store(const T& val, sk_memory_order mo = default_memory_order) {
-        sk_atomic_store(&fVal, val, mo);
-    }
-
-    // Alias for .load(default_memory_order).
-    operator T() const {
-        return this->load();
-    }
-
-    // Alias for .store(v, default_memory_order).
-    T operator=(const T& v) {
-        this->store(v);
-        return v;
-    }
-private:
-    T fVal;
-};
-
-// ~~~~~~~~ Implementations ~~~~~~~~~
-
-template <typename T>
-T sk_atomic_load(const T* ptr, sk_memory_order mo) {
-    SkASSERT(mo == sk_memory_order_relaxed ||
-             mo == sk_memory_order_seq_cst ||
-             mo == sk_memory_order_acquire ||
-             mo == sk_memory_order_consume);
+T sk_atomic_load(const T* ptr, std::memory_order mo = std::memory_order_seq_cst) {
+    SkASSERT(mo == std::memory_order_relaxed ||
+             mo == std::memory_order_seq_cst ||
+             mo == std::memory_order_acquire ||
+             mo == std::memory_order_consume);
     const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
-    return std::atomic_load_explicit(ap, (std::memory_order)mo);
+    return std::atomic_load_explicit(ap, mo);
 }
 
 template <typename T>
-void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
-    SkASSERT(mo == sk_memory_order_relaxed ||
-             mo == sk_memory_order_seq_cst ||
-             mo == sk_memory_order_release);
+void sk_atomic_store(T* ptr, T val, std::memory_order mo = std::memory_order_seq_cst) {
+    SkASSERT(mo == std::memory_order_relaxed ||
+             mo == std::memory_order_seq_cst ||
+             mo == std::memory_order_release);
     std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
-    return std::atomic_store_explicit(ap, val, (std::memory_order)mo);
+    return std::atomic_store_explicit(ap, val, mo);
 }
 
 template <typename T>
-T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
+T sk_atomic_fetch_add(T* ptr, T val, std::memory_order mo = std::memory_order_seq_cst) {
     // All values of mo are valid.
     std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
-    return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
+    return std::atomic_fetch_add_explicit(ap, val, mo);
 }
 
 template <typename T>
 bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
-                                sk_memory_order success,
-                                sk_memory_order failure) {
+                                std::memory_order success = std::memory_order_seq_cst,
+                                std::memory_order failure = std::memory_order_seq_cst) {
     // All values of success are valid.
-    SkASSERT(failure == sk_memory_order_relaxed ||
-             failure == sk_memory_order_seq_cst ||
-             failure == sk_memory_order_acquire ||
-             failure == sk_memory_order_consume);
+    SkASSERT(failure == std::memory_order_relaxed ||
+             failure == std::memory_order_seq_cst ||
+             failure == std::memory_order_acquire ||
+             failure == std::memory_order_consume);
     SkASSERT(failure <= success);
     std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
-    return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
-                                                        (std::memory_order)success,
-                                                        (std::memory_order)failure);
+    return std::atomic_compare_exchange_strong_explicit(ap, expected, desired, success, failure);
 }
 
-// ~~~~~~~~ Legacy APIs ~~~~~~~~~
-
-// From here down we have shims for our old atomics API, to be weaned off of.
-// We use the default sequentially-consistent memory order to make things simple
-// and to match the practical reality of our old _sync and _win implementations.
+// ~~~~~~~~ Very Legacy APIs ~~~~~~~~~
+//
+// Here are shims for our very old atomics API, to be weaned off of.  They use
+// sequentially-consistent memory order to match historical behavior, but most
+// of the callers could perform better with explicit, weaker memory ordering.
 
 inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
 inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
diff --git a/src/core/SkPath.cpp b/src/core/SkPath.cpp
index 468ea56..86c2618 100644
--- a/src/core/SkPath.cpp
+++ b/src/core/SkPath.cpp
@@ -65,11 +65,11 @@
 class SkAutoDisableDirectionCheck {
 public:
     SkAutoDisableDirectionCheck(SkPath* path) : fPath(path) {
-        fSaved = static_cast<SkPathPriv::FirstDirection>(fPath->fFirstDirection.load());
+        fSaved = static_cast<SkPathPriv::FirstDirection>(fPath->getFirstDirection());
     }
 
     ~SkAutoDisableDirectionCheck() {
-        fPath->fFirstDirection = fSaved;
+        fPath->setFirstDirection(fSaved);
     }
 
 private:
@@ -163,8 +163,8 @@
     //fPathRef is assumed to have been emptied by the caller.
     fLastMoveToIndex = INITIAL_LASTMOVETOINDEX_VALUE;
     fFillType = kWinding_FillType;
-    fConvexity = kUnknown_Convexity;
-    fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+    this->setConvexity(kUnknown_Convexity);
+    this->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
 
     // We don't touch Android's fSourcePath.  It's used to track texture garbage collection, so we
     // don't want to muck with it if it's been set to something non-nullptr.
@@ -199,8 +199,8 @@
     fIsBadForDAA     = that.fIsBadForDAA;
 
     // Non-atomic assignment of atomic values.
-    fConvexity     .store(that.fConvexity     .load());
-    fFirstDirection.store(that.fFirstDirection.load());
+    this->setConvexity(that.getConvexityOrUnknown());
+    this->setFirstDirection(that.getFirstDirection());
 }
 
 bool operator==(const SkPath& a, const SkPath& b) {
@@ -224,13 +224,13 @@
         that.fIsVolatile = iv;
 
         // Non-atomic swaps of atomic values.
-        Convexity c = fConvexity.load();
-        fConvexity.store(that.fConvexity.load());
-        that.fConvexity.store(c);
+        Convexity c = this->getConvexityOrUnknown();
+        this->setConvexity(that.getConvexityOrUnknown());
+        that.setConvexity(c);
 
-        uint8_t fd = fFirstDirection.load();
-        fFirstDirection.store(that.fFirstDirection.load());
-        that.fFirstDirection.store(fd);
+        uint8_t fd = this->getFirstDirection();
+        this->setFirstDirection(that.getFirstDirection());
+        that.setFirstDirection(fd);
     }
 }
 
@@ -742,19 +742,29 @@
     }
 }
 
+// This is the public-facing non-const setConvexity().
 void SkPath::setConvexity(Convexity c) {
-    if (fConvexity != c) {
-        fConvexity = c;
-    }
+    fConvexity.store(c, std::memory_order_relaxed);
+}
+
+// Const hooks for working with fConvexity and fFirstDirection from const methods.
+void SkPath::setConvexity(Convexity c) const {
+    fConvexity.store(c, std::memory_order_relaxed);
+}
+void SkPath::setFirstDirection(uint8_t d) const {
+    fFirstDirection.store(d, std::memory_order_relaxed);
+}
+uint8_t SkPath::getFirstDirection() const {
+    return fFirstDirection.load(std::memory_order_relaxed);
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //  Construction methods
 
-#define DIRTY_AFTER_EDIT                                        \
-    do {                                                        \
-        fConvexity = kUnknown_Convexity;                        \
-        fFirstDirection = SkPathPriv::kUnknown_FirstDirection;  \
+#define DIRTY_AFTER_EDIT                                               \
+    do {                                                               \
+        this->setConvexity(kUnknown_Convexity);                        \
+        this->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);  \
     } while (0)
 
 void SkPath::incReserve(int inc) {
@@ -1027,8 +1037,8 @@
 
 SkPath& SkPath::addRect(const SkRect &rect, Direction dir, unsigned startIndex) {
     assert_known_direction(dir);
-    fFirstDirection = this->hasOnlyMoveTos() ?
-        (SkPathPriv::FirstDirection)dir : SkPathPriv::kUnknown_FirstDirection;
+    this->setFirstDirection(this->hasOnlyMoveTos() ? (SkPathPriv::FirstDirection)dir
+                                                   : SkPathPriv::kUnknown_FirstDirection);
     SkAutoDisableDirectionCheck addc(this);
     SkAutoPathBoundsUpdate apbu(this, rect);
 
@@ -1174,8 +1184,8 @@
         // degenerate(oval) => line points are collapsing
         this->addOval(bounds, dir, startIndex / 2);
     } else {
-        fFirstDirection = this->hasOnlyMoveTos() ?
-                            (SkPathPriv::FirstDirection)dir : SkPathPriv::kUnknown_FirstDirection;
+        this->setFirstDirection(this->hasOnlyMoveTos() ? (SkPathPriv::FirstDirection)dir
+                                                       : SkPathPriv::kUnknown_FirstDirection);
 
         SkAutoPathBoundsUpdate apbu(this, bounds);
         SkAutoDisableDirectionCheck addc(this);
@@ -1281,9 +1291,9 @@
      */
     bool isOval = hasOnlyMoveTos();
     if (isOval) {
-        fFirstDirection = (SkPathPriv::FirstDirection)dir;
+        this->setFirstDirection((SkPathPriv::FirstDirection)dir);
     } else {
-        fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+        this->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
     }
 
     SkAutoDisableDirectionCheck addc(this);
@@ -1839,10 +1849,10 @@
         dst->swap(tmp);
         SkPathRef::Editor ed(&dst->fPathRef);
         matrix.mapPoints(ed.points(), ed.pathRef()->countPoints());
-        dst->fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+        dst->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
     } else {
 #ifndef SK_SUPPORT_LEGACY_CACHE_CONVEXITY
-        Convexity convexity = fConvexity;
+        Convexity convexity = this->getConvexityOrUnknown();
 #endif
 
         SkPathRef::CreateTransformedCopy(&dst->fPathRef, *fPathRef.get(), matrix);
@@ -1851,7 +1861,7 @@
             dst->fLastMoveToIndex = fLastMoveToIndex;
             dst->fFillType = fFillType;
 #ifdef SK_SUPPORT_LEGACY_CACHE_CONVEXITY
-            dst->fConvexity.store(fConvexity);
+            dst->setConvexity(this->getConvexityOrUnknown());
 #endif
             dst->fIsVolatile = fIsVolatile;
         }
@@ -1862,28 +1872,29 @@
         // However, some transformations are thought to be safe:
         //    axis-aligned values under scale/translate.
         if (matrix.isScaleTranslate() && SkPathPriv::IsAxisAligned(*this)) {
-            dst->fConvexity = convexity;
+            dst->setConvexity(convexity);
         } else {
-            dst->fConvexity = kUnknown_Convexity;
+            dst->setConvexity(kUnknown_Convexity);
         }
 #endif
 
-        if (SkPathPriv::kUnknown_FirstDirection == fFirstDirection) {
-            dst->fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+        if (this->getFirstDirection() == SkPathPriv::kUnknown_FirstDirection) {
+            dst->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
         } else {
             SkScalar det2x2 =
                 matrix.get(SkMatrix::kMScaleX) * matrix.get(SkMatrix::kMScaleY) -
                 matrix.get(SkMatrix::kMSkewX)  * matrix.get(SkMatrix::kMSkewY);
             if (det2x2 < 0) {
-                dst->fFirstDirection = SkPathPriv::OppositeFirstDirection(
-                        (SkPathPriv::FirstDirection)fFirstDirection.load());
+                dst->setFirstDirection(
+                        SkPathPriv::OppositeFirstDirection(
+                            (SkPathPriv::FirstDirection)this->getFirstDirection()));
             } else if (det2x2 > 0) {
-                dst->fFirstDirection = fFirstDirection.load();
+                dst->setFirstDirection(this->getFirstDirection());
             } else {
 #ifdef SK_SUPPORT_LEGACY_CACHE_CONVEXITY
-                dst->fConvexity = kUnknown_Convexity;
+                dst->setConvexity(kUnknown_Convexity);
 #endif
-                dst->fFirstDirection = SkPathPriv::kUnknown_FirstDirection;
+                dst->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
             }
         }
 
@@ -2496,7 +2507,8 @@
 
 SkPath::Convexity SkPath::internalGetConvexity() const {
     // Sometimes we think we need to calculate convexity but another thread already did.
-    for (auto c = (Convexity)fConvexity; c != kUnknown_Convexity; ) {
+    auto c = this->getConvexityOrUnknown();
+    if (c != kUnknown_Convexity) {
         return c;
     }
 
@@ -2515,7 +2527,7 @@
         switch (verb) {
             case kMove_Verb:
                 if (++contourCount > 1) {
-                    fConvexity = kConcave_Convexity;
+                    this->setConvexity(kConcave_Convexity);
                     return kConcave_Convexity;
                 }
                 pts[1] = pts[0];
@@ -2541,7 +2553,7 @@
                 break;
             default:
                 SkDEBUGFAIL("bad verb");
-                fConvexity = kConcave_Convexity;
+                this->setConvexity(kConcave_Convexity);
                 return kConcave_Convexity;
         }
 
@@ -2553,20 +2565,24 @@
             return kUnknown_Convexity;
         }
         if (kConcave_Convexity == state.getConvexity()) {
-            fConvexity = kConcave_Convexity;
+            this->setConvexity(kConcave_Convexity);
             return kConcave_Convexity;
         }
     }
-    fConvexity = state.getConvexity();
-    if (kConvex_Convexity == fConvexity && SkPathPriv::kUnknown_FirstDirection == fFirstDirection) {
-        if (SkPathPriv::kUnknown_FirstDirection == state.getFirstDirection() &&
-                !this->getBounds().isEmpty() && !state.hasBackwards()) {
-            fConvexity = Convexity::kConcave_Convexity;
+    this->setConvexity(state.getConvexity());
+
+    if (this->getConvexityOrUnknown() == kConvex_Convexity &&
+            this->getFirstDirection() == SkPathPriv::kUnknown_FirstDirection) {
+
+        if (state.getFirstDirection() == SkPathPriv::kUnknown_FirstDirection
+                && !this->getBounds().isEmpty()
+                && !state.hasBackwards()) {
+            this->setConvexity(Convexity::kConcave_Convexity);
         } else {
-            fFirstDirection = state.getFirstDirection();
+            this->setFirstDirection(state.getFirstDirection());
         }
     }
-    return static_cast<Convexity>(fConvexity);
+    return this->getConvexityOrUnknown();
 }
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -2738,16 +2754,17 @@
  *  its cross product.
  */
 bool SkPathPriv::CheapComputeFirstDirection(const SkPath& path, FirstDirection* dir) {
-    if (kUnknown_FirstDirection != path.fFirstDirection.load()) {
-        *dir = static_cast<FirstDirection>(path.fFirstDirection.load());
+    auto d = path.getFirstDirection();
+    if (d != kUnknown_FirstDirection) {
+        *dir = static_cast<FirstDirection>(d);
         return true;
     }
 
-    // don't want to pay the cost for computing this if it
-    // is unknown, so we don't call isConvex()
-    if (SkPath::kConvex_Convexity == path.getConvexityOrUnknown()) {
-        SkASSERT(kUnknown_FirstDirection == path.fFirstDirection);
-        *dir = static_cast<FirstDirection>(path.fFirstDirection.load());
+    // We don't want to pay the cost for computing convexity if it is unknown,
+    // so we call getConvexityOrUnknown() instead of isConvex().
+    if (path.getConvexityOrUnknown() == SkPath::kConvex_Convexity) {
+        SkASSERT(path.getFirstDirection() == kUnknown_FirstDirection);
+        *dir = static_cast<FirstDirection>(path.getFirstDirection());
         return false;
     }
 
@@ -2820,7 +2837,7 @@
     }
     if (ymaxCross) {
         crossToDir(ymaxCross, dir);
-        path.fFirstDirection = *dir;
+        path.setFirstDirection(*dir);
         return true;
     } else {
         return false;
@@ -3486,7 +3503,7 @@
         path->close();
     }
     path->setConvexity(convex ? SkPath::kConvex_Convexity : SkPath::kConcave_Convexity);
-    path->fFirstDirection.store(firstDir);
+    path->setFirstDirection(firstDir);
 }
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/core/SkPicture.cpp b/src/core/SkPicture.cpp
index c37c01e..603dea9 100644
--- a/src/core/SkPicture.cpp
+++ b/src/core/SkPicture.cpp
@@ -37,12 +37,12 @@
 
 uint32_t SkPicture::uniqueID() const {
     static uint32_t gNextID = 1;
-    uint32_t id = sk_atomic_load(&fUniqueID, sk_memory_order_relaxed);
+    uint32_t id = sk_atomic_load(&fUniqueID, std::memory_order_relaxed);
     while (id == 0) {
         uint32_t next = sk_atomic_fetch_add(&gNextID, 1u);
         if (sk_atomic_compare_exchange(&fUniqueID, &id, next,
-                                       sk_memory_order_relaxed,
-                                       sk_memory_order_relaxed)) {
+                                       std::memory_order_relaxed,
+                                       std::memory_order_relaxed)) {
             id = next;
         } else {
             // sk_atomic_compare_exchange replaced id with the current value of fUniqueID.
diff --git a/src/core/SkReader32.h b/src/core/SkReader32.h
index 1f027f7..bfdfa4f 100644
--- a/src/core/SkReader32.h
+++ b/src/core/SkReader32.h
@@ -11,6 +11,7 @@
 
 #include "SkData.h"
 #include "SkMatrix.h"
+#include "SkNoncopyable.h"
 #include "SkPath.h"
 #include "SkRegion.h"
 #include "SkRRect.h"
diff --git a/src/core/SkTraceEvent.h b/src/core/SkTraceEvent.h
index 219be42..032be8b 100644
--- a/src/core/SkTraceEvent.h
+++ b/src/core/SkTraceEvent.h
@@ -71,9 +71,9 @@
     SkEventTracer::GetInstance()->updateTraceEventDuration
 
 #define TRACE_EVENT_API_ATOMIC_WORD intptr_t
-#define TRACE_EVENT_API_ATOMIC_LOAD(var) sk_atomic_load(&var, sk_memory_order_relaxed)
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) sk_atomic_load(&var, std::memory_order_relaxed)
 #define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
-    sk_atomic_store(&var, value, sk_memory_order_relaxed)
+    sk_atomic_store(&var, value, std::memory_order_relaxed)
 
 // Defines visibility for classes in trace_event.h
 #define TRACE_EVENT_API_CLASS_EXPORT SK_API
diff --git a/src/core/SkWriter32.h b/src/core/SkWriter32.h
index 4a8273e..dc72c7b 100644
--- a/src/core/SkWriter32.h
+++ b/src/core/SkWriter32.h
@@ -10,6 +10,7 @@
 
 #include "SkData.h"
 #include "SkMatrix.h"
+#include "SkNoncopyable.h"
 #include "SkPath.h"
 #include "SkPoint.h"
 #include "SkPoint3.h"
diff --git a/src/gpu/vk/GrVkResource.h b/src/gpu/vk/GrVkResource.h
index 78b4d31..6584c90 100644
--- a/src/gpu/vk/GrVkResource.h
+++ b/src/gpu/vk/GrVkResource.h
@@ -69,7 +69,7 @@
      */
     GrVkResource() : fRefCnt(1) {
 #ifdef SK_TRACE_VK_RESOURCES
-        fKey = sk_atomic_fetch_add(&fKeyCounter, 1u, sk_memory_order_relaxed);
+        fKey = sk_atomic_fetch_add(&fKeyCounter, 1u, std::memory_order_relaxed);
         GetTrace()->add(this);
 #endif
     }
@@ -92,7 +92,7 @@
      *  Ensures that all previous owner's actions are complete.
      */
     bool unique() const {
-        if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {
+        if (1 == sk_atomic_load(&fRefCnt, std::memory_order_acquire)) {
             // The acquire barrier is only really needed if we return true.  It
             // prevents code conditioned on the result of unique() from running
             // until previous owners are all totally done calling unref().
@@ -106,7 +106,7 @@
      */
     void ref() const {
         SkASSERT(fRefCnt > 0);
-        (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed);  // No barrier required.
+        (void)sk_atomic_fetch_add(&fRefCnt, +1, std::memory_order_relaxed);  // No barrier required.
     }
 
     /** Decrement the reference count. If the reference count is 1 before the
@@ -118,7 +118,7 @@
         SkASSERT(fRefCnt > 0);
         SkASSERT(gpu);
         // A release here acts in place of all releases we "should" have been doing in ref().
-        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, std::memory_order_acq_rel)) {
             // Like unique(), the acquire is only needed on success, to make sure
             // code in internal_dispose() doesn't happen before the decrement.
             this->internal_dispose(gpu);
@@ -129,7 +129,7 @@
     void unrefAndAbandon() const {
         SkASSERT(fRefCnt > 0);
         // A release here acts in place of all releases we "should" have been doing in ref().
-        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, std::memory_order_acq_rel)) {
             // Like unique(), the acquire is only needed on success, to make sure
             // code in internal_dispose() doesn't happen before the decrement.
             this->internal_dispose();
diff --git a/src/utils/SkEventTracer.cpp b/src/utils/SkEventTracer.cpp
index 5fb60bd..6f8141b 100644
--- a/src/utils/SkEventTracer.cpp
+++ b/src/utils/SkEventTracer.cpp
@@ -54,7 +54,7 @@
 }
 
 SkEventTracer* SkEventTracer::GetInstance() {
-    if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, sk_memory_order_acquire)) {
+    if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, std::memory_order_acquire)) {
         return tracer;
     }
     static SkOnce once;