Remove SkNx_abi: it is unused.

CQ_INCLUDE_TRYBOTS=skia.primary:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD

Change-Id: I082c34a1f484715cd2dca55a8d23101235755e6a
Reviewed-on: https://skia-review.googlesource.com/5233
Reviewed-by: Mike Klein <mtklein@chromium.org>
Commit-Queue: Mike Klein <mtklein@chromium.org>
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 8431afb..a75495c 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -14,11 +14,6 @@
 #include <limits>
 #include <type_traits>
 
-// These _abi types are data-only, and so can be used to store SkNx in structs or
-// pass them as function parameters or return values, even across compilation units.
-template <int N, typename T> struct SkNx_abi      { SkNx_abi<N/2,T> lo, hi; };
-template <       typename T> struct SkNx_abi<1,T> {              T     val; };
-
 // Every single SkNx method wants to be fully inlined.  (We know better than MSVC).
 #define AI SK_ALWAYS_INLINE
 
@@ -47,9 +42,6 @@
         static_assert(N==16, "");
     }
 
-    AI SkNx(const SkNx_abi<N,T>& a) : fLo(a.lo), fHi(a.hi) {}
-    AI operator SkNx_abi<N,T>() const { return { (SkNx_abi<N/2,T>)fLo, (SkNx_abi<N/2,T>)fHi }; }
-
     AI T operator[](int k) const {
         SkASSERT(0 <= k && k < N);
         return k < N/2 ? fLo[k] : fHi[k-N/2];
@@ -137,9 +129,6 @@
     AI SkNx() = default;
     AI SkNx(T v) : fVal(v) {}
 
-    AI SkNx(const SkNx_abi<1,T>& a) : fVal(a.val) {}
-    AI operator SkNx_abi<1,T>() const { return { fVal }; }
-
     // Android complains against unused parameters, so we guard it
     AI T operator[](int SkDEBUGCODE(k)) const {
         SkASSERT(k == 0);
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index b716cb6..83873a7 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -12,8 +12,6 @@
 
 #define SKNX_IS_FAST
 
-template <> struct SkNx_abi<4,float> { float32x4_t vec; };
-
 namespace {
 
 // ARMv8 has vrndmq_f32 to floor 4 floats.  Here we emulate it:
@@ -113,9 +111,6 @@
     AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
     AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
 
-    AI SkNx(const SkNx_abi<4,float>& a) : fVec(a.vec) {}
-    AI operator SkNx_abi<4,float>() const { return { fVec }; }
-
     AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
     AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }
 
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index fa9f4cd..554d65d 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -15,11 +15,6 @@
 
 #define SKNX_IS_FAST
 
-template <> struct SkNx_abi<4,float> { __m128 vec; };
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    template <> struct SkNx_abi<8,float> { __m256 vec; };
-#endif
-
 namespace {
 
 template <>
@@ -76,9 +71,6 @@
     AI SkNx(float val)           : fVec( _mm_set1_ps(val) ) {}
     AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
 
-    AI SkNx(const SkNx_abi<4,float>& a) : fVec(a.vec) {}
-    AI operator SkNx_abi<4,float>() const { return { fVec }; }
-
     AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
     AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }
 
@@ -562,9 +554,6 @@
         AI SkNx(float a, float b, float c, float d,
                 float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}
 
-        AI SkNx(const SkNx_abi<8,float>& a) : fVec(a.vec) {}
-        AI operator SkNx_abi<8,float>() const { return { fVec }; }
-
         AI static SkNx Load(const void* ptr) { return _mm256_loadu_ps((const float*)ptr); }
         AI void store(void* ptr) const { _mm256_storeu_ps((float*)ptr, fVec); }