Sk4x_sse.h: add an SSE (SSE2 baseline, SSE4.1 where available) implementation of Sk4f and Sk4i

CQ_EXTRA_TRYBOTS=client.skia:Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Release-Trybot,Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Release-Trybot,Test-Win7-ShuttleA-HD2000-x86-Debug-GDI-Trybot

BUG=skia:

Committed: https://skia.googlesource.com/skia/+/e4bf793120d3bfc9b003d11880a3fb73ff2b89e9

Review URL: https://codereview.chromium.org/698873003
diff --git a/src/core/Sk4x.h b/src/core/Sk4x.h
index 5b15560..7c246df 100644
--- a/src/core/Sk4x.h
+++ b/src/core/Sk4x.h
@@ -4,13 +4,22 @@
 #include "SkTypes.h"
 
 #define SK4X_PREAMBLE 1
-    #include "Sk4x_portable.h"
+    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+        #include "Sk4x_sse.h"
+    #else
+        #include "Sk4x_portable.h"
+    #endif
 #undef SK4X_PREAMBLE
 
 template <typename T> class Sk4x;
 typedef Sk4x<float>   Sk4f;
 typedef Sk4x<int32_t> Sk4i;
 
+// Some Sk4x methods are implemented only for Sk4f or Sk4i.
+// They might be unavailable, really slow, or just a bad idea.
+// Talk to mtklein if you find yourself unable to link and
+// really need one of those methods.
+
 template <typename T> class Sk4x {
 public:
     Sk4x();  // Uninitialized; use Sk4x(0,0,0,0) for zero.
@@ -34,6 +43,7 @@
     Sk4x   bitNot()            const;
     Sk4x   bitAnd(const Sk4x&) const;
     Sk4x    bitOr(const Sk4x&) const;
+    // TODO: Sk4x bitAndNot(const Sk4x&) const; is efficient in SSE.
     Sk4x      add(const Sk4x&) const;
     Sk4x subtract(const Sk4x&) const;
     Sk4x multiply(const Sk4x&) const;
@@ -56,15 +66,27 @@
     static Sk4x XYAB(const Sk4x& xyzw, const Sk4x& abcd);
     static Sk4x ZWCD(const Sk4x& xyzw, const Sk4x& abcd);
 
+    // TODO: these are particularly efficient in SSE.  Useful?  Also efficient in NEON?
+    // static Sk4x XAYB(const Sk4x& xyzw, const Sk4x& abcd);
+    // static Sk4x ZCWD(const Sk4x& xyzw, const Sk4x& abcd);
+
 private:
     // It's handy to have Sk4f and Sk4i be mutual friends.
     template <typename S> friend class Sk4x;
 
 #define SK4X_PRIVATE 1
-    #include "Sk4x_portable.h"
+    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+        #include "Sk4x_sse.h"
+    #else
+        #include "Sk4x_portable.h"
+    #endif
 #undef SK4X_PRIVATE
 };
 
-#include "Sk4x_portable.h"
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+    #include "Sk4x_sse.h"
+#else
+    #include "Sk4x_portable.h"
+#endif
 
 #endif//Sk4x_DEFINED
diff --git a/src/core/Sk4x_sse.h b/src/core/Sk4x_sse.h
new file mode 100644
index 0000000..246abd3
--- /dev/null
+++ b/src/core/Sk4x_sse.h
@@ -0,0 +1,177 @@
+// It is important _not_ to put header guards here.
+// This file will be intentionally included three times.
+
+// Useful reading:
+//   https://software.intel.com/sites/landingpage/IntrinsicsGuide/
+
+#if defined(SK4X_PREAMBLE)
+    // Code in this file may assume SSE and SSE2.
+    #include <emmintrin.h>
+
+    // Anything beyond SSE2 must be guarded by an explicit SK_CPU_SSE_LEVEL check, as SSE4.1 is here.
+    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+        #include <immintrin.h>
+    #endif
+
+    // A little bit of template metaprogramming to map
+    // float to __m128 and int32_t to __m128i.
+    template <typename T> struct SkScalarToSIMD;
+    template <> struct SkScalarToSIMD<float>   { typedef __m128  Type; };
+    template <> struct SkScalarToSIMD<int32_t> { typedef __m128i Type; };
+
+    // These casts are all free: they compile down to zero instructions.
+    // MSVC insists we use _mm_castA_B(a) instead of (B)a.
+    static __m128  as_4f(__m128i v) { return _mm_castsi128_ps(v); }
+    static __m128  as_4f(__m128  v) { return                  v ; }
+    static __m128i as_4i(__m128i v) { return                  v ; }
+    static __m128i as_4i(__m128  v) { return _mm_castps_si128(v); }
+
+#elif defined(SK4X_PRIVATE)
+    // It'd be slightly faster to call _mm_cmpeq_epi32() on an uninitialized register and itself,
+    // but that has caused hard-to-debug issues: compilers may treat the read of uninitialized
+    // memory as undefined behavior and optimize the whole expression away.
+    static __m128i True()  { return _mm_set1_epi8(~0); }
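+    // For reference only, a sketch of the rejected trick (do not use: reading an
+    // uninitialized register is undefined behavior, which compilers may exploit):
+    //     __m128i u;                    // deliberately uninitialized
+    //     return _mm_cmpeq_epi32(u, u); // "equal to itself", but relies on UB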
+
+    // Leaving these conversion constructors implicit (not marked explicit) keeps the code below less noisy.
+    Sk4x(__m128i);
+    Sk4x(__m128);
+
+    Sk4x andNot(const Sk4x&) const;
+
+    typename SkScalarToSIMD<T>::Type fVec;
+
+#else//Method definitions.
+
+// It helps to define these conversion constructors before anything else.
+template <> inline Sk4f::Sk4x(__m128i v) : fVec(as_4f(v)) {}
+template <> inline Sk4f::Sk4x(__m128  v) : fVec(      v ) {}
+template <> inline Sk4i::Sk4x(__m128i v) : fVec(      v ) {}
+template <> inline Sk4i::Sk4x(__m128  v) : fVec(as_4i(v)) {}
+
+// Next, methods whose implementation is the same for Sk4f and Sk4i.
+template <typename T> Sk4x<T>::Sk4x() {}
+template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
+template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
+    fVec = other.fVec;
+    return *this;
+}
+
+// We type-pun a little in these shuffle methods so each can use the fastest / most widely
+// available intrinsic.  They're all bit-preserving operations, so it shouldn't matter.
+
+template <typename T>
+Sk4x<T> Sk4x<T>::zwxy() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(1,0,3,2)); }
+
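+// For reference: _mm_movelh_ps(a, b) yields [a0,a1,b0,b1] and _mm_movehl_ps(b, a) yields
+// [a2,a3,b2,b3], which are exactly XYAB and ZWCD below.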
+template <typename T>
+Sk4x<T> Sk4x<T>::XYAB(const Sk4x<T>& a, const Sk4x<T>& b) {
+    return _mm_movelh_ps(as_4f(a.fVec), as_4f(b.fVec));
+}
+
+template <typename T>
+Sk4x<T> Sk4x<T>::ZWCD(const Sk4x<T>& a, const Sk4x<T>& b) {
+    return _mm_movehl_ps(as_4f(b.fVec), as_4f(a.fVec));
+}
+
+// Now we'll write all the Sk4f-specific methods.  This M() macro will remove some noise.
+#define M(...) template <> inline __VA_ARGS__ Sk4f::
+
+M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {}
+
+M(Sk4f) Load       (const float fs[4]) { return _mm_loadu_ps(fs); }
+M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); }
+
+M(void) store       (float fs[4]) const { _mm_storeu_ps(fs, fVec); }
+M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); }
+
+template <> template <>
+Sk4i Sk4f::reinterpret<Sk4i>() const { return as_4i(fVec); }
+
+template <> template <>
+Sk4i Sk4f::cast<Sk4i>() const { return _mm_cvtps_epi32(fVec); }
+
+// We're going to try a little experiment here and skip allTrue(), anyTrue(), and bit-manipulators
+// for Sk4f.  Code that calls them probably does so accidentally.
+// Ask mtklein to fill these in if you really need them.
+
+M(Sk4f) add     (const Sk4f& o) const { return _mm_add_ps(fVec, o.fVec); }
+M(Sk4f) subtract(const Sk4f& o) const { return _mm_sub_ps(fVec, o.fVec); }
+M(Sk4f) multiply(const Sk4f& o) const { return _mm_mul_ps(fVec, o.fVec); }
+M(Sk4f) divide  (const Sk4f& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+M(Sk4i) equal           (const Sk4f& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
+M(Sk4i) notEqual        (const Sk4f& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
+M(Sk4i) lessThan        (const Sk4f& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
+M(Sk4i) greaterThan     (const Sk4f& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
+M(Sk4i) lessThanEqual   (const Sk4f& o) const { return _mm_cmple_ps (fVec, o.fVec); }
+M(Sk4i) greaterThanEqual(const Sk4f& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
+
+M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return _mm_min_ps(a.fVec, b.fVec); }
+M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return _mm_max_ps(a.fVec, b.fVec); }
+
+// Now we'll write all the Sk4i-specific methods.  Same deal for M().
+#undef M
+#define M(...) template <> inline __VA_ARGS__ Sk4i::
+
+M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {}
+
+M(Sk4i) Load       (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); }
+M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); }
+
+M(void) store       (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); }
+M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); }
+
+template <> template <>
+Sk4f Sk4i::reinterpret<Sk4f>() const { return as_4f(fVec); }
+
+template <> template <>
+Sk4f Sk4i::cast<Sk4f>() const { return _mm_cvtepi32_ps(fVec); }
+
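+// allTrue()/anyTrue() inspect only each lane's sign bit (via _mm_movemask_ps), which is
+// sufficient because comparison results set every bit in a lane to either 1 or 0.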
+M(bool) allTrue() const { return 0xf == _mm_movemask_ps(as_4f(fVec)); }
+M(bool) anyTrue() const { return 0x0 != _mm_movemask_ps(as_4f(fVec)); }
+
+M(Sk4i) bitNot() const { return _mm_xor_si128(fVec, True()); }
+M(Sk4i) bitAnd(const Sk4i& o) const { return _mm_and_si128(fVec, o.fVec); }
+M(Sk4i) bitOr (const Sk4i& o) const { return _mm_or_si128 (fVec, o.fVec); }
+
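+// SSE2 only provides integer ==, <, and > comparisons; !=, <=, and >= are built below by
+// inverting their opposites with bitNot().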
+M(Sk4i) equal           (const Sk4i& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+M(Sk4i) lessThan        (const Sk4i& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
+M(Sk4i) greaterThan     (const Sk4i& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
+M(Sk4i) notEqual        (const Sk4i& o) const { return this->      equal(o).bitNot();  }
+M(Sk4i) lessThanEqual   (const Sk4i& o) const { return this->greaterThan(o).bitNot();  }
+M(Sk4i) greaterThanEqual(const Sk4i& o) const { return this->   lessThan(o).bitNot();  }
+
+M(Sk4i) add     (const Sk4i& o) const { return _mm_add_epi32(fVec, o.fVec); }
+M(Sk4i) subtract(const Sk4i& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+
+// SSE doesn't have integer division.  Let's see how far we can get without Sk4i::divide().
+
+// Sk4i's multiply(), Min(), and Max() all improve significantly with SSE4.1.
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+    M(Sk4i) multiply(const Sk4i& o) const { return _mm_mullo_epi32(fVec, o.fVec); }
+    M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return _mm_min_epi32(a.fVec, b.fVec); }
+    M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return _mm_max_epi32(a.fVec, b.fVec); }
+#else
+    M(Sk4i) multiply(const Sk4i& o) const {
+        // Do two 32->64 bit multiplies: lanes 0 and 2 directly, lanes 1 and 3 after
+        // shifting both inputs right by one lane.
+        __m128i mul02 = _mm_mul_epu32(fVec, o.fVec),
+                mul13 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
+        // Keep only the low 32 bits of each 64-bit product and interleave them back into order.
+        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul02, _MM_SHUFFLE(0,0,2,0)),
+                                  _mm_shuffle_epi32(mul13, _MM_SHUFFLE(0,0,2,0)));
+    }
+
+    M(Sk4i) andNot(const Sk4i& o) const { return _mm_andnot_si128(o.fVec, fVec); }
+
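+    // Min()/Max() below blend using the comparison mask: with less = (a < b) all-1s or
+    // all-0s per lane, Min(a,b) = (a & less) | (b & ~less), and Max(a,b) swaps a and b.
+    // andNot(o) computes this & ~o, which supplies the (x & ~less) term.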
+    M(Sk4i) Min(const Sk4i& a, const Sk4i& b) {
+        Sk4i less = a.lessThan(b);
+        return a.bitAnd(less).bitOr(b.andNot(less));
+    }
+    M(Sk4i) Max(const Sk4i& a, const Sk4i& b) {
+        Sk4i less = a.lessThan(b);
+        return b.bitAnd(less).bitOr(a.andNot(less));
+    }
+#endif
+
+#undef M
+
+#endif//Method definitions.
diff --git a/tests/Sk4xTest.cpp b/tests/Sk4xTest.cpp
index 130693b..d6e7e51 100644
--- a/tests/Sk4xTest.cpp
+++ b/tests/Sk4xTest.cpp
@@ -15,9 +15,18 @@
     ASSERT_EQ(baz, foo);
 }
 
+struct AlignedFloats {
+    Sk4f forces16ByteAlignment;   // On 64-bit machines, the stack starts 128-bit aligned,
+    float fs[5];                  // but not necessarily so on 32-bit.  Adding an Sk4f forces it.
+};
+
 DEF_TEST(Sk4x_LoadStore, r) {
+    AlignedFloats aligned;
     // fs will be 16-byte aligned, fs+1 not.
-    float fs[] = { 5,6,7,8,9 };
+    float* fs = aligned.fs;
+    for (int i = 0; i < 5; i++) {  // set to 5,6,7,8,9
+        fs[i] = float(i+5);
+    }
 
     Sk4f foo = Sk4f::Load(fs);
     Sk4f bar = Sk4f::LoadAligned(fs);