mtklein | 0e05f38 | 2016-03-22 17:17:44 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2016 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #ifndef Sk4x4f_DEFINED |
| 9 | #define Sk4x4f_DEFINED |
| 10 | |
| 11 | #include "SkNx.h" |
| 12 | |
// A 4x4 matrix of floats, held as four 4-lane vectors: one per channel (r,g,b,a).
// Transpose() converts row-major data (e.g. 4 RGBA pixels) into this
// channel-planar form; transpose() converts back out to row-major.
struct Sk4x4f {
    Sk4f r,g,b,a;  // Channel i of pixel j lives in lane j of the i-th vector.

    // Build from four row vectors, 16 consecutive floats, or 16 interleaved
    // RGBA bytes (each byte widened to float; no scaling is applied).
    static Sk4x4f Transpose(const Sk4f&, const Sk4f&, const Sk4f&, const Sk4f&);
    static Sk4x4f Transpose(const float[16]);
    static Sk4x4f Transpose(const uint8_t[16]);

    // Inverse operations: write back out as four rows, 16 floats, or 16 bytes.
    // The byte store truncates each float; values are assumed to fit in a byte.
    void transpose(Sk4f*, Sk4f*, Sk4f*, Sk4f*) const;
    void transpose( float[16]) const;
    void transpose(uint8_t[16]) const;
};
| 24 | |
mtklein | 1443c69 | 2016-03-23 09:52:13 -0700 | [diff] [blame^] | 25 | // TODO: NEON |
mtklein | 0e05f38 | 2016-03-22 17:17:44 -0700 | [diff] [blame] | 26 | |
mtklein | 1443c69 | 2016-03-23 09:52:13 -0700 | [diff] [blame^] | 27 | #if 1 && !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 |
mtklein | 0e05f38 | 2016-03-22 17:17:44 -0700 | [diff] [blame] | 28 | |
| 29 | inline Sk4x4f Sk4x4f::Transpose(const Sk4f& x, const Sk4f& y, const Sk4f& z, const Sk4f& w) { |
| 30 | auto r = x.fVec, |
| 31 | g = y.fVec, |
| 32 | b = z.fVec, |
| 33 | a = w.fVec; |
| 34 | _MM_TRANSPOSE4_PS(r,g,b,a); |
| 35 | return { r,g,b,a }; |
| 36 | } |
| 37 | |
| 38 | inline Sk4x4f Sk4x4f::Transpose(const float fs[16]) { |
| 39 | return Transpose(Sk4f::Load(fs+0), Sk4f::Load(fs+4), Sk4f::Load(fs+8), Sk4f::Load(fs+12)); |
| 40 | } |
| 41 | |
// Deinterleave 16 RGBA bytes into four float channel vectors (SSE path).
inline Sk4x4f Sk4x4f::Transpose(const uint8_t bs[16]) {
    // One unaligned load grabs all 16 bytes: [r0 g0 b0 a0 r1 g1 b1 a1 ...].
    auto b16 = _mm_loadu_si128((const __m128i*)bs);

    // Each 32-bit lane now holds one whole pixel.  For each channel, shift its
    // byte down to bit 0, mask off the upper bytes, and convert lanes to float.
    auto mask = _mm_set1_epi32(0xFF);
    auto r = _mm_cvtepi32_ps(_mm_and_si128(mask, (b16    ))),
         g = _mm_cvtepi32_ps(_mm_and_si128(mask, _mm_srli_epi32(b16, 8))),
         b = _mm_cvtepi32_ps(_mm_and_si128(mask, _mm_srli_epi32(b16, 16))),
         a = _mm_cvtepi32_ps(                    _mm_srli_epi32(b16, 24));  // top byte: shift alone suffices, no mask needed
    return { r,g,b,a };
}
| 52 | |
| 53 | inline void Sk4x4f::transpose(Sk4f* x, Sk4f* y, Sk4f* z, Sk4f* w) const { |
| 54 | auto R = r.fVec, |
| 55 | G = g.fVec, |
| 56 | B = b.fVec, |
| 57 | A = a.fVec; |
| 58 | _MM_TRANSPOSE4_PS(R,G,B,A); |
| 59 | *x = R; |
| 60 | *y = G; |
| 61 | *z = B; |
| 62 | *w = A; |
| 63 | } |
| 64 | |
| 65 | inline void Sk4x4f::transpose(float fs[16]) const { |
| 66 | Sk4f x,y,z,w; |
| 67 | this->transpose(&x,&y,&z,&w); |
| 68 | x.store(fs+ 0); |
| 69 | y.store(fs+ 4); |
| 70 | z.store(fs+ 8); |
| 71 | w.store(fs+12); |
| 72 | } |
| 73 | |
// Re-interleave the four float channels into 16 RGBA bytes (SSE path).
inline void Sk4x4f::transpose(uint8_t bs[16]) const {
    // Truncate each channel to 32-bit ints, then shift each channel into the
    // byte position it occupies within an interleaved RGBA pixel.
    // NOTE(review): assumes every value already fits in [0,255]; nothing here
    // clamps out-of-range inputs.
    auto R = _mm_cvttps_epi32(r.fVec),
         G = _mm_slli_epi32(_mm_cvttps_epi32(g.fVec),  8),
         B = _mm_slli_epi32(_mm_cvttps_epi32(b.fVec), 16),
         A = _mm_slli_epi32(_mm_cvttps_epi32(a.fVec), 24);
    // OR the four byte planes together and store all 16 bytes with one unaligned store.
    _mm_storeu_si128((__m128i*)bs, _mm_or_si128(A, _mm_or_si128(B, _mm_or_si128(G, R))));
}
| 81 | |
| 82 | #else |
| 83 | |
| 84 | inline Sk4x4f Sk4x4f::Transpose(const Sk4f& x, const Sk4f& y, const Sk4f& z, const Sk4f& w) { |
| 85 | return { |
| 86 | { x[0], y[0], z[0], w[0] }, |
| 87 | { x[1], y[1], z[1], w[1] }, |
| 88 | { x[2], y[2], z[2], w[2] }, |
| 89 | { x[3], y[3], z[3], w[3] }, |
| 90 | }; |
| 91 | } |
| 92 | |
| 93 | inline Sk4x4f Sk4x4f::Transpose(const float fs[16]) { |
| 94 | return Transpose(Sk4f::Load(fs+0), Sk4f::Load(fs+4), Sk4f::Load(fs+8), Sk4f::Load(fs+12)); |
| 95 | } |
| 96 | |
| 97 | inline Sk4x4f Sk4x4f::Transpose(const uint8_t bs[16]) { |
| 98 | return { |
| 99 | { (float)bs[0], (float)bs[4], (float)bs[ 8], (float)bs[12] }, |
| 100 | { (float)bs[1], (float)bs[5], (float)bs[ 9], (float)bs[13] }, |
| 101 | { (float)bs[2], (float)bs[6], (float)bs[10], (float)bs[14] }, |
| 102 | { (float)bs[3], (float)bs[7], (float)bs[11], (float)bs[15] }, |
| 103 | }; |
| 104 | } |
| 105 | |
| 106 | inline void Sk4x4f::transpose(Sk4f* x, Sk4f* y, Sk4f* z, Sk4f* w) const { |
| 107 | *x = { r[0], g[0], b[0], a[0] }; |
| 108 | *y = { r[1], g[1], b[1], a[1] }; |
| 109 | *z = { r[2], g[2], b[2], a[2] }; |
| 110 | *w = { r[3], g[3], b[3], a[3] }; |
| 111 | } |
| 112 | |
| 113 | inline void Sk4x4f::transpose(float fs[16]) const { |
| 114 | Sk4f x,y,z,w; |
| 115 | this->transpose(&x,&y,&z,&w); |
| 116 | x.store(fs+ 0); |
| 117 | y.store(fs+ 4); |
| 118 | z.store(fs+ 8); |
| 119 | w.store(fs+12); |
| 120 | } |
| 121 | |
| 122 | inline void Sk4x4f::transpose(uint8_t bs[16]) const { |
| 123 | bs[ 0] = (uint8_t)r[0]; bs[ 1] = (uint8_t)g[0]; bs[ 2] = (uint8_t)b[0]; bs[ 3] = (uint8_t)a[0]; |
| 124 | bs[ 4] = (uint8_t)r[1]; bs[ 5] = (uint8_t)g[1]; bs[ 6] = (uint8_t)b[1]; bs[ 7] = (uint8_t)a[1]; |
| 125 | bs[ 8] = (uint8_t)r[2]; bs[ 9] = (uint8_t)g[2]; bs[10] = (uint8_t)b[2]; bs[11] = (uint8_t)a[2]; |
| 126 | bs[12] = (uint8_t)r[3]; bs[13] = (uint8_t)g[3]; bs[14] = (uint8_t)b[3]; bs[15] = (uint8_t)a[3]; |
| 127 | } |
| 128 | |
| 129 | #endif |
| 130 | |
| 131 | #endif//Sk4x4f_DEFINED |