/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

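// Splat a single pixel across all four lanes of the register.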
inline Sk4px::Sk4px(SkPMColor px) : INHERITED(_mm_set1_epi32(px)) {}

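// Load4 and Load2 use unaligned-tolerant loads (16 and 8 bytes respectively);
// Load1 just moves a single 32-bit pixel into the low lane.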
inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
    return Sk16b(_mm_loadu_si128((const __m128i*)px));
}
inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
    return Sk16b(_mm_loadl_epi64((const __m128i*)px));
}
inline Sk4px Sk4px::Load1(const SkPMColor px[1]) { return Sk16b(_mm_cvtsi32_si128(*px)); }

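// The stores mirror the loads above, and are likewise alignment-agnostic.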
inline void Sk4px::store4(SkPMColor px[4]) const { _mm_storeu_si128((__m128i*)px, this->fVec); }
inline void Sk4px::store2(SkPMColor px[2]) const { _mm_storel_epi64((__m128i*)px, this->fVec); }
inline void Sk4px::store1(SkPMColor px[1]) const { *px = _mm_cvtsi128_si32(this->fVec); }

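// widenLo() zero-extends each byte to 16 bits; widenHi() moves each byte into the
// high half of its 16-bit lane instead, which is the same as widenLo() times 256.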
inline Sk4px::Wide Sk4px::widenLo() const {
    return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()),
                 _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128()));
}

inline Sk4px::Wide Sk4px::widenHi() const {
    return Sk16h(_mm_unpacklo_epi8(_mm_setzero_si128(), this->fVec),
                 _mm_unpackhi_epi8(_mm_setzero_si128(), this->fVec));
}

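// 8x8 -> 16 bit multiply: zero-extend both sides, then multiply in 16-bit lanes.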
inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
    return this->widenLo() * Sk4px(other).widenLo();
}

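// (*this + other) >> 8, saturating-packed back down to 4 pixels.  Shifting by 8
// instead of dividing by 255 is the usual fast approximation: e.g. x*a/255 is
// roughly (x*a + x) >> 8, which composes here as (a sketch of intended use, built
// only from functions in this file)
//     px.mulWiden(alphas).addNarrowHi(px.widenLo());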
inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
    Sk4px::Wide r = (*this + other) >> 8;
    return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
}

// Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
// These are safe on x86, often with no speed penalty.

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
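// With SSSE3, a single byte shuffle (_mm_shuffle_epi8) can broadcast each alpha
// wherever we need it.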
inline Sk4px Sk4px::alphas() const {
    static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
    // Copy byte 3 of each pixel (its alpha, given SK_A32_SHIFT == 24) to all four of its bytes.
    __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
    return Sk16b(_mm_shuffle_epi8(this->fVec, splat));
}

inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
    uint32_t as = *(const uint32_t*)a;  // Possibly-unaligned 4-byte load.
    // Broadcast alpha i (byte i of as) to all four bytes of pixel i.
    __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
    return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat));
}
#else
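// Without SSSE3's byte shuffle, we build the same byte layouts out of shifts and ORs.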
inline Sk4px Sk4px::alphas() const {
    static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
    __m128i as = _mm_srli_epi32(this->fVec, 24);   // ___3 ___2 ___1 ___0
    as = _mm_or_si128(as, _mm_slli_si128(as, 1));  // __33 __22 __11 __00
    as = _mm_or_si128(as, _mm_slli_si128(as, 2));  // 3333 2222 1111 0000
    return Sk16b(as);
}

inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
    __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a);  // ____ ____ ____ 3210
    as = _mm_unpacklo_epi8 (as, _mm_setzero_si128());     // ____ ____ _3_2 _1_0
    as = _mm_unpacklo_epi16(as, _mm_setzero_si128());     // ___3 ___2 ___1 ___0
    as = _mm_or_si128(as, _mm_slli_si128(as, 1));         // __33 __22 __11 __00
    as = _mm_or_si128(as, _mm_slli_si128(as, 2));         // 3333 2222 1111 0000
    return Sk16b(as);
}
#endif

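// Load2Alphas zero-extends the two alphas into a uint32_t and reuses Load4Alphas,
// so pixels 2 and 3 come back as zero.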
inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
    uint32_t as = *(const uint16_t*)a;  // Aa -> Aa00
    return Load4Alphas((const SkAlpha*)&as);
}