/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

namespace {  // NOLINT(google-build-namespaces)

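// widen() zero-extends each of the 16 bytes to 16 bits: interleaving with a
// zero register makes the high byte of every 16-bit lane 0, and the lo/hi
// unpacks split the sixteen lanes across the Wide vector's two halves.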
inline Sk4px::Wide Sk4px::widen() const {
    return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()),
                 _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128()));
}

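// Multiplying two bytes produces at most 255*255 = 65025, which fits in
// 16 bits, so widening both operands first keeps the products exact.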
inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
    return this->widen() * Sk4px(other).widen();
}

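// Add the two wide vectors, keep the high byte of each 16-bit sum, and
// narrow back to bytes with unsigned saturation.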
inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
    Sk4px::Wide r = (*this + other) >> 8;
    return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
}

inline Sk4px Sk4px::Wide::div255() const {
    // (x + 127) / 255 == ((x+128) * 257)>>16,
    // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
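    // E.g. x = 255*255 = 65025: (65025+128)*257 = 16744321, and
    // 16744321>>16 = 255, matching (65025+127)/255. _mm_mulhi_epu16 returns
    // the high 16 bits of each unsigned 16x16->32 product, i.e. exactly that >>16.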
    const __m128i _128 = _mm_set1_epi16(128),
                  _257 = _mm_set1_epi16(257);
    return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
                                  _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
}

// Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
// These are safe on x86, often with no speed penalty.
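// (The memcpy below is the portable way to spell such a load; compilers
// typically lower it to a single mov rather than an actual function call.)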

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
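    // With SK_A32_SHIFT == 24 and little-endian byte order, the four alphas
    // sit at byte offsets 3, 7, 11, and 15; _mm_shuffle_epi8 broadcasts each
    // one across its pixel's four bytes in a single instruction.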
    inline Sk4px Sk4px::alphas() const {
        static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
        __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
        return Sk16b(_mm_shuffle_epi8(this->fVec, splat));
    }

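    // Same trick for loading: put the four alphas in the low 32 bits, then
    // have _mm_shuffle_epi8 copy alpha i into all four bytes of pixel i.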
    inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
        uint32_t as;
        memcpy(&as, a, 4);
        __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
        return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat));
    }
#else
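    // Without SSSE3's byte shuffle we lean on arithmetic instead: in a
    // premultiplied pixel each of r, g, b is at most a, so a byte-wise max
    // of the pixel with shifted copies of itself leaves alpha in every byte.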
    inline Sk4px Sk4px::alphas() const {
        static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
        // We exploit that A >= rgb for any premul pixel.
        __m128i as = fVec;                             // 3xxx 2xxx 1xxx 0xxx
        as = _mm_max_epu8(as, _mm_srli_epi32(as,  8)); // 33xx 22xx 11xx 00xx
        as = _mm_max_epu8(as, _mm_srli_epi32(as, 16)); // 3333 2222 1111 0000
        return Sk16b(as);
    }

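    // Unpacking a register with itself duplicates each byte; doing that at
    // 8-bit then 16-bit granularity turns 3210 into 3333 2222 1111 0000.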
    inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
        __m128i as;
        memcpy(&as, a, 4);                  // ____ ____ ____ 3210
        as = _mm_unpacklo_epi8 (as, as);    // ____ ____ 3322 1100
        as = _mm_unpacklo_epi16(as, as);    // 3333 2222 1111 0000
        return Sk16b(as);
    }
#endif

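// Zero-extending the 16-bit pair to 32 bits leaves the upper two alpha slots
// zero, so Load4Alphas returns the two real alphas followed by two zeros.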
inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
    uint16_t alphas;
    memcpy(&alphas, a, 2);
    uint32_t alphas_and_two_zeros = alphas;  // Aa -> Aa00

    return Load4Alphas((const SkAlpha*)&alphas_and_two_zeros);
}

}  // namespace