/*
 * Copyright 2014 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkColor_opts_SSE2_DEFINED
#define SkColor_opts_SSE2_DEFINED

#include <emmintrin.h>

#define ASSERT_EQ(a,b) SkASSERT(0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8((a), (b))))

// Because SSE2 has no _mm_mul_epi32(), we emulate it here.
// Multiplies 4 32-bit integers from a by 4 32-bit integers from b.
// Each of the 4 multiplication results must fit in a 32-bit integer,
// otherwise it would overflow.
static inline __m128i Multiply32_SSE2(const __m128i& a, const __m128i& b) {
    // Calculate results of a0 * b0 and a2 * b2.
    __m128i r1 = _mm_mul_epu32(a, b);
    // Calculate results of a1 * b1 and a3 * b3.
    __m128i r2 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));
    // Shuffle the low 32 bits of each product into the low lanes and interleave them.
    __m128i r = _mm_unpacklo_epi32(_mm_shuffle_epi32(r1, _MM_SHUFFLE(0,0,2,0)),
                                   _mm_shuffle_epi32(r2, _MM_SHUFFLE(0,0,2,0)));
    return r;
}

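// Usage sketch (illustrative, not part of the original header): Multiply32_SSE2
// multiplies corresponding 32-bit lanes, e.g. {1,2,3,4} * {5,6,7,8} = {5,12,21,32}:
//
//   __m128i a = _mm_setr_epi32(1, 2, 3, 4);
//   __m128i b = _mm_setr_epi32(5, 6, 7, 8);
//   __m128i p = Multiply32_SSE2(a, b);   // p holds {5, 12, 21, 32}
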
static inline __m128i SkAlpha255To256_SSE2(const __m128i& alpha) {
    return _mm_add_epi32(alpha, _mm_set1_epi32(1));
}

// See #define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b) in SkXfermode.cpp.
static inline __m128i SkAlphaMulAlpha_SSE2(const __m128i& a,
                                           const __m128i& b) {
    __m128i prod = _mm_mullo_epi16(a, b);
    prod = _mm_add_epi32(prod, _mm_set1_epi32(128));
    prod = _mm_add_epi32(prod, _mm_srli_epi32(prod, 8));
    prod = _mm_srli_epi32(prod, 8);

    return prod;
}

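// A scalar sketch of the rounding trick above (assuming a and b are in [0, 255]):
//
//   uint32_t prod = a * b + 128;
//   uint32_t result = (prod + (prod >> 8)) >> 8;   // ~= (a * b) / 255, rounded
//
// For example, a = 100, b = 200 gives prod = 20128 and result = 78
// (20000 / 255 = 78.4, rounded to 78).
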
// The portable version, SkAlphaMulQ, is in SkColorPriv.h.
static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const __m128i& scale) {
    const __m128i mask = _mm_set1_epi32(0xFF00FF);
    __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);

    // uint32_t rb = ((c & mask) * scale) >> 8
    __m128i rb = _mm_and_si128(mask, c);
    rb = _mm_mullo_epi16(rb, s);
    rb = _mm_srli_epi16(rb, 8);

    // uint32_t ag = ((c >> 8) & mask) * scale
    __m128i ag = _mm_srli_epi16(c, 8);
    ASSERT_EQ(ag, _mm_and_si128(mask, ag));  // ag = _mm_srli_epi16(c, 8) did this for us.
    ag = _mm_mullo_epi16(ag, s);

    // (rb & mask) | (ag & ~mask)
    ASSERT_EQ(rb, _mm_and_si128(mask, rb));  // rb = _mm_srli_epi16(rb, 8) did this for us.
    ag = _mm_andnot_si128(mask, ag);
    return _mm_or_si128(rb, ag);
}

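// For reference, a scalar sketch of what is being vectorized here (it mirrors the
// commented lines above; the real portable code is SkAlphaMulQ in SkColorPriv.h):
//
//   uint32_t rb = ((c & 0xFF00FF) * scale) >> 8;
//   uint32_t ag = ((c >> 8) & 0xFF00FF) * scale;
//   uint32_t result = (rb & 0xFF00FF) | (ag & ~0xFF00FF);
//
// Red/blue and alpha/green are scaled as pairs, two channels per multiply.
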
// Fast path for SkAlphaMulQ_SSE2 with a constant scale factor.
static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const unsigned scale) {
    const __m128i mask = _mm_set1_epi32(0xFF00FF);
    __m128i s = _mm_set1_epi16(scale << 8);  // Move scale factor to upper byte of word.

    // With mulhi, red and blue values are already in the right place and
    // don't need to be divided by 256.
    __m128i rb = _mm_and_si128(mask, c);
    rb = _mm_mulhi_epu16(rb, s);

    __m128i ag = _mm_andnot_si128(mask, c);
    ag = _mm_mulhi_epu16(ag, s);  // Alpha and green values are in the higher byte of each word.
    ag = _mm_andnot_si128(mask, ag);

    return _mm_or_si128(rb, ag);
}

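// Worked example of the mulhi trick (illustrative): with scale = 128, every word
// of s holds 128 << 8 = 0x8000, so a red/blue word x = 0x00FF becomes
// (0xFF * 0x8000) >> 16 = 0x7F, i.e. (x * scale) >> 8 with no extra shift needed.
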
static inline __m128i SkGetPackedA32_SSE2(const __m128i& src) {
#if SK_A32_SHIFT == 24                 // It's very common (universal?) that alpha is the top byte.
    return _mm_srli_epi32(src, 24);    // You'd hope the compiler would remove the left shift then,
#else                                  // but I've seen Clang just do a dumb left shift of zero. :(
    __m128i a = _mm_slli_epi32(src, (24 - SK_A32_SHIFT));
    return _mm_srli_epi32(a, 24);
#endif
}

static inline __m128i SkGetPackedR32_SSE2(const __m128i& src) {
    __m128i r = _mm_slli_epi32(src, (24 - SK_R32_SHIFT));
    return _mm_srli_epi32(r, 24);
}

static inline __m128i SkGetPackedG32_SSE2(const __m128i& src) {
    __m128i g = _mm_slli_epi32(src, (24 - SK_G32_SHIFT));
    return _mm_srli_epi32(g, 24);
}

static inline __m128i SkGetPackedB32_SSE2(const __m128i& src) {
    __m128i b = _mm_slli_epi32(src, (24 - SK_B32_SHIFT));
    return _mm_srli_epi32(b, 24);
}

static inline __m128i SkMul16ShiftRound_SSE2(const __m128i& a,
                                             const __m128i& b, int shift) {
    __m128i prod = _mm_mullo_epi16(a, b);
    prod = _mm_add_epi16(prod, _mm_set1_epi16(1 << (shift - 1)));
    prod = _mm_add_epi16(prod, _mm_srli_epi16(prod, shift));
    prod = _mm_srli_epi16(prod, shift);

    return prod;
}

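// A scalar sketch of the per-lane rounding above:
//
//   uint32_t prod = a * b + (1 << (shift - 1));
//   uint32_t result = (prod + (prod >> shift)) >> shift;
//
// which approximates a * b / ((1 << shift) - 1) with rounding; e.g. with
// shift = 5 (divide by 31), a = 31 and b = 31 give prod = 977 and result = 31.
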
static inline __m128i SkPackRGB16_SSE2(const __m128i& r,
                                       const __m128i& g, const __m128i& b) {
    __m128i dr = _mm_slli_epi16(r, SK_R16_SHIFT);
    __m128i dg = _mm_slli_epi16(g, SK_G16_SHIFT);
    __m128i db = _mm_slli_epi16(b, SK_B16_SHIFT);

    __m128i c = _mm_or_si128(dr, dg);
    return _mm_or_si128(c, db);
}

static inline __m128i SkPackARGB32_SSE2(const __m128i& a, const __m128i& r,
                                        const __m128i& g, const __m128i& b) {
    __m128i da = _mm_slli_epi32(a, SK_A32_SHIFT);
    __m128i dr = _mm_slli_epi32(r, SK_R32_SHIFT);
    __m128i dg = _mm_slli_epi32(g, SK_G32_SHIFT);
    __m128i db = _mm_slli_epi32(b, SK_B32_SHIFT);

    __m128i c = _mm_or_si128(da, dr);
    c = _mm_or_si128(c, dg);
    return _mm_or_si128(c, db);
}

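// Usage sketch (illustrative): the SkGetPacked*32_SSE2 helpers and
// SkPackARGB32_SSE2 round-trip four packed pixels at a time:
//
//   __m128i px = _mm_set1_epi32(0x80402010);     // four identical packed pixels
//   __m128i a  = SkGetPackedA32_SSE2(px);
//   __m128i r  = SkGetPackedR32_SSE2(px);
//   __m128i g  = SkGetPackedG32_SSE2(px);
//   __m128i b  = SkGetPackedB32_SSE2(px);
//   __m128i rt = SkPackARGB32_SSE2(a, r, g, b);  // equals px again
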
static inline __m128i SkPacked16ToR32_SSE2(const __m128i& src) {
    __m128i r = _mm_srli_epi32(src, SK_R16_SHIFT);
    r = _mm_and_si128(r, _mm_set1_epi32(SK_R16_MASK));
    r = _mm_or_si128(_mm_slli_epi32(r, (8 - SK_R16_BITS)),
                     _mm_srli_epi32(r, (2 * SK_R16_BITS - 8)));

    return r;
}

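// The or-of-shifts above replicates the top bits of the 5-bit value into the low
// bits of the 8-bit result, so 0 maps to 0 and 31 maps to 255. For example,
// r5 = 0b10110 (22) becomes (22 << 3) | (22 >> 2) = 0b10110101 (181), matching
// round(22 * 255 / 31) = 181. The green and blue versions below do the same for
// their bit widths.
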
static inline __m128i SkPacked16ToG32_SSE2(const __m128i& src) {
    __m128i g = _mm_srli_epi32(src, SK_G16_SHIFT);
    g = _mm_and_si128(g, _mm_set1_epi32(SK_G16_MASK));
    g = _mm_or_si128(_mm_slli_epi32(g, (8 - SK_G16_BITS)),
                     _mm_srli_epi32(g, (2 * SK_G16_BITS - 8)));

    return g;
}

static inline __m128i SkPacked16ToB32_SSE2(const __m128i& src) {
    __m128i b = _mm_srli_epi32(src, SK_B16_SHIFT);
    b = _mm_and_si128(b, _mm_set1_epi32(SK_B16_MASK));
    b = _mm_or_si128(_mm_slli_epi32(b, (8 - SK_B16_BITS)),
                     _mm_srli_epi32(b, (2 * SK_B16_BITS - 8)));

    return b;
}

static inline __m128i SkPixel16ToPixel32_SSE2(const __m128i& src) {
    __m128i r = SkPacked16ToR32_SSE2(src);
    __m128i g = SkPacked16ToG32_SSE2(src);
    __m128i b = SkPacked16ToB32_SSE2(src);

    return SkPackARGB32_SSE2(_mm_set1_epi32(0xFF), r, g, b);
}

static inline __m128i SkPixel32ToPixel16_ToU16_SSE2(const __m128i& src_pixel1,
                                                    const __m128i& src_pixel2) {
    // Calculate result r.
    __m128i r1 = _mm_srli_epi32(src_pixel1,
                                SK_R32_SHIFT + (8 - SK_R16_BITS));
    r1 = _mm_and_si128(r1, _mm_set1_epi32(SK_R16_MASK));
    __m128i r2 = _mm_srli_epi32(src_pixel2,
                                SK_R32_SHIFT + (8 - SK_R16_BITS));
    r2 = _mm_and_si128(r2, _mm_set1_epi32(SK_R16_MASK));
    __m128i r = _mm_packs_epi32(r1, r2);

    // Calculate result g.
    __m128i g1 = _mm_srli_epi32(src_pixel1,
                                SK_G32_SHIFT + (8 - SK_G16_BITS));
    g1 = _mm_and_si128(g1, _mm_set1_epi32(SK_G16_MASK));
    __m128i g2 = _mm_srli_epi32(src_pixel2,
                                SK_G32_SHIFT + (8 - SK_G16_BITS));
    g2 = _mm_and_si128(g2, _mm_set1_epi32(SK_G16_MASK));
    __m128i g = _mm_packs_epi32(g1, g2);

    // Calculate result b.
    __m128i b1 = _mm_srli_epi32(src_pixel1,
                                SK_B32_SHIFT + (8 - SK_B16_BITS));
    b1 = _mm_and_si128(b1, _mm_set1_epi32(SK_B16_MASK));
    __m128i b2 = _mm_srli_epi32(src_pixel2,
                                SK_B32_SHIFT + (8 - SK_B16_BITS));
    b2 = _mm_and_si128(b2, _mm_set1_epi32(SK_B16_MASK));
    __m128i b = _mm_packs_epi32(b1, b2);

    // Store 8 16-bit colors in dst.
    __m128i d_pixel = SkPackRGB16_SSE2(r, g, b);

    return d_pixel;
}

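// Usage sketch (illustrative; src and dst are hypothetical uint32_t*/uint16_t*
// pointers): eight 32-bit pixels go in as two vectors, eight 16-bit pixels come
// out as one:
//
//   __m128i lo = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src));      // pixels 0-3
//   __m128i hi = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src + 4));  // pixels 4-7
//   _mm_storeu_si128(reinterpret_cast<__m128i*>(dst),
//                    SkPixel32ToPixel16_ToU16_SSE2(lo, hi));
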
// Portable version is SkPMSrcOver in SkColorPriv.h.
static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) {
    return _mm_add_epi32(src,
                         SkAlphaMulQ_SSE2(dst, _mm_sub_epi32(_mm_set1_epi32(256),
                                                             SkGetPackedA32_SSE2(src))));
}

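// A scalar sketch of the premultiplied src-over above (per pixel):
//
//   result = src + SkAlphaMulQ(dst, 256 - SkGetPackedA32(src));
//
// i.e. dst is scaled by the inverse of the source alpha and added to src.
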
// Portable version is SkBlendARGB32 in SkColorPriv.h.
static inline __m128i SkBlendARGB32_SSE2(const __m128i& src, const __m128i& dst,
                                         const __m128i& aa) {
    __m128i src_scale = SkAlpha255To256_SSE2(aa);
    // SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale))
    __m128i dst_scale = SkGetPackedA32_SSE2(src);
    dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
    dst_scale = _mm_srli_epi16(dst_scale, 8);
    dst_scale = _mm_sub_epi32(_mm_set1_epi32(256), dst_scale);

    __m128i result = SkAlphaMulQ_SSE2(src, src_scale);
    return _mm_add_epi8(result, SkAlphaMulQ_SSE2(dst, dst_scale));
}

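// A scalar sketch of the blend above (per pixel):
//
//   unsigned src_scale = SkAlpha255To256(aa);   // aa + 1
//   unsigned dst_scale = 256 - ((SkGetPackedA32(src) * src_scale) >> 8);
//   uint32_t result    = SkAlphaMulQ(src, src_scale) + SkAlphaMulQ(dst, dst_scale);
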
// Fast path for SkBlendARGB32_SSE2 with a constant alpha factor.
static inline __m128i SkBlendARGB32_SSE2(const __m128i& src, const __m128i& dst,
                                         const unsigned aa) {
    unsigned alpha = SkAlpha255To256(aa);
    __m128i src_scale = _mm_set1_epi32(alpha);
    // SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale))
    __m128i dst_scale = SkGetPackedA32_SSE2(src);
    dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
    dst_scale = _mm_srli_epi16(dst_scale, 8);
    dst_scale = _mm_sub_epi32(_mm_set1_epi32(256), dst_scale);

    __m128i result = SkAlphaMulQ_SSE2(src, alpha);
    return _mm_add_epi8(result, SkAlphaMulQ_SSE2(dst, dst_scale));
}

#undef ASSERT_EQ
#endif // SkColor_opts_SSE2_DEFINED