/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapProcState_opts_SSSE3.h"
#include "SkPaint.h"
#include "SkUtils.h"

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
#include <tmmintrin.h>  // SSSE3

// Using an anonymous namespace seems to force gcc to inline the
// instantiations directly, instead of emitting the functions
// S32_generic_D32_filter_DX_SSSE3<true> and
// S32_generic_D32_filter_DX_SSSE3<false> and calling them from the
// external wrapper functions.
namespace {
// In this file, the alpha and non-alpha variations are implemented with a
// template, as it makes the code more compact and a bit easier to maintain,
// while making the compiler generate the exact same code as two functions
// that only differ by a few lines.


// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//        read from. It is identical in concept to argument two of the
//        S32_{opaque}_D32_filter_DX methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//        suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//        suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//        parameters to reorder the x[0-3] parameters.
// @param all_x_result vector of 8 bit components that will contain
//        (4x(x3), 4x(x2), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_x vector of 8 bit components, containing
//        (4x(16 - x3), 4x(16 - x2), 4x(16 - x1), 4x(16 - x0)).
inline void PrepareConstantsTwoPixelPairs(const uint32_t* xy,
                                          const __m128i& mask_3FFF,
                                          const __m128i& mask_000F,
                                          const __m128i& sixteen_8bit,
                                          const __m128i& mask_dist_select,
                                          __m128i* all_x_result,
                                          __m128i* sixteen_minus_x,
                                          int* x0,
                                          int* x1) {
    const __m128i xx = _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // 4 delta X
    // (x03, x02, x01, x00)
    const __m128i x0_wide = _mm_srli_epi32(xx, 18);
    // (x13, x12, x11, x10)
    const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF);

    _mm_storeu_si128(reinterpret_cast<__m128i *>(x0), x0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x1), x1_wide);

    __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);

    // (4x(x3), 4x(x2), 4x(x1), 4x(x0))
    all_x = _mm_shuffle_epi8(all_x, mask_dist_select);

    *all_x_result = all_x;
    // (4x(16-x3), 4x(16-x2), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_x = _mm_sub_epi8(sixteen_8bit, all_x);
}
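
// For illustration (editor's addition, not part of the original code): each
// 32-bit entry read above packs two 14-bit column indices around a 4-bit
// filter weight, entry = (x0 << 18) | (sub_x << 14) | x1. A scalar sketch of
// the same decode, using a hypothetical entry value:
//
//     uint32_t entry = (5u << 18) | (7u << 14) | 6u;  // x0 = 5, sub_x = 7, x1 = 6
//     unsigned x0    = entry >> 18;          // 5, matches _mm_srli_epi32(xx, 18)
//     unsigned sub_x = (entry >> 14) & 0xF;  // 7, matches the shift plus mask_000F
//     unsigned x1    = entry & 0x3FFF;       // 6, matches the mask_3FFF and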

// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//        read from. It is identical in concept to argument two of the
//        S32_{opaque}_D32_filter_DXDY methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//        suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//        suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//        parameters to reorder the x[0-3] parameters.
// @param all_xy_result vector of 8 bit components that will contain
//        (4x(y1), 4x(y0), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_xy vector of 8 bit components, containing
//        (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0)).
inline void PrepareConstantsTwoPixelPairsDXDY(const uint32_t* xy,
                                              const __m128i& mask_3FFF,
                                              const __m128i& mask_000F,
                                              const __m128i& sixteen_8bit,
                                              const __m128i& mask_dist_select,
                                              __m128i* all_xy_result,
                                              __m128i* sixteen_minus_xy,
                                              int* xy0, int* xy1) {
    const __m128i xy_wide =
        _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // (x10, y10, x00, y00)
    __m128i xy0_wide = _mm_srli_epi32(xy_wide, 18);
    // (y10, y00, x10, x00)
    xy0_wide = _mm_shuffle_epi32(xy0_wide, _MM_SHUFFLE(2, 0, 3, 1));
    // (x11, y11, x01, y01)
    __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF);
    // (y11, y01, x11, x01)
    xy1_wide = _mm_shuffle_epi32(xy1_wide, _MM_SHUFFLE(2, 0, 3, 1));

    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy0), xy0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy1), xy1_wide);

    // (x1, y1, x0, y0)
    __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F);
    // (y1, y0, x1, x0)
    all_xy = _mm_shuffle_epi32(all_xy, _MM_SHUFFLE(2, 0, 3, 1));
    // (4x(y1), 4x(y0), 4x(x1), 4x(x0))
    all_xy = _mm_shuffle_epi8(all_xy, mask_dist_select);

    *all_xy_result = all_xy;
    // (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_xy = _mm_sub_epi8(sixteen_8bit, all_xy);
}
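
// For illustration (editor's addition): in the DXDY case the coordinate
// stream holds two 32-bit words per destination pixel, a Y word followed by
// an X word, each packed as (major0:14 | frac:4 | major1:14). A scalar sketch
// of decoding one pixel, mirroring the remainder loop further below:
//
//     uint32_t y_word = *xy++;
//     unsigned y0    = y_word >> 18;          // top row index
//     unsigned sub_y = (y_word >> 14) & 0xF;  // vertical filter weight
//     unsigned y1    = y_word & 0x3FFF;       // bottom row index
//     uint32_t x_word = *xy++;
//     unsigned x0    = x_word >> 18;          // left column index
//     unsigned sub_x = (x_word >> 14) & 0xF;  // horizontal filter weight
//     unsigned x1    = x_word & 0x3FFF;       // right column index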

// Helper function used when processing one pixel pair.
// @param pixel0..3 are the four input pixels
// @param scale_x vector of 8 bit components to multiply the pixel[0:3]. This
//        will contain (4x(x1, 16-x1), 4x(x0, 16-x0))
//        or (4x(x3, 16-x3), 4x(x2, 16-x2))
// @return a vector of 16 bit components containing:
//         (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
inline __m128i ProcessPixelPairHelper(uint32_t pixel0,
                                      uint32_t pixel1,
                                      uint32_t pixel2,
                                      uint32_t pixel3,
                                      const __m128i& scale_x) {
    __m128i a0, a1, a2, a3;
    // Load 2 pairs of pixels
    a0 = _mm_cvtsi32_si128(pixel0);
    a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave pixels.
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    a2 = _mm_cvtsi32_si128(pixel2);
    a3 = _mm_cvtsi32_si128(pixel3);
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2)
    a2 = _mm_unpacklo_epi8(a2, a3);

    // two pairs of pixel pairs, interleaved.
    // (Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2,
    //  Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi64(a0, a2);

    // multiply and sum to 16 bit components.
    // (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
    // At that point, we use up a bit less than 12 bits for each 16 bit
    // component:
    // All components are less than 255. So,
    // C0 * (16 - x) + C1 * x <= 255 * (16 - x) + 255 * x = 255 * 16.
    return _mm_maddubs_epi16(a0, scale_x);
}
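
// For illustration (editor's addition): _mm_maddubs_epi16 multiplies unsigned
// 8 bit pixel components by signed 8 bit weights and adds adjacent products
// into 16 bit lanes. For one lane holding neighbours c0 and c1 with a weight
// x in [0, 16], the operation above reduces to:
//
//     int lane = c0 * (16 - x) + c1 * x;  // at most 255 * 16, so no saturation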

// Scale back the results after multiplications to the [0:255] range, and scale
// by alpha when has_alpha is true.
// Depending on whether one set or two sets of multiplications had been applied,
// the results have to be shifted by four places (dividing by 16), or shifted
// by eight places (dividing by 256), since each multiplication is by a quantity
// in the range [0:16].
template<bool has_alpha, int scale>
inline __m128i ScaleFourPixels(__m128i* pixels,
                               const __m128i& alpha) {
    // Divide each 16 bit component by 16 (or 256 depending on scale).
    *pixels = _mm_srli_epi16(*pixels, scale);

    if (has_alpha) {
        // Multiply by alpha.
        *pixels = _mm_mullo_epi16(*pixels, alpha);

        // Divide each 16 bit component by 256.
        *pixels = _mm_srli_epi16(*pixels, 8);
    }
    return *pixels;
}
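
// Worked bounds for the two scale values (editor's addition): with scale == 4
// each lane is at most 255 * 16, so (lane >> 4) <= 255; with scale == 8 each
// lane is at most 255 * 16 * 16, so (lane >> 8) <= 255. After the optional
// alpha step, (255 * 255) >> 8 == 254, so every result still fits in a byte.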

// Wrapper to calculate two output pixels from four input pixels. The
// arguments are the same as ProcessPixelPairHelper. Technically, there are
// eight input pixels, but since sub_y == 0, the factor applied to half of
// the pixels is zero (sub_y), so those pixels are omitted here to save on
// some processing.
// @param alpha when has_alpha is true, scale all resulting components by this
//        value.
// @return a vector of 16 bit components containing:
//         ((Aa2 * (16 - x1) + Aa3 * x1) * alpha, ...,
//          (Ra0 * (16 - x0) + Ra1 * x0) * alpha) (when has_alpha is true)
//         otherwise
//         (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
//         In both cases, the results are renormalized (divided by 16) to match
//         the expected formats when storing back the results into memory.
template<bool has_alpha>
inline __m128i ProcessPixelPairZeroSubY(uint32_t pixel0,
                                        uint32_t pixel1,
                                        uint32_t pixel2,
                                        uint32_t pixel3,
                                        const __m128i& scale_x,
                                        const __m128i& alpha) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);
    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Same as ProcessPixelPairZeroSubY, except that it processes one output pixel
// at a time instead of two. As in the above function, only two input pixels
// are needed to generate a single output pixel since sub_y == 0.
// @return same as ProcessPixelPairZeroSubY, except that only the bottom 4
//         16 bit components are set.
template<bool has_alpha>
inline __m128i ProcessOnePixelZeroSubY(uint32_t pixel0,
                                       uint32_t pixel1,
                                       __m128i scale_x,
                                       __m128i alpha) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    __m128i sum = _mm_maddubs_epi16(a0, scale_x);

    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Methods when sub_y != 0


// Same as ProcessPixelPairHelper, except that the values are scaled by y.
// @param y vector of 16 bit components containing 'y' values. There are two
//        cases in practice, where y will contain the sub_y constant, or will
//        contain the 16 - sub_y constant.
// @return vector of 16 bit components containing:
//         (y * (Aa2 * (16 - x1) + Aa3 * x1), ... , y * (Ra0 * (16 - x0) + Ra1 * x0))
inline __m128i ProcessPixelPair(uint32_t pixel0,
                                uint32_t pixel1,
                                uint32_t pixel2,
                                uint32_t pixel3,
                                const __m128i& scale_x,
                                const __m128i& y) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);

    // first row times 16-y or y depending on whether 'y' represents one or
    // the other.
    // Values will be up to 255 * 16 * 16 = 65280.
    // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
    //  y * (Ra0 * (16 - x0) + Ra1 * x0))
    sum = _mm_mullo_epi16(sum, y);

    return sum;
}

// Process two pixel pairs out of eight input pixels.
// In other methods, the distinct pixels are passed one by one, but in this
// case, the rows and the index offsets into the rows are passed in, and the
// eight pixels are fetched here.
// @param row0..1 top and bottom row where to find input pixels.
// @param x0..1 offsets into the row for all eight input pixels.
// @param all_y vector of 16 bit components containing the constant sub_y
// @param neg_y vector of 16 bit components containing the constant 16 - sub_y
// @param alpha vector of 16 bit components containing the alpha value to scale
//        the results by, when has_alpha is true.
// @return
//        (alpha * ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
//                  y * (Aa2' * (16-x1) + Aa3' * x1)),
//         ...
//         alpha * ((16-y) * (Ra0 * (16-x0) + Ra1 * x0) +
//                  y * (Ra0' * (16-x0) + Ra1' * x0)))
//        With the factor alpha removed when has_alpha is false.
//        The values are scaled back to 16 bit components, but with only the
//        bottom 8 bits being set.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairs(const uint32_t* row0,
                                    const uint32_t* row1,
                                    const int* x0,
                                    const int* x1,
                                    const __m128i& scale_x,
                                    const __m128i& all_y,
                                    const __m128i& neg_y,
                                    const __m128i& alpha) {
    __m128i sum0 = ProcessPixelPair(
        row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
        scale_x, neg_y);
    __m128i sum1 = ProcessPixelPair(
        row1[x0[0]], row1[x1[0]], row1[x0[1]], row1[x1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y * (Aa2' * (16-x1) + Aa3' * x1),
    //  ...
    //  (16-y) * (Ra0 * (16-x0) + Ra1 * x0) +
    //  y * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}

// Similar to ProcessTwoPixelPairs, except that the pixels are read through
// the per-pixel row pointers and xy index arrays used by the DXDY path.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairsDXDY(const uint32_t* row00,
                                        const uint32_t* row01,
                                        const uint32_t* row10,
                                        const uint32_t* row11,
                                        const int* xy0,
                                        const int* xy1,
                                        const __m128i& scale_x,
                                        const __m128i& all_y,
                                        const __m128i& neg_y,
                                        const __m128i& alpha) {
    // first row
    __m128i sum0 = ProcessPixelPair(
        row00[xy0[0]], row00[xy1[0]], row10[xy0[1]], row10[xy1[1]],
        scale_x, neg_y);
    // second row
    __m128i sum1 = ProcessPixelPair(
        row01[xy0[0]], row01[xy1[0]], row11[xy0[1]], row11[xy1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y1) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y1 * (Aa2' * (16-x1) + Aa3' * x1),
    //  ...
    //  (16-y0) * (Ra0 * (16-x0) + Ra1 * x0) +
    //  y0 * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}


// Same as ProcessPixelPair, except that it performs the math one output pixel
// at a time. This means that only the bottom four 16 bit components are set.
inline __m128i ProcessOnePixel(uint32_t pixel0, uint32_t pixel1,
                               const __m128i& scale_x, const __m128i& y) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    a0 = _mm_maddubs_epi16(a0, scale_x);

    // scale row by y
    return _mm_mullo_epi16(a0, y);
}

// Notes about the various tricks that are used in this implementation:
// - specialization for sub_y == 0.
//   Statistically, 1/16th of the samples will have sub_y == 0. When this
//   happens, the math goes from:
//     (16 - x)*(16 - y)*a00 + x*(16 - y)*a01 + (16 - x)*y*a10 + x*y*a11
//   to:
//     (16 - x)*a00 + x*a01
//   which is much simpler and makes for an easy boost in performance.
// - calculating 4 output pixels at a time.
//   This allows loading the coefficients x0 and x1 and shuffling them into the
//   optimum location only once per loop, instead of twice per loop.
//   This also allows us to store the four pixels with a single store.
// - Use of 2 special SSSE3 instructions (compared to the SSE2 version):
//   _mm_shuffle_epi8: spreads the coefficients x[0-3], loaded as 32 bit
//   values, into 8 bit values repeated four times.
//   _mm_maddubs_epi16: performs multiplications and additions in one swoop on
//   8 bit values, storing the results in 16 bit values. This instruction is
//   crucial for the speed of the implementation since, as one can see in the
//   SSE2 implementation, all inputs otherwise have to be widened to 16 bits
//   because the results are 16 bits. This basically allows us to process twice
//   as many pixel components per iteration.
//
// As a result, this method is faster than the traditional SSE2 path. The
// actual boost varies greatly with the underlying architecture.
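//
// As a plain C reference (editor's addition, illustration only, not compiled
// as part of this file), one destination pixel of the general path amounts to:
//
//     uint32_t FilterPixelScalar(const uint32_t* row0, const uint32_t* row1,
//                                unsigned x0, unsigned x1,
//                                unsigned sub_x, unsigned sub_y) {
//         uint32_t result = 0;
//         for (int shift = 0; shift < 32; shift += 8) {
//             unsigned a00 = (row0[x0] >> shift) & 0xFF;
//             unsigned a01 = (row0[x1] >> shift) & 0xFF;
//             unsigned a10 = (row1[x0] >> shift) & 0xFF;
//             unsigned a11 = (row1[x1] >> shift) & 0xFF;
//             unsigned c = (16 - sub_x) * (16 - sub_y) * a00 +
//                          sub_x * (16 - sub_y) * a01 +
//                          (16 - sub_x) * sub_y * a10 +
//                          sub_x * sub_y * a11;
//             result |= ((c >> 8) & 0xFF) << shift;
//         }
//         return result;
//     }
//
// The alpha variants additionally multiply each channel by s.fAlphaScale and
// shift right by 8 before packing.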
template<bool has_alpha>
void S32_generic_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
    SkASSERT(s.fBitmap->config() == SkBitmap::kARGB_8888_Config);
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
        static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();
    const uint32_t XY = *xy++;
    const unsigned y0 = XY >> 14;
    const uint32_t* row0 =
        reinterpret_cast<const uint32_t*>(src_addr + (y0 >> 4) * rb);
    const uint32_t* row1 =
        reinterpret_cast<const uint32_t*>(src_addr + (XY & 0x3FFF) * rb);
    const unsigned sub_y = y0 & 0xF;

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8, 8, 8, 8,
                                                  4, 4, 4, 4,
                                                  0, 0, 0, 0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);
    // (0, 0, 0, 0, 0, 0, 0, 0)
    const __m128i zero = _mm_setzero_si128();

    __m128i alpha = _mm_setzero_si128();
    if (has_alpha)
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);

    if (sub_y == 0) {
        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs.
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
                scale_x, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[2]], row0[x1[2]], row0[x0[3]], row0[x1[3]],
                scale_x, alpha);

            // Pack the 16 bit intermediates of both sums into 8 bit values.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the four resulting pixels.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // handle remainder
        while (count-- > 0) {
            uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            unsigned x0 = xx >> 18;
            unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // (16x(16-x))
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            __m128i sum = ProcessOnePixelZeroSubY<has_alpha>(
                row0[x0], row0[x1],
                scale_x, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum = _mm_packus_epi16(sum, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum);
        }
    } else {  // more general case, y != 0
        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(sub_y);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0, x1,
                scale_x, all_y, neg_y, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0 + 2, x1 + 2,
                scale_x, all_y, neg_y, alpha);

            // Do the final packing of the two results

            // Pack the 16 bit intermediates of both sums into 8 bit values.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the four resulting pixels.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // Left over.
        while (count-- > 0) {
            const uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            const unsigned x0 = xx >> 18;
            const unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // 16x (16-x)
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            // (8x (x, 16-x))
            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            // first row.
            __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
            // second row.
            __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

            // Add both rows for full sample
            sum0 = _mm_add_epi16(sum0, sum1);

            sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum0);
        }
    }
}

/*
 * Similar to S32_generic_D32_filter_DX_SSSE3, but there is no need to handle
 * the special case sub_y == 0, since sub_y changes on every iteration here.
 */
template<bool has_alpha>
void S32_generic_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                       const uint32_t* xy,
                                       int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
    SkASSERT(s.fBitmap->config() == SkBitmap::kARGB_8888_Config);
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
        static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8, 8, 8, 8,
                                                  4, 4, 4, 4,
                                                  0, 0, 0, 0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);

    __m128i alpha;
    if (has_alpha) {
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);
    }

    // Unroll 2x, interleave bytes, use pmaddubsw (all_x is small)
    while (count >= 2) {
        int xy0[4];
        int xy1[4];
        __m128i all_xy, sixteen_minus_xy;
        PrepareConstantsTwoPixelPairsDXDY(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_xy, &sixteen_minus_xy, xy0, xy1);

        // (4x(x1, 16-x1), 4x(x0, 16-x0))
        __m128i scale_x = _mm_unpacklo_epi8(sixteen_minus_xy, all_xy);
        // (4x(0, y1), 4x(0, y0))
        __m128i all_y = _mm_unpackhi_epi8(all_xy, _mm_setzero_si128());
        __m128i neg_y = _mm_sub_epi16(_mm_set1_epi16(16), all_y);

        const uint32_t* row00 =
            reinterpret_cast<const uint32_t*>(src_addr + xy0[2] * rb);
        const uint32_t* row01 =
            reinterpret_cast<const uint32_t*>(src_addr + xy1[2] * rb);
        const uint32_t* row10 =
            reinterpret_cast<const uint32_t*>(src_addr + xy0[3] * rb);
        const uint32_t* row11 =
            reinterpret_cast<const uint32_t*>(src_addr + xy1[3] * rb);

        __m128i sum0 = ProcessTwoPixelPairsDXDY<has_alpha>(
            row00, row01, row10, row11, xy0, xy1,
            scale_x, all_y, neg_y, alpha);

        // Pack the 16 bit intermediates into 8 bit values.
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Store the low 8 bytes (the two resulting pixels).
        _mm_storel_epi64(reinterpret_cast<__m128i *>(colors), sum0);

        xy += 4;
        colors += 2;
        count -= 2;
    }

    // Handle the remainder
    while (count-- > 0) {
        uint32_t data = *xy++;
        unsigned y0 = data >> 14;
        unsigned y1 = data & 0x3FFF;
        unsigned subY = y0 & 0xF;
        y0 >>= 4;

        data = *xy++;
        unsigned x0 = data >> 14;
        unsigned x1 = data & 0x3FFF;
        unsigned subX = x0 & 0xF;
        x0 >>= 4;

        const uint32_t* row0 =
            reinterpret_cast<const uint32_t*>(src_addr + y0 * rb);
        const uint32_t* row1 =
            reinterpret_cast<const uint32_t*>(src_addr + y1 * rb);

        // 16x(x)
        const __m128i all_x = _mm_set1_epi8(subX);

        // 16x (16-x)
        __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

        // (8x (x, 16-x))
        scale_x = _mm_unpacklo_epi8(scale_x, all_x);

        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(subY);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // first row.
        __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
        // second row.
        __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

        // Add both rows for full sample
        sum0 = _mm_add_epi16(sum0, sum1);

        sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

        // Pack lower 4 16 bit values of sum into lower 4 bytes.
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Extract low int and store.
        *colors++ = _mm_cvtsi128_si32(sum0);
    }
}
}  // namespace

void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<true>(s, xy, count, colors);
}

void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<true>(s, xy, count, colors);
}

#else  // SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3

void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    sk_throw();
}

void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    sk_throw();
}

void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    sk_throw();
}

void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    sk_throw();
}

#endif