blob: 1efd1eddfe8b2f668e486d7133d8a2af1adac11c [file] [log] [blame]
/* Copyright 2009 Motorola
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
6
7#include "SkBitmapProcState.h"
digit@google.comfce02ac2012-08-01 14:25:07 +00008#include "SkShader.h"
9#include "SkUtilsArm.h"
commit-bot@chromium.orga8c09662013-09-05 18:27:57 +000010#include "SkBitmapProcState_utils.h"
digit@google.comfce02ac2012-08-01 14:25:07 +000011
commit-bot@chromium.orga96176d2014-01-28 15:18:54 +000012#include <arm_neon.h>
13
digit@google.comfce02ac2012-08-01 14:25:07 +000014extern const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs_neon[];
15extern const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs_neon[];
16
17static void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
18static void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
19
// TILEX_PROCF(fx, max) SkClampMax((fx) >> 16, max)
static inline int16x8_t sbpsm_clamp_tile8(int32x4_t low, int32x4_t high, unsigned max) {
    // Deinterleave to keep only the high 16 bits of every 32-bit lane,
    // i.e. an element-wise (fx >> 16) across both input vectors.
    int16x8_t hi16 = vuzpq_s16(vreinterpretq_s16_s32(low),
                               vreinterpretq_s16_s32(high)).val[1];

    // Clamp each lane into [0, max].
    const int16x8_t zero  = vdupq_n_s16(0);
    const int16x8_t limit = vdupq_n_s16(max);
    hi16 = vmaxq_s16(hi16, zero);
    hi16 = vminq_s16(hi16, limit);

    return hi16;
}
33
// TILEX_PROCF(fx, max) SkClampMax((fx) >> 16, max)
static inline int32x4_t sbpsm_clamp_tile4(int32x4_t f, unsigned max) {
    // Arithmetic shift keeps the integer part of the 16.16 fixed-point lanes.
    int32x4_t clamped = vshrq_n_s32(f, 16);

    // Clamp each lane into [0, max].
    clamped = vmaxq_s32(clamped, vdupq_n_s32(0));
    return vminq_s32(clamped, vdupq_n_s32(max));
}
47
// EXTRACT_LOW_BITS(fy, max) (((fy) >> 12) & 0xF)
static inline int32x4_t sbpsm_clamp_tile4_low_bits(int32x4_t fx) {
    /* The & 0xF mask is intentionally omitted: the caller overwrites the
     * bits above the low nibble, so masking here would be wasted work.
     *   //ret = vandq_s32(ret, vdupq_n_s32(0xF));
     */
    return vshrq_n_s32(fx, 12);
}
61
// TILEX_PROCF(fx, max) (((fx)&0xFFFF)*((max)+1)>> 16)
static inline int16x8_t sbpsm_repeat_tile8(int32x4_t low, int32x4_t high, unsigned max) {
    // Deinterleave to keep the low 16 bits of every 32-bit lane ((fx) & 0xFFFF).
    uint16x8_t frac = vuzpq_u16(vreinterpretq_u16_s32(low),
                                vreinterpretq_u16_s32(high)).val[0];

    // Widening multiply by (max + 1) — a bare multiply, not SkFixedMul.
    uint16x4_t scale = vdup_n_u16(max + 1);
    uint32x4_t prodl = vmull_u16(vget_low_u16(frac), scale);
    uint32x4_t prodh = vmull_u16(vget_high_u16(frac), scale);

    // Deinterleave again to extract the upper 16 bits of every product
    // (the final >> 16).
    uint16x8_t res = vuzpq_u16(vreinterpretq_u16_u32(prodl),
                               vreinterpretq_u16_u32(prodh)).val[1];
    return vreinterpretq_s16_u16(res);
}
79
// TILEX_PROCF(fx, max) (((fx)&0xFFFF)*((max)+1)>> 16)
static inline int32x4_t sbpsm_repeat_tile4(int32x4_t f, unsigned max) {
    // Narrow to the low 16 bits of each lane ((fx) & 0xFFFF).
    uint16x4_t frac = vmovn_u32(vreinterpretq_u32_s32(f));

    // Widening multiply by (max + 1) — a bare multiply, not SkFixedMul.
    uint32x4_t prod = vmull_u16(frac, vdup_n_u16(max + 1));

    // Keep the upper 16 bits of each 32-bit product (the final >> 16).
    return vreinterpretq_s32_u32(vshrq_n_u32(prod, 16));
}
96
// EXTRACT_LOW_BITS(fx, max) ((((fx) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
static inline int32x4_t sbpsm_repeat_tile4_low_bits(int32x4_t fx, unsigned max) {
    // Narrow to the low 16 bits of each lane ((fx) & 0xFFFF).
    uint16x4_t frac = vmovn_u32(vreinterpretq_u32_s32(fx));

    // Widening multiply by (max + 1) — a bare multiply, not SkFixedMul.
    uint32x4_t prod = vmull_u16(frac, vdup_n_u16(max + 1));

    /* The & 0xF mask is intentionally omitted: the caller overwrites the
     * bits above the low nibble, so masking here would be wasted work.
     *   //ret = vandq_s32(ret, vdupq_n_s32(0xF));
     */
    return vshrq_n_s32(vreinterpretq_s32_u32(prod), 12);
}
119
// Instantiate the clamp-tiling matrix procs: including
// SkBitmapProcState_matrix_neon.h below expands a family of
// ClampX_ClampY*_neon functions driven by these macros.
#define MAKENAME(suffix)        ClampX_ClampY ## suffix ## _neon
#define TILEX_PROCF(fx, max)    SkClampMax((fx) >> 16, max)
#define TILEY_PROCF(fy, max)    SkClampMax((fy) >> 16, max)
#define TILEX_PROCF_NEON8(l, h, max)    sbpsm_clamp_tile8(l, h, max)
#define TILEY_PROCF_NEON8(l, h, max)    sbpsm_clamp_tile8(l, h, max)
#define TILEX_PROCF_NEON4(fx, max)      sbpsm_clamp_tile4(fx, max)
#define TILEY_PROCF_NEON4(fy, max)      sbpsm_clamp_tile4(fy, max)
#define EXTRACT_LOW_BITS(v, max)        (((v) >> 12) & 0xF)
#define EXTRACT_LOW_BITS_NEON4(v, max)  sbpsm_clamp_tile4_low_bits(v)
// Only the clamp variant can hit the decal fast path.
#define CHECK_FOR_DECAL
#include "SkBitmapProcState_matrix_neon.h"
131
// Instantiate the repeat-tiling matrix procs: a second expansion of
// SkBitmapProcState_matrix_neon.h producing RepeatX_RepeatY*_neon
// functions (no CHECK_FOR_DECAL here — decal only applies to clamp).
#define MAKENAME(suffix)        RepeatX_RepeatY ## suffix ## _neon
#define TILEX_PROCF(fx, max)    SK_USHIFT16(((fx) & 0xFFFF) * ((max) + 1))
#define TILEY_PROCF(fy, max)    SK_USHIFT16(((fy) & 0xFFFF) * ((max) + 1))
#define TILEX_PROCF_NEON8(l, h, max)    sbpsm_repeat_tile8(l, h, max)
#define TILEY_PROCF_NEON8(l, h, max)    sbpsm_repeat_tile8(l, h, max)
#define TILEX_PROCF_NEON4(fx, max)      sbpsm_repeat_tile4(fx, max)
#define TILEY_PROCF_NEON4(fy, max)      sbpsm_repeat_tile4(fy, max)
#define EXTRACT_LOW_BITS(v, max)        ((((v) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
#define EXTRACT_LOW_BITS_NEON4(v, max)  sbpsm_repeat_tile4_low_bits(v, max)
#include "SkBitmapProcState_matrix_neon.h"
digit@google.comfce02ac2012-08-01 14:25:07 +0000142
143
digit@google.comfce02ac2012-08-01 14:25:07 +0000144
// Writes count 16-bit x-coordinates (fx >> 16) to dst, stepping fx by dx.
// Eight coordinates are produced per NEON iteration; a scalar loop handles
// the 0..7 element tail. dst is uint32_t[] but is filled as packed uint16s.
void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        // SkFixed is 16.16 fixed point
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        // lbase/hbase hold fx .. fx + 7*dx, four lanes each.
        int32x4_t lbase, hbase;
        lbase = vdupq_n_s32(fx);
        lbase = vsetq_lane_s32(fx + dx, lbase, 1);
        lbase = vsetq_lane_s32(fx + dx + dx, lbase, 2);
        lbase = vsetq_lane_s32(fx + dx + dx + dx, lbase, 3);
        // Use vaddq_s32 rather than the GCC/Clang vector-extension '+'
        // so the code builds on toolchains without that extension.
        hbase = vaddq_s32(lbase, vdupq_n_s32(4 * dx));

        do {
            // Deinterleave keeps the upper 16 bits of all 8 lanes, packing
            // eight (fx >> 16) values into a single 128-bit store.
            vst1q_u32(dst, vreinterpretq_u32_s16(
                vuzpq_s16(vreinterpretq_s16_s32(lbase), vreinterpretq_s16_s32(hbase)).val[1]
            ));

            // On to the next group of 8.
            lbase = vaddq_s32(lbase, vdx8);
            hbase = vaddq_s32(hbase, vdx8);
            dst += 4;   // we did 8 elements but each result is half-width
            count -= 8;
            fx += dx8;
        } while (count >= 8);
    }

    // Scalar tail for the remaining 0..7 elements.
    uint16_t* xx = (uint16_t*)dst;
    for (int i = count; i > 0; --i) {
        *xx++ = SkToU16(fx >> 16); fx += dx;
    }
}
179
// Writes count packed filter coordinates to dst, stepping fx by dx.
// Each 32-bit output packs x in bits [31:18], the 4 sub-pixel bits of fx
// in [17:14] (the (fx >> 12) << 14 truncates the rest), and x + 1 in
// [13:0]. Eight values per NEON iteration, scalar loops for the tail.
void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        // wide_fx/wide_fx2 hold fx .. fx + 7*dx, four lanes each.
        int32x4_t wide_fx, wide_fx2;
        wide_fx = vdupq_n_s32(fx);
        wide_fx = vsetq_lane_s32(fx + dx, wide_fx, 1);
        wide_fx = vsetq_lane_s32(fx + dx + dx, wide_fx, 2);
        wide_fx = vsetq_lane_s32(fx + dx + dx + dx, wide_fx, 3);

        wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));

        while (count >= 8) {
            int32x4_t wide_out;
            int32x4_t wide_out2;

            // Use vorrq_s32/vaddq_s32 rather than the GCC/Clang
            // vector-extension '|'/'+' so the code builds on toolchains
            // without that extension.
            wide_out = vshlq_n_s32(vshrq_n_s32(wide_fx, 12), 14);
            wide_out = vorrq_s32(wide_out,
                                 vaddq_s32(vshrq_n_s32(wide_fx, 16), vdupq_n_s32(1)));

            wide_out2 = vshlq_n_s32(vshrq_n_s32(wide_fx2, 12), 14);
            wide_out2 = vorrq_s32(wide_out2,
                                  vaddq_s32(vshrq_n_s32(wide_fx2, 16), vdupq_n_s32(1)));

            vst1q_u32(dst, vreinterpretq_u32_s32(wide_out));
            vst1q_u32(dst + 4, vreinterpretq_u32_s32(wide_out2));

            dst += 8;
            fx += dx8;
            wide_fx = vaddq_s32(wide_fx, vdx8);
            wide_fx2 = vaddq_s32(wide_fx2, vdx8);
            count -= 8;
        }
    }

    // Scalar tail for the remaining 0..7 elements: peel one if the count
    // is odd, then process pairs.
    if (count & 1)
    {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
    while ((count -= 2) >= 0)
    {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;

        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
}
229}