/*
 * AltiVec optimizations for libjpeg-turbo
 *
 * Copyright (C) 2014, D. R. Commander.
 * Copyright (C) 2014, Jay Foad.
 * All rights reserved.
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jccolor-altivec.c */
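
/* NOTE (assumption): the including file is expected to supply the macros used
 * below -- RGB_PIXELSIZE, the RGBG_INDEX permutation constants, and the F_*
 * coefficients, ONE_HALF, SCALEBITS, and CENTERJSAMPLE -- for the pixel
 * format being compiled.
 */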


void jsimd_rgb_ycc_convert_altivec (JDIMENSION img_width, JSAMPARRAY input_buf,
                                    JSAMPIMAGE output_buf,
                                    JDIMENSION output_row, int num_rows)
{
  JSAMPROW inptr, outptr0, outptr1, outptr2;
  int pitch = img_width * RGB_PIXELSIZE, offset, num_cols;
  unsigned char __attribute__((aligned(16))) tmpbuf[RGB_PIXELSIZE * 16];

  __vector unsigned char rgb0, rgb1 = {0}, rgb2 = {0}, rgb3 = {0},
    rgbg0, rgbg1, rgbg2, rgbg3, y, cb, cr;
#if RGB_PIXELSIZE == 4
  __vector unsigned char rgb4 = {0};
#endif
  __vector short rg0, rg1, rg2, rg3, bg0, bg1, bg2, bg3;
  __vector unsigned short y01, y23, cr01, cr23, cb01, cb23;
  __vector int y0, y1, y2, y3, cr0, cr1, cr2, cr3, cb0, cb1, cb2, cb3;

  /* Constants */
  __vector short pw_f0299_f0337 = { __4X2(F_0_299, F_0_337) },
    pw_f0114_f0250 = { __4X2(F_0_114, F_0_250) },
    pw_mf016_mf033 = { __4X2(-F_0_168, -F_0_331) },
    pw_mf008_mf041 = { __4X2(-F_0_081, -F_0_418) };
  __vector unsigned short pw_f050_f000 = { __4X2(F_0_500, 0) };
  __vector int pd_onehalf = { __4X(ONE_HALF) },
    pd_onehalfm1_cj = { __4X(ONE_HALF - 1 + (CENTERJSAMPLE << SCALEBITS)) };
  __vector unsigned char zero = { __16X(0) },
    shift_pack_index =
      { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
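
  /* NOTE (assumption): each F_* constant is the corresponding coefficient in
   * SCALEBITS-bit fixed point (e.g. F_0_299 = 0.29900 * 2^SCALEBITS, rounded),
   * and ONE_HALF = 2^(SCALEBITS - 1) is the rounding term, so that
   * (R * F_0_299 + ONE_HALF) >> SCALEBITS ~= round(0.29900 * R).
   */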

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    output_row++;

    for (num_cols = pitch; num_cols > 0;
         num_cols -= RGB_PIXELSIZE * 16, inptr += RGB_PIXELSIZE * 16,
         outptr0 += 16, outptr1 += 16, outptr2 += 16) {

      /* Load 16 pixels == 48 or 64 bytes */
      offset = (size_t)inptr & 15;
      if (offset) {
        __vector unsigned char unaligned_shift_index;
        int bytes = num_cols + offset;

        if (bytes >= (RGB_PIXELSIZE + 1) * 16) {
          /* Fast path -- we have enough buffer space to load all vectors.
           * Even if we don't need them all, this is faster than narrowing
           * down which ones we need.
           */
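          /* vec_ld() rounds its address down to a 16-byte boundary, so the
           * loads below actually start at inptr - offset.  The check above
           * ensures that the last byte read, (inptr - offset) +
           * (RGB_PIXELSIZE + 1) * 16 - 1, still lies within the current row.
           */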
          rgb0 = vec_ld(0, inptr);
          rgb1 = vec_ld(16, inptr);
          rgb2 = vec_ld(32, inptr);
          rgb3 = vec_ld(48, inptr);
#if RGB_PIXELSIZE == 4
          rgb4 = vec_ld(64, inptr);
#endif
        } else {
          if (bytes & 15) {
            /* Slow path to prevent buffer overread.  Since there is no way to
             * read a partial AltiVec register, overread would occur on the
             * last chunk of the last image row if the right edge is not on a
             * 16-byte boundary.  It could also occur on other rows if the
             * number of bytes per row is small enough.  Since we can't
             * determine whether we're on the last image row, we have to
             * assume that every row is the last.
             */
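            /* Worked example (illustrative): if the last chunk of a row
             * covers only 12 bytes, reading it with a full 16-byte vector
             * load would overrun the buffer by 4 bytes, so the remaining
             * bytes are staged through the aligned tmpbuf instead.
             */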
            memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
            rgb0 = vec_ld(0, tmpbuf);
            rgb1 = vec_ld(16, tmpbuf);
            rgb2 = vec_ld(32, tmpbuf);
#if RGB_PIXELSIZE == 4
            rgb3 = vec_ld(48, tmpbuf);
#endif
            goto start;  /* Skip permutation */
          } else {
            /* Medium path -- if the right edge is vector-aligned, then we can
             * read full vectors (but with a lot of branches.)
             */
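            /* Safety argument: bytes is a multiple of 16 here, and vec_ld()
             * reads from the aligned address inptr - offset, so a load at
             * byte offset n is issued only when bytes > n, which implies
             * bytes >= n + 16 -- the read never passes the end of the row.
             */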
            rgb0 = vec_ld(0, inptr);
            if (bytes > 16) {
              rgb1 = vec_ld(16, inptr);
              if (bytes > 32) {
                rgb2 = vec_ld(32, inptr);
                if (bytes > 48) {
                  rgb3 = vec_ld(48, inptr);
#if RGB_PIXELSIZE == 4
                  if (bytes > 64)
                    rgb4 = vec_ld(64, inptr);
#endif
                }
              }
            }
          }
        }

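        /* vec_lvsl() returns a permute control vector derived from the low
         * four bits of the address; feeding each pair of consecutive aligned
         * loads through vec_perm() with that control reassembles the
         * unaligned data (the classic AltiVec realignment idiom).
         */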
        unaligned_shift_index = vec_lvsl(0, inptr);
        rgb0 = vec_perm(rgb0, rgb1, unaligned_shift_index);
        rgb1 = vec_perm(rgb1, rgb2, unaligned_shift_index);
        rgb2 = vec_perm(rgb2, rgb3, unaligned_shift_index);
#if RGB_PIXELSIZE == 4
        rgb3 = vec_perm(rgb3, rgb4, unaligned_shift_index);
#endif
      } else {
        if (num_cols >= RGB_PIXELSIZE * 16) {
          /* Fast path */
          rgb0 = vec_ld(0, inptr);
          rgb1 = vec_ld(16, inptr);
          rgb2 = vec_ld(32, inptr);
#if RGB_PIXELSIZE == 4
          rgb3 = vec_ld(48, inptr);
#endif
        } else {
          if (num_cols & 15) {
            /* Slow path */
            memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
            rgb0 = vec_ld(0, tmpbuf);
            rgb1 = vec_ld(16, tmpbuf);
            rgb2 = vec_ld(32, tmpbuf);
#if RGB_PIXELSIZE == 4
            rgb3 = vec_ld(48, tmpbuf);
#endif
          } else {
            /* Medium path */
            rgb0 = vec_ld(0, inptr);
            if (num_cols > 16) {
              rgb1 = vec_ld(16, inptr);
              if (num_cols > 32) {
                rgb2 = vec_ld(32, inptr);
#if RGB_PIXELSIZE == 4
                if (num_cols > 48)
                  rgb3 = vec_ld(48, inptr);
#endif
              }
            }
          }
        }
      }

start:
#if RGB_PIXELSIZE == 3
      /* rgb0 = R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
       * rgb1 = G5 B5 R6 G6 B6 R7 G7 B7 R8 G8 B8 R9 G9 B9 Ra Ga
       * rgb2 = Ba Rb Gb Bb Rc Gc Bc Rd Gd Bd Re Ge Be Rf Gf Bf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX0);
      rgbg1 = vec_perm(rgb0, rgb1, (__vector unsigned char)RGBG_INDEX1);
      rgbg2 = vec_perm(rgb1, rgb2, (__vector unsigned char)RGBG_INDEX2);
      rgbg3 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX3);
#else
      /* rgb0 = R0 G0 B0 X0 R1 G1 B1 X1 R2 G2 B2 X2 R3 G3 B3 X3
       * rgb1 = R4 G4 B4 X4 R5 G5 B5 X5 R6 G6 B6 X6 R7 G7 B7 X7
       * rgb2 = R8 G8 B8 X8 R9 G9 B9 X9 Ra Ga Ba Xa Rb Gb Bb Xb
       * rgb3 = Rc Gc Bc Xc Rd Gd Bd Xd Re Ge Be Xe Rf Gf Bf Xf
       *
       * rgbg0 = R0 G0 R1 G1 R2 G2 R3 G3 B0 G0 B1 G1 B2 G2 B3 G3
       * rgbg1 = R4 G4 R5 G5 R6 G6 R7 G7 B4 G4 B5 G5 B6 G6 B7 G7
       * rgbg2 = R8 G8 R9 G9 Ra Ga Rb Gb B8 G8 B9 G9 Ba Ga Bb Gb
       * rgbg3 = Rc Gc Rd Gd Re Ge Rf Gf Bc Gc Bd Gd Be Ge Bf Gf
       */
      rgbg0 = vec_perm(rgb0, rgb0, (__vector unsigned char)RGBG_INDEX);
      rgbg1 = vec_perm(rgb1, rgb1, (__vector unsigned char)RGBG_INDEX);
      rgbg2 = vec_perm(rgb2, rgb2, (__vector unsigned char)RGBG_INDEX);
      rgbg3 = vec_perm(rgb3, rgb3, (__vector unsigned char)RGBG_INDEX);
#endif
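
      /* vec_perm() selects bytes from the 32-byte concatenation of its first
       * two arguments according to the index vector, so each RGBG_INDEX
       * constant (defined by the including file) gathers one of the R/G and
       * B/G byte layouts shown above in a single instruction.
       */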

      /* rg0 = R0 G0 R1 G1 R2 G2 R3 G3
       * bg0 = B0 G0 B1 G1 B2 G2 B3 G3
       * ...
       *
       * NOTE: We have to use vec_merge*() here because vec_unpack*() doesn't
       * support unsigned vectors.
       */
      rg0 = (__vector signed short)vec_mergeh(zero, rgbg0);
      bg0 = (__vector signed short)vec_mergel(zero, rgbg0);
      rg1 = (__vector signed short)vec_mergeh(zero, rgbg1);
      bg1 = (__vector signed short)vec_mergel(zero, rgbg1);
      rg2 = (__vector signed short)vec_mergeh(zero, rgbg2);
      bg2 = (__vector signed short)vec_mergel(zero, rgbg2);
      rg3 = (__vector signed short)vec_mergeh(zero, rgbg3);
      bg3 = (__vector signed short)vec_mergel(zero, rgbg3);
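      /* Merging a zero byte ahead of each sample zero-extends the 8-bit
       * samples to 16-bit big-endian values, e.g. vec_mergeh(zero,
       * {R0, G0, ...}) = {0, R0, 0, G0, ...}, which reads back as the shorts
       * {R0, G0, ...}.
       */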

      /* (Original)
       * Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
       * Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
       * Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
       *
       * (This implementation)
       * Y  =  0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G
       * Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
       * Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
       */
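      /* Splitting the green coefficient (0.58700 = 0.33700 + 0.25000) keeps
       * every scaled constant within signed 16-bit range and lets each term
       * be computed with vec_msums(), which multiplies the {R, G} and {B, G}
       * halfword pairs by a pair of constants and accumulates both products
       * at once.
       */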

      /* Calculate Y values */

      y0 = vec_msums(rg0, pw_f0299_f0337, pd_onehalf);
      y1 = vec_msums(rg1, pw_f0299_f0337, pd_onehalf);
      y2 = vec_msums(rg2, pw_f0299_f0337, pd_onehalf);
      y3 = vec_msums(rg3, pw_f0299_f0337, pd_onehalf);
      y0 = vec_msums(bg0, pw_f0114_f0250, y0);
      y1 = vec_msums(bg1, pw_f0114_f0250, y1);
      y2 = vec_msums(bg2, pw_f0114_f0250, y2);
      y3 = vec_msums(bg3, pw_f0114_f0250, y3);
      /* Clever way to avoid 4 shifts + 2 packs.  This packs the high word
       * from each dword into a new 16-bit vector, which is the equivalent of
       * descaling the 32-bit results (right-shifting by 16 bits) and then
       * packing them.
       */
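      /* For example (illustrative), a 32-bit lane holding (42 << 16) + 0x8000
       * is stored big-endian as the bytes { 0x00, 0x2A, 0x80, 0x00 };
       * shift_pack_index keeps bytes 0-1 of each lane, yielding the descaled
       * 16-bit value 0x002A == 42.
       */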
      y01 = vec_perm((__vector unsigned short)y0, (__vector unsigned short)y1,
                     shift_pack_index);
      y23 = vec_perm((__vector unsigned short)y2, (__vector unsigned short)y3,
                     shift_pack_index);
      y = vec_pack(y01, y23);
      vec_st(y, 0, outptr0);

      /* Calculate Cb values */
      cb0 = vec_msums(rg0, pw_mf016_mf033, pd_onehalfm1_cj);
      cb1 = vec_msums(rg1, pw_mf016_mf033, pd_onehalfm1_cj);
      cb2 = vec_msums(rg2, pw_mf016_mf033, pd_onehalfm1_cj);
      cb3 = vec_msums(rg3, pw_mf016_mf033, pd_onehalfm1_cj);
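      /* NOTE (assumption): F_0_500 == 32768, which does not fit in a signed
       * 16-bit halfword, so the 0.50000 term (B for Cb, R for Cr) is
       * accumulated with the unsigned vec_msum() and pw_f050_f000, whose
       * second (G) coefficient is 0.
       */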
      cb0 = (__vector int)vec_msum((__vector unsigned short)bg0, pw_f050_f000,
                                   (__vector unsigned int)cb0);
      cb1 = (__vector int)vec_msum((__vector unsigned short)bg1, pw_f050_f000,
                                   (__vector unsigned int)cb1);
      cb2 = (__vector int)vec_msum((__vector unsigned short)bg2, pw_f050_f000,
                                   (__vector unsigned int)cb2);
      cb3 = (__vector int)vec_msum((__vector unsigned short)bg3, pw_f050_f000,
                                   (__vector unsigned int)cb3);
      cb01 = vec_perm((__vector unsigned short)cb0,
                      (__vector unsigned short)cb1, shift_pack_index);
      cb23 = vec_perm((__vector unsigned short)cb2,
                      (__vector unsigned short)cb3, shift_pack_index);
      cb = vec_pack(cb01, cb23);
      vec_st(cb, 0, outptr1);

      /* Calculate Cr values */
      cr0 = vec_msums(bg0, pw_mf008_mf041, pd_onehalfm1_cj);
      cr1 = vec_msums(bg1, pw_mf008_mf041, pd_onehalfm1_cj);
      cr2 = vec_msums(bg2, pw_mf008_mf041, pd_onehalfm1_cj);
      cr3 = vec_msums(bg3, pw_mf008_mf041, pd_onehalfm1_cj);
      cr0 = (__vector int)vec_msum((__vector unsigned short)rg0, pw_f050_f000,
                                   (__vector unsigned int)cr0);
      cr1 = (__vector int)vec_msum((__vector unsigned short)rg1, pw_f050_f000,
                                   (__vector unsigned int)cr1);
      cr2 = (__vector int)vec_msum((__vector unsigned short)rg2, pw_f050_f000,
                                   (__vector unsigned int)cr2);
      cr3 = (__vector int)vec_msum((__vector unsigned short)rg3, pw_f050_f000,
                                   (__vector unsigned int)cr3);
      cr01 = vec_perm((__vector unsigned short)cr0,
                      (__vector unsigned short)cr1, shift_pack_index);
      cr23 = vec_perm((__vector unsigned short)cr2,
                      (__vector unsigned short)cr3, shift_pack_index);
      cr = vec_pack(cr01, cr23);
      vec_st(cr, 0, outptr2);
    }
  }
}