/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string.h>             /* memcpy() */
#include <stdlib.h>             /* abort() */
#include <math.h>               /* fmax(), fmin() */
#include <cutils/bitops.h>      /* for popcount() */
#include <audio_utils/primitives.h>
#include "private/private.h"

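/* Convert pairs of 32-bit stereo accumulators with 12 fractional bits (Q19.12)
 * to packed 16-bit stereo samples. Each input is shifted down by 12 and clamped
 * to 16 bits; the right channel lands in the high half-word of each output word.
 * Note: despite its name, this implementation does not apply dither.
 */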
void ditherAndClamp(int32_t *out, const int32_t *sums, size_t pairs)
{
    for (; pairs > 0; --pairs) {
        const int32_t l = clamp16(*sums++ >> 12);
        const int32_t r = clamp16(*sums++ >> 12);
        *out++ = (r << 16) | (l & 0xFFFF);
    }
}

void memcpy_to_i16_from_q4_27(int16_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp16(*src++ >> 12);
    }
}

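/* Widen unsigned 8-bit samples (biased at 0x80) to signed 16-bit.
 * Processed from the end so the conversion may be done in place when dst and
 * src start at the same address.
 */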
void memcpy_to_i16_from_u8(int16_t *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = (int16_t)(*--src - 0x80) << 8;
    }
}

void memcpy_to_u8_from_i16(uint8_t *dst, const int16_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = (*src++ >> 8) + 0x80;
    }
}

void memcpy_to_u8_from_float(uint8_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp8_from_float(*src++);
    }
}

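/* Narrow 32-bit samples to 16-bit by dropping the low 16 bits (truncation,
 * no rounding or dithering).
 */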
void memcpy_to_i16_from_i32(int16_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = *src++ >> 16;
    }
}

void memcpy_to_i16_from_float(int16_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp16_from_float(*src++);
    }
}

void memcpy_to_float_from_q4_27(float *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = float_from_q4_27(*src++);
    }
}

void memcpy_to_float_from_i16(float *dst, const int16_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = float_from_i16(*--src);
    }
}

void memcpy_to_float_from_u8(float *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = float_from_u8(*--src);
    }
}

void memcpy_to_float_from_p24(float *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count * 3;
    for (; count > 0; --count) {
        src -= 3;
        *--dst = float_from_p24(src);
    }
}

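/* Convert packed 24-bit samples (3 bytes each, native endianness) to 16-bit
 * by keeping the most significant 16 bits of each sample.
 */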
void memcpy_to_i16_from_p24(int16_t *dst, const uint8_t *src, size_t count)
{
    for (; count > 0; --count) {
#if HAVE_BIG_ENDIAN
        *dst++ = src[1] | (src[0] << 8);
#else
        *dst++ = src[1] | (src[2] << 8);
#endif
        src += 3;
    }
}

void memcpy_to_i32_from_p24(int32_t *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count * 3;
    for (; count > 0; --count) {
        src -= 3;
#if HAVE_BIG_ENDIAN
        *--dst = (src[2] << 8) | (src[1] << 16) | (src[0] << 24);
#else
        *--dst = (src[0] << 8) | (src[1] << 16) | (src[2] << 24);
#endif
    }
}

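/* Widen 16-bit samples to packed 24-bit by zero-filling the low byte.
 * Processed from the end so the expansion may be done in place.
 */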
void memcpy_to_p24_from_i16(uint8_t *dst, const int16_t *src, size_t count)
{
    dst += count * 3;
    src += count;
    for (; count > 0; --count) {
        dst -= 3;
        const int16_t sample = *--src;
#if HAVE_BIG_ENDIAN
        dst[0] = sample >> 8;
        dst[1] = sample;
        dst[2] = 0;
#else
        dst[0] = 0;
        dst[1] = sample;
        dst[2] = sample >> 8;
#endif
    }
}

void memcpy_to_p24_from_float(uint8_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        int32_t ival = clamp24_from_float(*src++);

#if HAVE_BIG_ENDIAN
        *dst++ = ival >> 16;
        *dst++ = ival >> 8;
        *dst++ = ival;
#else
        *dst++ = ival;
        *dst++ = ival >> 8;
        *dst++ = ival >> 16;
#endif
    }
}

void memcpy_to_p24_from_q8_23(uint8_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        int32_t ival = clamp24_from_q8_23(*src++);

#if HAVE_BIG_ENDIAN
        *dst++ = ival >> 16;
        *dst++ = ival >> 8;
        *dst++ = ival;
#else
        *dst++ = ival;
        *dst++ = ival >> 8;
        *dst++ = ival >> 16;
#endif
    }
}

void memcpy_to_p24_from_i32(uint8_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        int32_t ival = *src++ >> 8;

#if HAVE_BIG_ENDIAN
        *dst++ = ival >> 16;
        *dst++ = ival >> 8;
        *dst++ = ival;
#else
        *dst++ = ival;
        *dst++ = ival >> 8;
        *dst++ = ival >> 16;
#endif
    }
}

void memcpy_to_q8_23_from_i16(int32_t *dst, const int16_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = (int32_t)*--src << 8;
    }
}

void memcpy_to_q8_23_from_float_with_clamp(int32_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp24_from_float(*src++);
    }
}

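/* Unpack 3-byte samples into signed Q8.23; the (int8_t) cast of the most
 * significant byte provides the sign extension.
 */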
void memcpy_to_q8_23_from_p24(int32_t *dst, const uint8_t *src, size_t count)
{
    dst += count;
    src += count * 3;
    for (; count > 0; --count) {
        src -= 3;
#if HAVE_BIG_ENDIAN
        *--dst = (int8_t)src[0] << 16 | src[1] << 8 | src[2];
#else
        *--dst = (int8_t)src[2] << 16 | src[1] << 8 | src[0];
#endif
    }
}

void memcpy_to_q4_27_from_float(int32_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clampq4_27_from_float(*src++);
    }
}

void memcpy_to_i16_from_q8_23(int16_t *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp16(*src++ >> 8);
    }
}

void memcpy_to_float_from_q8_23(float *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = float_from_q8_23(*src++);
    }
}

void memcpy_to_i32_from_i16(int32_t *dst, const int16_t *src, size_t count)
{
    dst += count;
    src += count;
    for (; count > 0; --count) {
        *--dst = (int32_t)*--src << 16;
    }
}

void memcpy_to_i32_from_float(int32_t *dst, const float *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = clamp32_from_float(*src++);
    }
}

void memcpy_to_float_from_i32(float *dst, const int32_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = float_from_i32(*src++);
    }
}

void memcpy_to_float_from_float_with_clamping(float *dst, const float *src, size_t count,
                                              float absMax) {
    // Note: using NEON intrinsics (vminq_f32, vld1q_f32...) did NOT accelerate
    // the function when benchmarked. The compiler already vectorizes using FMINNM f32x4 & similar.
    // Note: clamping induces a ~20% overhead compared to memcpy for count in [64, 512].
    // See primitives_benchmark.
    for (; count > 0; --count) {
        const float sample = *src++;
        *dst++ = fmax(-absMax, fmin(absMax, sample));
    }
}

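/* Downmix interleaved stereo to mono by averaging each left/right pair.
 * The sum is computed in 32 bits before the >> 1, so it cannot overflow.
 */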
void downmix_to_mono_i16_from_stereo_i16(int16_t *dst, const int16_t *src, size_t count)
{
    for (; count > 0; --count) {
        *dst++ = (int16_t)(((int32_t)src[0] + (int32_t)src[1]) >> 1);
        src += 2;
    }
}

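/* Duplicate each mono sample into a stereo pair. Processed from the end so
 * the expansion may be done in place when dst and src start at the same
 * address.
 */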
void upmix_to_stereo_i16_from_mono_i16(int16_t *dst, const int16_t *src, size_t count)
{
    dst += count * 2;
    src += count;
    for (; count > 0; --count) {
        const int32_t temp = *--src;
        dst -= 2;
        dst[0] = temp;
        dst[1] = temp;
    }
}

void downmix_to_mono_float_from_stereo_float(float *dst, const float *src, size_t frames)
{
    for (; frames > 0; --frames) {
        *dst++ = (src[0] + src[1]) * 0.5;
        src += 2;
    }
}

void upmix_to_stereo_float_from_mono_float(float *dst, const float *src, size_t frames)
{
    dst += frames * 2;
    src += frames;
    for (; frames > 0; --frames) {
        const float temp = *--src;
        dst -= 2;
        dst[0] = temp;
        dst[1] = temp;
    }
}

size_t nonZeroMono32(const int32_t *samples, size_t count)
{
    size_t nonZero = 0;
    for (; count > 0; --count) {
        nonZero += *samples++ != 0;
    }
    return nonZero;
}

size_t nonZeroMono16(const int16_t *samples, size_t count)
{
    size_t nonZero = 0;
    for (; count > 0; --count) {
        nonZero += *samples++ != 0;
    }
    return nonZero;
}

size_t nonZeroStereo32(const int32_t *frames, size_t count)
{
    size_t nonZero = 0;
    for (; count > 0; --count) {
        nonZero += frames[0] != 0 || frames[1] != 0;
        frames += 2;
    }
    return nonZero;
}

size_t nonZeroStereo16(const int16_t *frames, size_t count)
{
    size_t nonZero = 0;
    for (; count > 0; --count) {
        nonZero += frames[0] != 0 || frames[1] != 0;
        frames += 2;
    }
    return nonZero;
}

/*
 * C macro to do channel mask copying independent of dst/src sample type.
 * Don't pass in any expressions for the macro arguments here.
 */
#define copy_frame_by_mask(dst, dmask, src, smask, count, zero) \
{ \
    uint32_t bit, ormask; \
    for (; (count) > 0; --(count)) { \
        ormask = (dmask) | (smask); \
        while (ormask) { \
            bit = ormask & -ormask; /* get lowest bit */ \
            ormask ^= bit; /* remove lowest bit */ \
            if ((dmask) & bit) { \
                *(dst)++ = (smask) & bit ? *(src)++ : (zero); \
            } else { /* source channel only */ \
                ++(src); \
            } \
        } \
    } \
}

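/* Copy interleaved frames between buffers with different channel masks.
 * Channels present in both masks are copied, destination-only channels are
 * zero-filled, and source-only channels are skipped. Identical masks reduce
 * to a single memcpy.
 */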
void memcpy_by_channel_mask(void *dst, uint32_t dst_mask,
        const void *src, uint32_t src_mask, size_t sample_size, size_t count)
{
#if 0
    /* alternate way of handling memcpy_by_channel_mask by using the idxary */
    int8_t idxary[32];
    uint32_t src_channels = popcount(src_mask);
    uint32_t dst_channels =
            memcpy_by_index_array_initialization(idxary, 32, dst_mask, src_mask);

    memcpy_by_idxary(dst, dst_channels, src, src_channels, idxary, sample_size, count);
#else
    if (dst_mask == src_mask) {
        memcpy(dst, src, sample_size * popcount(dst_mask) * count);
        return;
    }
    switch (sample_size) {
    case 1: {
        uint8_t *udst = (uint8_t*)dst;
        const uint8_t *usrc = (const uint8_t*)src;

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, 0);
    } break;
    case 2: {
        uint16_t *udst = (uint16_t*)dst;
        const uint16_t *usrc = (const uint16_t*)src;

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, 0);
    } break;
    case 3: { /* could be slow. use a struct to represent 3 bytes of data. */
        uint8x3_t *udst = (uint8x3_t*)dst;
        const uint8x3_t *usrc = (const uint8x3_t*)src;
        static const uint8x3_t zero; /* tricky - we use this to zero out a sample */

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, zero);
    } break;
    case 4: {
        uint32_t *udst = (uint32_t*)dst;
        const uint32_t *usrc = (const uint32_t*)src;

        copy_frame_by_mask(udst, dst_mask, usrc, src_mask, count, 0);
    } break;
    default:
        abort(); /* illegal value */
        break;
    }
#endif
}

/*
 * C macro to do copying by index array, to rearrange samples
 * within a frame. This is independent of src/dst sample type.
 * Don't pass in any expressions for the macro arguments here.
 */
#define copy_frame_by_idx(dst, dst_channels, src, src_channels, idxary, count, zero) \
{ \
    unsigned i; \
    int index; \
    for (; (count) > 0; --(count)) { \
        for (i = 0; i < (dst_channels); ++i) { \
            index = (idxary)[i]; \
            *(dst)++ = index < 0 ? (zero) : (src)[index]; \
        } \
        (src) += (src_channels); \
    } \
}

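/* Copy and reorder interleaved frames according to idxary: entry i holds the
 * source channel index for destination channel i, or a negative value to
 * zero-fill that destination channel.
 */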
void memcpy_by_index_array(void *dst, uint32_t dst_channels,
        const void *src, uint32_t src_channels,
        const int8_t *idxary, size_t sample_size, size_t count)
{
    switch (sample_size) {
    case 1: {
        uint8_t *udst = (uint8_t*)dst;
        const uint8_t *usrc = (const uint8_t*)src;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, 0);
    } break;
    case 2: {
        uint16_t *udst = (uint16_t*)dst;
        const uint16_t *usrc = (const uint16_t*)src;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, 0);
    } break;
    case 3: { /* could be slow. use a struct to represent 3 bytes of data. */
        uint8x3_t *udst = (uint8x3_t*)dst;
        const uint8x3_t *usrc = (const uint8x3_t*)src;
        static const uint8x3_t zero;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, zero);
    } break;
    case 4: {
        uint32_t *udst = (uint32_t*)dst;
        const uint32_t *usrc = (const uint32_t*)src;

        copy_frame_by_idx(udst, dst_channels, usrc, src_channels, idxary, count, 0);
    } break;
    default:
        abort(); /* illegal value */
        break;
    }
}

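/* Build an index array for memcpy_by_index_array() from a pair of channel
 * masks: matching channels map to their source position, destination-only
 * channels map to -1 (zero-fill), source-only channels are skipped.
 * Returns the number of destination channels.
 *
 * Illustrative sketch (hypothetical masks and buffers, bit i selecting channel i):
 *
 *   int8_t idxary[8];
 *   // src carries channels 0,1,2 (mask 0x7); dst wants channels 0,2,3 (mask 0xD).
 *   size_t n = memcpy_by_index_array_initialization(idxary, 8, 0xD, 0x7);
 *   // n == 3 and idxary == { 0, 2, -1 }: dst channel 3 has no source, so it
 *   // is zero-filled by the copy below.
 *   memcpy_by_index_array(dst, 3, src, 3, idxary, sizeof(int16_t), frames);
 */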
size_t memcpy_by_index_array_initialization(int8_t *idxary, size_t idxcount,
        uint32_t dst_mask, uint32_t src_mask)
{
    size_t n = 0;
    int srcidx = 0;
    uint32_t bit, ormask = src_mask | dst_mask;

    while (ormask && n < idxcount) {
        bit = ormask & -ormask;          /* get lowest bit */
        ormask ^= bit;                   /* remove lowest bit */
        if (src_mask & dst_mask & bit) { /* matching channel */
            idxary[n++] = srcidx++;
        } else if (src_mask & bit) {     /* source channel only */
            ++srcidx;
        } else {                         /* destination channel only */
            idxary[n++] = -1;
        }
    }
    return n + popcount(ormask & dst_mask);
}

size_t memcpy_by_index_array_initialization_src_index(int8_t *idxary, size_t idxcount,
        uint32_t dst_mask, uint32_t src_mask) {
    size_t dst_count = popcount(dst_mask);
    if (idxcount == 0) {
        return dst_count;
    }
    if (dst_count > idxcount) {
        dst_count = idxcount;
    }

    size_t src_idx, dst_idx;
    for (src_idx = 0, dst_idx = 0; dst_idx < dst_count; ++dst_idx) {
        if (src_mask & 1) {
            idxary[dst_idx] = src_idx++;
        } else {
            idxary[dst_idx] = -1;
        }
        src_mask >>= 1;
    }
    return dst_idx;
}

size_t memcpy_by_index_array_initialization_dst_index(int8_t *idxary, size_t idxcount,
        uint32_t dst_mask, uint32_t src_mask) {
    size_t src_idx, dst_idx;
    size_t dst_count = __builtin_popcount(dst_mask);
    size_t src_count = __builtin_popcount(src_mask);
    if (idxcount == 0) {
        return dst_count;
    }
    if (dst_count > idxcount) {
        dst_count = idxcount;
    }
    for (src_idx = 0, dst_idx = 0; dst_idx < dst_count; ++src_idx) {
        if (dst_mask & 1) {
            idxary[dst_idx++] = src_idx < src_count ? (signed)src_idx : -1;
        }
        dst_mask >>= 1;
    }
    return dst_idx;
}

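/* The accumulate_* helpers mix (add) src into dst in place, saturating to the
 * destination format where applicable; accumulate_float does not clamp.
 */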
void accumulate_i16(int16_t *dst, const int16_t *src, size_t count) {
    while (count--) {
        *dst = clamp16((int32_t)*dst + *src++);
        ++dst;
    }
}

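/* Mix biased 8-bit samples. The intermediate sum lies in [-128, 382]; bit 8 is
 * set only when the result falls outside [0, 255], in which case (~sum >> 9)
 * evaluates to 0xFF (overflow) or 0x00 (underflow), giving branch-free
 * saturation.
 */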
void accumulate_u8(uint8_t *dst, const uint8_t *src, size_t count) {
    int32_t sum;
    for (; count > 0; --count) {
        // 8-bit samples are centered around 0x80.
        sum = *dst + *src++ - 0x80;
        // Clamp to [0, 0xff].
        *dst++ = (sum & 0x100) ? (~sum >> 9) : sum;
    }
}

void accumulate_p24(uint8_t *dst, const uint8_t *src, size_t count) {
    for (; count > 0; --count) {
        // Unpack.
        int32_t dst_q8_23 = 0;
        int32_t src_q8_23 = 0;
        memcpy_to_q8_23_from_p24(&dst_q8_23, dst, 1);
        memcpy_to_q8_23_from_p24(&src_q8_23, src, 1);

        // Accumulate and overwrite.
        dst_q8_23 += src_q8_23;
        memcpy_to_p24_from_q8_23(dst, &dst_q8_23, 1);

        // Move on to next sample.
        dst += 3;
        src += 3;
    }
}

void accumulate_q8_23(int32_t *dst, const int32_t *src, size_t count) {
    for (; count > 0; --count) {
        *dst = clamp24_from_q8_23(*dst + *src++);
        ++dst;
    }
}

void accumulate_i32(int32_t *dst, const int32_t *src, size_t count) {
    for (; count > 0; --count) {
        *dst = clamp32((int64_t)*dst + *src++);
        ++dst;
    }
}

void accumulate_float(float *dst, const float *src, size_t count) {
    for (; count > 0; --count) {
        *dst++ += *src++;
    }
}