/* libFLAC - Free Lossless Audio Codec library
 * Copyright (C) 2000-2009  Josh Coalson
 * Copyright (C) 2011-2016  Xiph.Org Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * - Neither the name of the Xiph.org Foundation nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
| 32 | |
| 33 | #ifdef HAVE_CONFIG_H |
| 34 | # include <config.h> |
| 35 | #endif |
| 36 | |
| 37 | #include "private/cpu.h" |
| 38 | |
| 39 | #ifndef FLAC__NO_ASM |
| 40 | #if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN |
| 41 | #include "private/stream_encoder.h" |
| 42 | #include "private/bitmath.h" |
| 43 | #ifdef FLAC__SSSE3_SUPPORTED |
| 44 | |
| 45 | #include <stdlib.h> /* for abs() */ |
| 46 | #include <tmmintrin.h> /* SSSE3 */ |
| 47 | #include "FLAC/assert.h" |
| 48 | |
/*
 * SSSE3 version of precompute_partition_info_sums().
 *
 * For each partition at max_partition_order, computes the sum of the
 * absolute values of the residual samples belonging to that partition,
 * then derives the sums for every lower order down to min_partition_order
 * by adding adjacent pairs of the previous (finer) order's sums.
 * Output layout: sums for max_partition_order occupy
 * abs_residual_partition_sums[0 .. (1<<max_partition_order)-1]; each
 * successively lower order's sums are appended immediately after.
 */
FLAC__SSE_TARGET("ssse3")
void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residual[], FLAC__uint64 abs_residual_partition_sums[],
		unsigned residual_samples, unsigned predictor_order, unsigned min_partition_order, unsigned max_partition_order, unsigned bps)
{
	/* number of samples per partition at the finest (maximum) order */
	const unsigned default_partition_samples = (residual_samples + predictor_order) >> max_partition_order;
	unsigned partitions = 1u << max_partition_order;

	FLAC__ASSERT(default_partition_samples > predictor_order);

	/* first do max_partition_order */
	{
		/* Each |residual| value occupies at most bps + FLAC__MAX_EXTRA_RESIDUAL_BPS
		 * bits; summing default_partition_samples of them adds up to
		 * ilog2(default_partition_samples) more bits.  If the worst-case total
		 * still fits in 32 bits, the faster 32-bit accumulator path is safe. */
		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
		/* 'end' starts at -predictor_order (via unsigned wraparound) because the
		 * first partition is short by predictor_order warm-up samples that have
		 * no residual; after the first `end += default_partition_samples` it
		 * indexes the end of partition 0 within residual[]. */
		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);

		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < threshold) {
			/* 32-bit accumulators: four partial sums per SSE register */
			for(partition = residual_sample = 0; partition < partitions; partition++) {
				__m128i mm_sum = _mm_setzero_si128();
				unsigned e1, e3;
				end += default_partition_samples;

				/* e1 = residual_sample rounded up to a multiple of 4 (start of
				 * the 4-wide SIMD body); e3 = end rounded down to a multiple
				 * of 4 (end of the SIMD body) */
				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
				if(e1 > end)
					e1 = end; /* try flac -l 1 -b 16 and you'll be here */

				/* scalar head loop, up to the 4-sample boundary */
				/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
				for( ; residual_sample < e1; residual_sample++) {
					__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample]));
					mm_sum = _mm_add_epi32(mm_sum, mm_res);
				}

				/* SIMD body: four residuals per iteration */
				for( ; residual_sample < e3; residual_sample+=4) {
					__m128i mm_res = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
					mm_sum = _mm_add_epi32(mm_sum, mm_res);
				}

				/* scalar tail loop for the remaining 0..3 samples */
				for( ; residual_sample < end; residual_sample++) {
					__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample]));
					mm_sum = _mm_add_epi32(mm_sum, mm_res);
				}

				/* horizontal reduction: two hadds collapse the four 32-bit
				 * lanes into lane 0 */
				mm_sum = _mm_hadd_epi32(mm_sum, mm_sum);
				mm_sum = _mm_hadd_epi32(mm_sum, mm_sum);
				abs_residual_partition_sums[partition] = (FLAC__uint32)_mm_cvtsi128_si32(mm_sum);
/* workaround for a bug in MSVC2015U2 - see https://connect.microsoft.com/VisualStudio/feedback/details/2659191/incorrect-code-generation-for-x86-64 */
#if (defined _MSC_VER) && (_MSC_FULL_VER == 190023918) && (defined FLAC__CPU_X86_64)
				abs_residual_partition_sums[partition] &= 0xFFFFFFFF;
#endif
			}
		}
		else { /* have to pessimistically use 64 bits for accumulator */
			/* 64-bit accumulators: two partial sums per SSE register */
			for(partition = residual_sample = 0; partition < partitions; partition++) {
				__m128i mm_sum = _mm_setzero_si128();
				unsigned e1, e3;
				end += default_partition_samples;

				/* e1/e3 are the 2-sample-aligned bounds of the SIMD body
				 * (the assert holds because default_partition_samples > 1 here;
				 * the -l 1 -b 16 case above cannot reach this branch) */
				e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
				FLAC__ASSERT(e1 <= end);

				/* scalar head: _mm_cvtsi32_si128 zeroes the upper 96 bits, so
				 * after abs the low 64-bit lane already holds the value
				 * zero-extended to 64 bits and can be added with add_epi64 */
				for( ; residual_sample < e1; residual_sample++) {
					__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample])); /* 0 0 0 |r0| == 00 |r0_64| */
					mm_sum = _mm_add_epi64(mm_sum, mm_res);
				}

				/* SIMD body: load two residuals, take 32-bit abs, then shuffle
				 * a zero into the high half of each 64-bit lane so both abs
				 * values are zero-extended to 64 bits */
				for( ; residual_sample < e3; residual_sample+=2) {
					__m128i mm_res = _mm_abs_epi32(_mm_loadl_epi64((const __m128i*)(residual+residual_sample))); /* 0 0 |r1| |r0| */
					mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0 |r1| 0 |r0| == |r1_64| |r0_64| */
					mm_sum = _mm_add_epi64(mm_sum, mm_res);
				}

				/* scalar tail for a possible final odd sample */
				for( ; residual_sample < end; residual_sample++) {
					__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample]));
					mm_sum = _mm_add_epi64(mm_sum, mm_res);
				}

				/* horizontal reduction: fold the high 64-bit lane into the low
				 * lane, then store the low 64 bits into the output array */
				mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));
				_mm_storel_epi64((__m128i*)(abs_residual_partition_sums+partition), mm_sum);
			}
		}
	}

	/* now merge partitions for lower orders */
	{
		/* each lower-order partition sum is the sum of the two corresponding
		 * higher-order partition sums; results are appended right after the
		 * previous order's block of sums */
		unsigned from_partition = 0, to_partition = partitions;
		int partition_order;
		for(partition_order = (int)max_partition_order - 1; partition_order >= (int)min_partition_order; partition_order--) {
			unsigned i;
			partitions >>= 1;
			for(i = 0; i < partitions; i++) {
				abs_residual_partition_sums[to_partition++] =
					abs_residual_partition_sums[from_partition  ] +
					abs_residual_partition_sums[from_partition+1];
				from_partition += 2;
			}
		}
	}
}
| 145 | |
| 146 | #endif /* FLAC__SSSE3_SUPPORTED */ |
| 147 | #endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */ |
| 148 | #endif /* FLAC__NO_ASM */ |