// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
//
// Author: somnath@google.com (Somnath Banerjee)
//         cduvivier@google.com (Christian Duvivier)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)

// The 3-coeff sparse transform in SSE2 does not seem to be meaningfully
// faster than the plain-C one, so it is disabled by default. Uncomment the
// following line to enable it:
// #define USE_TRANSFORM_AC3

#include <emmintrin.h>
#include "../dec/vp8i.h"

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

static void Transform(const int16_t* in, uint8_t* dst, int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 = 20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
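  //   A quick numeric check of the trick, with x = 100:
  //      (100 * K1) >> 16 = 8562700 >> 16 = 130
  //      ((100 * k1) >> 16) + 100 = (2009100 >> 16) + 100 = 30 + 100 = 130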
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two transforms
  // in parallel). In the case of only one transform, the second half of the
  // vectors will just contain random values we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4 blocks.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4 blocks.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i dst0, dst1, dst2, dst3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS));
      dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS));
      dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS));
      dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
    } else {
      // Load four bytes/pixels per line.
      dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
      dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
      dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
      dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
    }
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0);
      _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1);
      _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2);
      _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
    } else {
      // Store four bytes/pixels per line.
      *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
      *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
      *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
      *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
    }
  }
}
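
#if 0  // Unused illustrative sketch: a plain-C equivalent of the fixed-point
       // multiply trick used in Transform() above. It assumes 32-bit ints and
       // an arithmetic right shift; the hypothetical MulK1()/MulK2() helpers
       // are for exposition only.
static WEBP_INLINE int MulK1(int x) {  // (x * K1) >> 16, with K1 = 85627
  return ((x * 20091) >> 16) + x;      // ((x * k1) >> 16) + x
}
static WEBP_INLINE int MulK2(int x) {  // (x * K2) >> 16, with K2 = 35468
  return ((x * -30068) >> 16) + x;     // ((x * k2) >> 16) + x
}
#endif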

#if defined(USE_TRANSFORM_AC3)
#define MUL(a, b) (((a) * (b)) >> 16)
static void TransformAC3(const int16_t* in, uint8_t* dst) {
  static const int kC1 = 20091 + (1 << 16);
  static const int kC2 = 35468;
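  // Note: kC1/kC2 are full-range 32-bit constants here (kC1 = 85627), so
  // MUL() applies them directly, without the k = K - (1 << 16) trick above.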
  const __m128i A = _mm_set1_epi16(in[0] + 4);
  const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
  const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
  const int c1 = MUL(in[1], kC2);
  const int d1 = MUL(in[1], kC1);
  const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
  const __m128i B = _mm_adds_epi16(A, CD);
  const __m128i m0 = _mm_adds_epi16(B, d4);
  const __m128i m1 = _mm_adds_epi16(B, c4);
  const __m128i m2 = _mm_subs_epi16(B, c4);
  const __m128i m3 = _mm_subs_epi16(B, d4);
  const __m128i zero = _mm_setzero_si128();
  // Load the source pixels.
  __m128i dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
  __m128i dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
  __m128i dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
  __m128i dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
  // Convert to 16b.
  dst0 = _mm_unpacklo_epi8(dst0, zero);
  dst1 = _mm_unpacklo_epi8(dst1, zero);
  dst2 = _mm_unpacklo_epi8(dst2, zero);
  dst3 = _mm_unpacklo_epi8(dst3, zero);
  // Add the inverse transform.
  dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
  dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
  dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
  dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
  // Unsigned saturate to 8b.
  dst0 = _mm_packus_epi16(dst0, dst0);
  dst1 = _mm_packus_epi16(dst1, dst1);
  dst2 = _mm_packus_epi16(dst2, dst2);
  dst3 = _mm_packus_epi16(dst3, dst3);
  // Store the results.
  *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
  *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
  *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
  *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
}
#undef MUL
#endif  // USE_TRANSFORM_AC3

//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)

// Compute abs(p - q) = subs(p - q) OR subs(q - p)
#define MM_ABS(p, q) _mm_or_si128(   \
    _mm_subs_epu8((q), (p)),         \
    _mm_subs_epu8((p), (q)))
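// (one of the two unsigned saturated subtractions is always zero, e.g. for
//  p = 3, q = 10: subs(3 - 10) = 0 and subs(10 - 3) = 7, so OR gives abs = 7)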

// Shift each byte of "x" by 3 bits while preserving the sign bit.
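// SSE2 has no per-byte arithmetic shift, so the bytes are sign-extended to
// 16 bits first: e.g. -9 (0xf7) >> 3 must yield -2 (0xfe), not 30 (0x1e).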
static WEBP_INLINE void SignedShift8b(__m128i* const x) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i signs = _mm_cmpgt_epi8(zero, *x);
  const __m128i lo_0 = _mm_unpacklo_epi8(*x, signs);  // s8 -> s16 sign extend
  const __m128i hi_0 = _mm_unpackhi_epi8(*x, signs);
  const __m128i lo_1 = _mm_srai_epi16(lo_0, 3);
  const __m128i hi_1 = _mm_srai_epi16(hi_0, 3);
  *x = _mm_packs_epi16(lo_1, hi_1);
}

#define FLIP_SIGN_BIT2(a, b) {     \
  a = _mm_xor_si128(a, sign_bit);  \
  b = _mm_xor_si128(b, sign_bit);  \
}

#define FLIP_SIGN_BIT4(a, b, c, d) {  \
  FLIP_SIGN_BIT2(a, b);               \
  FLIP_SIGN_BIT2(c, d);               \
}

// input/output is uint8_t
static WEBP_INLINE void GetNotHEV(const __m128i* const p1,
                                  const __m128i* const p0,
                                  const __m128i* const q0,
                                  const __m128i* const q1,
                                  int hev_thresh, __m128i* const not_hev) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i t_1 = MM_ABS(*p1, *p0);
  const __m128i t_2 = MM_ABS(*q1, *q0);

  const __m128i h = _mm_set1_epi8(hev_thresh);
  const __m128i t_3 = _mm_subs_epu8(t_1, h);  // abs(p1 - p0) - hev_thresh
  const __m128i t_4 = _mm_subs_epu8(t_2, h);  // abs(q1 - q0) - hev_thresh

  *not_hev = _mm_or_si128(t_3, t_4);
  *not_hev = _mm_cmpeq_epi8(*not_hev, zero);  // t_1 <= hev_thresh && t_2 <= hev_thresh
}

// input pixels are int8_t
static WEBP_INLINE void GetBaseDelta(const __m128i* const p1,
                                     const __m128i* const p0,
                                     const __m128i* const q0,
                                     const __m128i* const q1,
                                     __m128i* const delta) {
  // beware of addition order, for saturation!
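  // Saturated addition is not associative, so the three (q0 - p0) terms are
  // accumulated one at a time rather than pre-multiplied.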
  const __m128i p1_q1 = _mm_subs_epi8(*p1, *q1);   // p1 - q1
  const __m128i q0_p0 = _mm_subs_epi8(*q0, *p0);   // q0 - p0
  const __m128i s1 = _mm_adds_epi8(p1_q1, q0_p0);  // p1 - q1 + 1 * (q0 - p0)
  const __m128i s2 = _mm_adds_epi8(q0_p0, s1);     // p1 - q1 + 2 * (q0 - p0)
  const __m128i s3 = _mm_adds_epi8(q0_p0, s2);     // p1 - q1 + 3 * (q0 - p0)
  *delta = s3;
}

// input and output are int8_t
static WEBP_INLINE void DoSimpleFilter(__m128i* const p0, __m128i* const q0,
                                       const __m128i* const fl) {
  const __m128i k3 = _mm_set1_epi8(3);
  const __m128i k4 = _mm_set1_epi8(4);
  __m128i v3 = _mm_adds_epi8(*fl, k3);
  __m128i v4 = _mm_adds_epi8(*fl, k4);

  SignedShift8b(&v4);            // v4 >> 3
  SignedShift8b(&v3);            // v3 >> 3
  *q0 = _mm_subs_epi8(*q0, v4);  // q0 -= v4
  *p0 = _mm_adds_epi8(*p0, v3);  // p0 += v3
}

// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
// Pixels 'pi' and 'qi' are int8_t on input, uint8_t on output (sign flip).
static WEBP_INLINE void Update2Pixels(__m128i* const pi, __m128i* const qi,
                                      const __m128i* const a0_lo,
                                      const __m128i* const a0_hi) {
  const __m128i a1_lo = _mm_srai_epi16(*a0_lo, 7);
  const __m128i a1_hi = _mm_srai_epi16(*a0_hi, 7);
  const __m128i delta = _mm_packs_epi16(a1_lo, a1_hi);
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  *pi = _mm_adds_epi8(*pi, delta);
  *qi = _mm_subs_epi8(*qi, delta);
  FLIP_SIGN_BIT2(*pi, *qi);
}

// input pixels are uint8_t
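// Sets 'mask' to 0xff wherever the filter threshold test of the VP8 spec
// holds, i.e. where abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= thresh.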
static WEBP_INLINE void NeedsFilter(const __m128i* const p1,
                                    const __m128i* const p0,
                                    const __m128i* const q0,
                                    const __m128i* const q1,
                                    int thresh, __m128i* const mask) {
  const __m128i m_thresh = _mm_set1_epi8(thresh);
  const __m128i t1 = MM_ABS(*p1, *q1);        // abs(p1 - q1)
  const __m128i kFE = _mm_set1_epi8(0xFE);
  const __m128i t2 = _mm_and_si128(t1, kFE);  // set lsb of each byte to zero
  const __m128i t3 = _mm_srli_epi16(t2, 1);   // abs(p1 - q1) / 2

  const __m128i t4 = MM_ABS(*p0, *q0);        // abs(p0 - q0)
  const __m128i t5 = _mm_adds_epu8(t4, t4);   // abs(p0 - q0) * 2
  const __m128i t6 = _mm_adds_epu8(t5, t3);   // abs(p0-q0)*2 + abs(p1-q1)/2

  const __m128i t7 = _mm_subs_epu8(t6, m_thresh);  // nonzero iff t6 > m_thresh
  *mask = _mm_cmpeq_epi8(t7, _mm_setzero_si128());
}

//------------------------------------------------------------------------------
// Edge filtering functions

// Applies filter on 2 pixels (p0 and q0)
static WEBP_INLINE void DoFilter2(__m128i* const p1, __m128i* const p0,
                                  __m128i* const q0, __m128i* const q1,
                                  int thresh) {
  __m128i a, mask;
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  // convert p1/q1 to int8_t (for GetBaseDelta)
  const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
  const __m128i q1s = _mm_xor_si128(*q1, sign_bit);

  NeedsFilter(p1, p0, q0, q1, thresh, &mask);

  FLIP_SIGN_BIT2(*p0, *q0);
  GetBaseDelta(&p1s, p0, q0, &q1s, &a);
  a = _mm_and_si128(a, mask);  // mask filter values we don't care about
  DoSimpleFilter(p0, q0, &a);
  FLIP_SIGN_BIT2(*p0, *q0);
}

// Applies filter on 4 pixels (p1, p0, q0 and q1)
static WEBP_INLINE void DoFilter4(__m128i* const p1, __m128i* const p0,
                                  __m128i* const q0, __m128i* const q1,
                                  const __m128i* const mask, int hev_thresh) {
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  const __m128i k64 = _mm_set1_epi8(0x40);
  const __m128i zero = _mm_setzero_si128();
  __m128i not_hev;
  __m128i t1, t2, t3;

  // compute hev mask
  GetNotHEV(p1, p0, q0, q1, hev_thresh, &not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);

  t1 = _mm_subs_epi8(*p1, *q1);        // p1 - q1
  t1 = _mm_andnot_si128(not_hev, t1);  // hev(p1 - q1)
  t2 = _mm_subs_epi8(*q0, *p0);        // q0 - p0
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 1 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 2 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 3 * (q0 - p0)
  t1 = _mm_and_si128(t1, *mask);       // mask filter values we don't care about

  t2 = _mm_set1_epi8(3);
  t3 = _mm_set1_epi8(4);
  t2 = _mm_adds_epi8(t1, t2);    // 3 * (q0 - p0) + hev(p1 - q1) + 3
  t3 = _mm_adds_epi8(t1, t3);    // 3 * (q0 - p0) + hev(p1 - q1) + 4
  SignedShift8b(&t2);            // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
  SignedShift8b(&t3);            // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
  *p0 = _mm_adds_epi8(*p0, t2);  // p0 += t2
  *q0 = _mm_subs_epi8(*q0, t3);  // q0 -= t3
  FLIP_SIGN_BIT2(*p0, *q0);

  // this is equivalent to a signed (t3 + 1) >> 1 calculation
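  // (bias t3 by 0x80 to make it unsigned, use _mm_avg_epu8 with zero, which
  //  computes (x + 1) >> 1, then subtract k64 = 0x40 to remove the
  //  shifted-down bias)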
  t2 = _mm_add_epi8(t3, sign_bit);
  t3 = _mm_avg_epu8(t2, zero);
  t3 = _mm_sub_epi8(t3, k64);

  t3 = _mm_and_si128(not_hev, t3);  // if !hev
  *q1 = _mm_subs_epi8(*q1, t3);     // q1 -= t3
  *p1 = _mm_adds_epi8(*p1, t3);     // p1 += t3
  FLIP_SIGN_BIT2(*p1, *q1);
}

// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
static WEBP_INLINE void DoFilter6(__m128i* const p2, __m128i* const p1,
                                  __m128i* const p0, __m128i* const q0,
                                  __m128i* const q1, __m128i* const q2,
                                  const __m128i* const mask, int hev_thresh) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  __m128i a, not_hev;

  // compute hev mask
  GetNotHEV(p1, p0, q0, q1, hev_thresh, &not_hev);

  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);
  GetBaseDelta(p1, p0, q0, q1, &a);

  {  // do simple filter on pixels with hev
    const __m128i m = _mm_andnot_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    DoSimpleFilter(p0, q0, &f);
  }

  {  // do strong filter on pixels with not hev
    const __m128i k9 = _mm_set1_epi16(0x0900);
    const __m128i k63 = _mm_set1_epi16(63);

    const __m128i m = _mm_and_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);

    const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
    const __m128i f_hi = _mm_unpackhi_epi8(zero, f);

    const __m128i f9_lo = _mm_mulhi_epi16(f_lo, k9);   // Filter (lo) * 9
    const __m128i f9_hi = _mm_mulhi_epi16(f_hi, k9);   // Filter (hi) * 9
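    // (f_lo/f_hi hold the filter value in the high byte of each 16-bit lane,
    //  i.e. f * 256, so mulhi by k9 = 0x0900 gives (f * 256 * 2304) >> 16,
    //  which is exactly f * 9)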

    const __m128i a2_lo = _mm_add_epi16(f9_lo, k63);  // Filter * 9 + 63
    const __m128i a2_hi = _mm_add_epi16(f9_hi, k63);  // Filter * 9 + 63

    const __m128i a1_lo = _mm_add_epi16(a2_lo, f9_lo);  // Filter * 18 + 63
    const __m128i a1_hi = _mm_add_epi16(a2_hi, f9_hi);  // Filter * 18 + 63

    const __m128i a0_lo = _mm_add_epi16(a1_lo, f9_lo);  // Filter * 27 + 63
    const __m128i a0_hi = _mm_add_epi16(a1_hi, f9_hi);  // Filter * 27 + 63

    Update2Pixels(p2, q2, &a2_lo, &a2_hi);
    Update2Pixels(p1, q1, &a1_lo, &a1_hi);
    Update2Pixels(p0, q0, &a0_lo, &a0_hi);
  }
}

// Reads 8 rows across a vertical edge.
//
// TODO(somnath): Investigate _mm_shuffle*; also see if this can be broken into
// two Load4x4() calls to avoid code duplication.
static WEBP_INLINE void Load8x4(const uint8_t* const b, int stride,
                                __m128i* const p, __m128i* const q) {
  __m128i t1, t2;

  // Load 0th, 1st, 4th and 5th rows
  __m128i r0 = _mm_cvtsi32_si128(*((int*)&b[0 * stride]));  // 03 02 01 00
  __m128i r1 = _mm_cvtsi32_si128(*((int*)&b[1 * stride]));  // 13 12 11 10
  __m128i r4 = _mm_cvtsi32_si128(*((int*)&b[4 * stride]));  // 43 42 41 40
  __m128i r5 = _mm_cvtsi32_si128(*((int*)&b[5 * stride]));  // 53 52 51 50

  r0 = _mm_unpacklo_epi32(r0, r4);  // 43 42 41 40 03 02 01 00
  r1 = _mm_unpacklo_epi32(r1, r5);  // 53 52 51 50 13 12 11 10

  // t1 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
  t1 = _mm_unpacklo_epi8(r0, r1);

  // Load 2nd, 3rd, 6th and 7th rows
  r0 = _mm_cvtsi32_si128(*((int*)&b[2 * stride]));  // 23 22 21 20
  r1 = _mm_cvtsi32_si128(*((int*)&b[3 * stride]));  // 33 32 31 30
  r4 = _mm_cvtsi32_si128(*((int*)&b[6 * stride]));  // 63 62 61 60
  r5 = _mm_cvtsi32_si128(*((int*)&b[7 * stride]));  // 73 72 71 70

  r0 = _mm_unpacklo_epi32(r0, r4);  // 63 62 61 60 23 22 21 20
  r1 = _mm_unpacklo_epi32(r1, r5);  // 73 72 71 70 33 32 31 30

  // t2 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
  t2 = _mm_unpacklo_epi8(r0, r1);

  // t1 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
  // t2 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
  r0 = t1;
  t1 = _mm_unpacklo_epi16(t1, t2);
  t2 = _mm_unpackhi_epi16(r0, t2);

  // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  *p = _mm_unpacklo_epi32(t1, t2);
  *q = _mm_unpackhi_epi32(t1, t2);
}

static WEBP_INLINE void Load16x4(const uint8_t* const r0,
                                 const uint8_t* const r8,
                                 int stride,
                                 __m128i* const p1, __m128i* const p0,
                                 __m128i* const q0, __m128i* const q1) {
  __m128i t1, t2;
  // Assume the pixels around the edge (|) are numbered as follows
  //   00 01 | 02 03
  //   10 11 | 12 13
  //    ...  |  ...
  //   e0 e1 | e2 e3
  //   f0 f1 | f2 f3
  //
  // r0 is pointing to the 0th row (00)
  // r8 is pointing to the 8th row (80)

  // Load
  // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
  // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
  Load8x4(r0, stride, p1, q0);
  Load8x4(r8, stride, p0, q1);

  t1 = *p1;
  t2 = *q0;
  // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
  // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
  // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
  // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
  *p1 = _mm_unpacklo_epi64(t1, *p0);
  *p0 = _mm_unpackhi_epi64(t1, *p0);
  *q0 = _mm_unpacklo_epi64(t2, *q1);
  *q1 = _mm_unpackhi_epi64(t2, *q1);
}

static WEBP_INLINE void Store4x4(__m128i* const x, uint8_t* dst, int stride) {
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    *((int32_t*)dst) = _mm_cvtsi128_si32(*x);
    *x = _mm_srli_si128(*x, 4);
  }
}

// Transpose back and store
static WEBP_INLINE void Store16x4(const __m128i* const p1,
                                  const __m128i* const p0,
                                  const __m128i* const q0,
                                  const __m128i* const q1,
                                  uint8_t* r0, uint8_t* r8,
                                  int stride) {
  __m128i t1, p1_s, p0_s, q0_s, q1_s;

  // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
  // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
  t1 = *p0;
  p0_s = _mm_unpacklo_epi8(*p1, t1);
  p1_s = _mm_unpackhi_epi8(*p1, t1);

  // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
  // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
  t1 = *q0;
  q0_s = _mm_unpacklo_epi8(t1, *q1);
  q1_s = _mm_unpackhi_epi8(t1, *q1);

  // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
  // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
  t1 = p0_s;
  p0_s = _mm_unpacklo_epi16(t1, q0_s);
  q0_s = _mm_unpackhi_epi16(t1, q0_s);

  // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
  // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
  t1 = p1_s;
  p1_s = _mm_unpacklo_epi16(t1, q1_s);
  q1_s = _mm_unpackhi_epi16(t1, q1_s);

  Store4x4(&p0_s, r0, stride);
  r0 += 4 * stride;
  Store4x4(&q0_s, r0, stride);

  Store4x4(&p1_s, r8, stride);
  r8 += 4 * stride;
  Store4x4(&q1_s, r8, stride);
}

//------------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static void SimpleVFilter16(uint8_t* p, int stride, int thresh) {
  // Load
  __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
  __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
  __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
  __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);

  DoFilter2(&p1, &p0, &q0, &q1, thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-stride], p0);
  _mm_storeu_si128((__m128i*)&p[0], q0);
}

static void SimpleHFilter16(uint8_t* p, int stride, int thresh) {
  __m128i p1, p0, q0, q1;

  p -= 2;  // beginning of p1

  Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
  DoFilter2(&p1, &p0, &q0, &q1, thresh);
  Store16x4(&p1, &p0, &q0, &q1, p, p + 8 * stride, stride);
}

static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16(p, stride, thresh);
  }
}

static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16(p, stride, thresh);
  }
}

//------------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

#define MAX_DIFF1(p3, p2, p1, p0, m) do {  \
  m = MM_ABS(p1, p0);                      \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));     \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));     \
} while (0)

#define MAX_DIFF2(p3, p2, p1, p0, m) do {  \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));     \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));     \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));     \
} while (0)

#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) {   \
  e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]);  \
  e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]);  \
  e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]);  \
  e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]);  \
}

#define LOADUV_H_EDGE(p, u, v, stride) do {                    \
  const __m128i U = _mm_loadl_epi64((__m128i*)&(u)[(stride)]); \
  const __m128i V = _mm_loadl_epi64((__m128i*)&(v)[(stride)]); \
  p = _mm_unpacklo_epi64(U, V);                                \
} while (0)

#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) {  \
  LOADUV_H_EDGE(e1, u, v, 0 * stride);                   \
  LOADUV_H_EDGE(e2, u, v, 1 * stride);                   \
  LOADUV_H_EDGE(e3, u, v, 2 * stride);                   \
  LOADUV_H_EDGE(e4, u, v, 3 * stride);                   \
}

#define STOREUV(p, u, v, stride) {              \
  _mm_storel_epi64((__m128i*)&u[(stride)], p);  \
  p = _mm_srli_si128(p, 8);                     \
  _mm_storel_epi64((__m128i*)&v[(stride)], p);  \
}

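// On input, 'mask' holds the MAX_DIFF* of the edge neighborhood; the output
// mask selects the pixels where that maximum is <= ithresh and where
// NeedsFilter() passes for 'thresh'.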
static WEBP_INLINE void ComplexMask(const __m128i* const p1,
                                    const __m128i* const p0,
                                    const __m128i* const q0,
                                    const __m128i* const q1,
                                    int thresh, int ithresh,
                                    __m128i* const mask) {
  const __m128i it = _mm_set1_epi8(ithresh);
  const __m128i diff = _mm_subs_epu8(*mask, it);
  const __m128i thresh_mask = _mm_cmpeq_epi8(diff, _mm_setzero_si128());
  __m128i filter_mask;
  NeedsFilter(p1, p0, q0, q1, thresh, &filter_mask);
  *mask = _mm_and_si128(thresh_mask, filter_mask);
}

// on macroblock edges
static void VFilter16(uint8_t* p, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i t1;
  __m128i mask;
  __m128i p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
  _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
  _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
  _mm_storeu_si128((__m128i*)&p[+0 * stride], q0);
  _mm_storeu_si128((__m128i*)&p[+1 * stride], q1);
  _mm_storeu_si128((__m128i*)&p[+2 * stride], q2);
}

static void HFilter16(uint8_t* p, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const b = p - 4;
  Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(&p3, &p2, &p1, &p0, b, b + 8 * stride, stride);
  Store16x4(&q0, &q1, &q2, &q3, p, p + 8 * stride, stride);
}

// on three inner edges
static void VFilter16i(uint8_t* p, int stride,
                       int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i p3, p2, p1, p0;  // loop invariants

  LOAD_H_EDGES4(p, stride, p3, p2, p1, p0);  // prologue

  for (k = 3; k > 0; --k) {
    __m128i mask, tmp1, tmp2;
    uint8_t* const b = p + 2 * stride;  // beginning of p1
    p += 4 * stride;

    MAX_DIFF1(p3, p2, p1, p0, mask);  // compute partial mask
    LOAD_H_EDGES4(p, stride, p3, p2, tmp1, tmp2);
    MAX_DIFF2(p3, p2, tmp1, tmp2, mask);

    // p3 and p2 are not just temporary variables here: they will be
    // re-used for the next span. And q2/q3 will become p1/p0 accordingly.
    ComplexMask(&p1, &p0, &p3, &p2, thresh, ithresh, &mask);
    DoFilter4(&p1, &p0, &p3, &p2, &mask, hev_thresh);

    // Store
    _mm_storeu_si128((__m128i*)&b[0 * stride], p1);
    _mm_storeu_si128((__m128i*)&b[1 * stride], p0);
    _mm_storeu_si128((__m128i*)&b[2 * stride], p3);
    _mm_storeu_si128((__m128i*)&b[3 * stride], p2);

    // rotate samples
    p1 = tmp1;
    p0 = tmp2;
  }
}

static void HFilter16i(uint8_t* p, int stride,
                       int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i p3, p2, p1, p0;  // loop invariants

  Load16x4(p, p + 8 * stride, stride, &p3, &p2, &p1, &p0);  // prologue

  for (k = 3; k > 0; --k) {
    __m128i mask, tmp1, tmp2;
    uint8_t* const b = p + 2;  // beginning of p1

    p += 4;  // beginning of q0 (and next span)

    MAX_DIFF1(p3, p2, p1, p0, mask);  // compute partial mask
    Load16x4(p, p + 8 * stride, stride, &p3, &p2, &tmp1, &tmp2);
    MAX_DIFF2(p3, p2, tmp1, tmp2, mask);

    ComplexMask(&p1, &p0, &p3, &p2, thresh, ithresh, &mask);
    DoFilter4(&p1, &p0, &p3, &p2, &mask, hev_thresh);

    Store16x4(&p1, &p0, &p3, &p2, b, b + 8 * stride, stride);

    // rotate samples
    p1 = tmp1;
    p0 = tmp2;
  }
}

// 8-pixels wide variant, for chroma filtering
static void VFilter8(uint8_t* u, uint8_t* v, int stride,
                     int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  STOREUV(p2, u, v, -3 * stride);
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
  STOREUV(q2, u, v, 2 * stride);
}

static void HFilter8(uint8_t* u, uint8_t* v, int stride,
                     int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const tu = u - 4;
  uint8_t* const tv = v - 4;
  Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(u, v, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(&p3, &p2, &p1, &p0, tu, tv, stride);
  Store16x4(&q0, &q1, &q2, &q3, u, v, stride);
}

static void VFilter8i(uint8_t* u, uint8_t* v, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4 * stride;
  v += 4 * stride;

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
  MAX_DIFF2(t2, t1, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  // Store
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
}

static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;
  Load16x4(u, v, stride, &t2, &t1, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4;  // beginning of q0
  v += 4;
  Load16x4(u, v, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
  MAX_DIFF2(t2, t1, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  u -= 2;  // beginning of p1
  v -= 2;
  Store16x4(&p1, &p0, &q0, &q1, u, v, stride);
}

#endif  // WEBP_USE_SSE2

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitSSE2(void);

void VP8DspInitSSE2(void) {
#if defined(WEBP_USE_SSE2)
  VP8Transform = Transform;
#if defined(USE_TRANSFORM_AC3)
  VP8TransformAC3 = TransformAC3;
#endif

  VP8VFilter16 = VFilter16;
  VP8HFilter16 = HFilter16;
  VP8VFilter8 = VFilter8;
  VP8HFilter8 = HFilter8;
  VP8VFilter16i = VFilter16i;
  VP8HFilter16i = HFilter16i;
  VP8VFilter8i = VFilter8i;
  VP8HFilter8i = HFilter8i;

  VP8SimpleVFilter16 = SimpleVFilter16;
  VP8SimpleHFilter16 = SimpleHFilter16;
  VP8SimpleVFilter16i = SimpleVFilter16i;
  VP8SimpleHFilter16i = SimpleHFilter16i;
#endif  // WEBP_USE_SSE2
}