/*
 * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
 * instruction.
 *
 * A white paper describing this algorithm can be found at:
 * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
 *
 * Copyright (C) 2013 Intel Corporation. All rights reserved.
 * Authors:
 *  Wajdi Feghali   <wajdi.k.feghali@intel.com>
 *  Jim Guilford    <james.guilford@intel.com>
 *  Vinodh Gopal    <vinodh.gopal@intel.com>
 *  Erdinc Ozturk   <erdinc.ozturk@intel.com>
 *  Jim Kukunas     <james.t.kukunas@linux.intel.com>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "deflate.h"

#ifdef CRC32_SIMD_SSE42_PCLMUL

#include <inttypes.h>
#include <string.h>     /* memcpy() in the partial-block path below */
#include <emmintrin.h>
#include <immintrin.h>
#include <wmmintrin.h>

#ifndef zalign
/* Aligned-data attribute for the constant tables below, in case the build
 * does not already provide it. */
#  ifdef _MSC_VER
#    define zalign(x) __declspec(align(x))
#  else
#    define zalign(x) __attribute__((aligned((x))))
#  endif
#endif

#define CRC_LOAD(s) \
    do { \
        __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
        __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)s->crc0 + 1);\
        __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)s->crc0 + 2);\
        __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)s->crc0 + 3);\
        __m128i xmm_crc_part = _mm_loadu_si128((__m128i *)s->crc0 + 4);

#define CRC_SAVE(s) \
        _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
        _mm_storeu_si128((__m128i *)s->crc0 + 1, xmm_crc1);\
        _mm_storeu_si128((__m128i *)s->crc0 + 2, xmm_crc2);\
        _mm_storeu_si128((__m128i *)s->crc0 + 3, xmm_crc3);\
        _mm_storeu_si128((__m128i *)s->crc0 + 4, xmm_crc_part);\
    } while (0);
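
/*
 * CRC_LOAD and CRC_SAVE are a matched pair: CRC_LOAD opens a `do {` block and
 * declares the five xmm_crc* locals from s->crc0[], and CRC_SAVE stores them
 * back and closes the block with `} while (0);`.  Every function below that
 * touches the folded state therefore brackets its body with CRC_LOAD(s) ...
 * CRC_SAVE(s).  A minimal usage sketch, illustrative only and not part of
 * this file's real code:
 *
 *     void example(deflate_state *const s)
 *     {
 *         CRC_LOAD(s)
 *         xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_crc1);
 *         CRC_SAVE(s)
 *     }
 */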

ZLIB_INTERNAL void crc_fold_init(deflate_state *const s)
{
    CRC_LOAD(s)

    xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
    xmm_crc1 = _mm_setzero_si128();
    xmm_crc2 = _mm_setzero_si128();
    xmm_crc3 = _mm_setzero_si128();

    CRC_SAVE(s)

    s->strm->adler = 0;
}
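
/*
 * The three entry points in this file are expected to be used in sequence:
 * crc_fold_init() once per stream, crc_fold_copy() for each chunk of input
 * (it copies src to dst while folding the CRC), and crc_fold_512to32() to
 * reduce the folded 512-bit state to the final CRC-32 value.  s->strm->adler
 * is reused to hold the running CRC when producing a gzip stream.  A hedged
 * sketch of a caller, with hypothetical `in`/`out` buffers of length n:
 *
 *     crc_fold_init(s);
 *     crc_fold_copy(s, out, in, n);
 *     s->strm->adler = crc_fold_512to32(s);
 */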

local void fold_1(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc3, ps_res;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res = _mm_xor_ps(ps_crc0, ps_crc3);

    *xmm_crc0 = *xmm_crc1;
    *xmm_crc1 = *xmm_crc2;
    *xmm_crc2 = x_tmp3;
    *xmm_crc3 = _mm_castps_si128(ps_res);
}

local void fold_2(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3, x_tmp2;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;

    x_tmp3 = *xmm_crc3;
    x_tmp2 = *xmm_crc2;

    *xmm_crc3 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res31 = _mm_xor_ps(ps_crc3, ps_crc1);

    *xmm_crc2 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res20 = _mm_xor_ps(ps_crc0, ps_crc2);

    *xmm_crc0 = x_tmp2;
    *xmm_crc1 = x_tmp3;
    *xmm_crc2 = _mm_castps_si128(ps_res20);
    *xmm_crc3 = _mm_castps_si128(ps_res31);
}

local void fold_3(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc2;
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);

    *xmm_crc2 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res21 = _mm_xor_ps(ps_crc1, ps_crc2);

    *xmm_crc1 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res10 = _mm_xor_ps(ps_crc0, ps_crc1);

    *xmm_crc0 = x_tmp3;
    *xmm_crc1 = _mm_castps_si128(ps_res10);
    *xmm_crc2 = _mm_castps_si128(ps_res21);
    *xmm_crc3 = _mm_castps_si128(ps_res32);
}

local void fold_4(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
    __m128 ps_res0, ps_res1, ps_res2, ps_res3;

    x_tmp0 = *xmm_crc0;
    x_tmp1 = *xmm_crc1;
    x_tmp2 = *xmm_crc2;
    x_tmp3 = *xmm_crc3;

    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_t0 = _mm_castsi128_ps(x_tmp0);
    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);

    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_t1 = _mm_castsi128_ps(x_tmp1);
    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);

    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_t2 = _mm_castsi128_ps(x_tmp2);
    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);

    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_t3 = _mm_castsi128_ps(x_tmp3);
    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);

    *xmm_crc0 = _mm_castps_si128(ps_res0);
    *xmm_crc1 = _mm_castps_si128(ps_res1);
    *xmm_crc2 = _mm_castps_si128(ps_res2);
    *xmm_crc3 = _mm_castps_si128(ps_res3);
}
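
/*
 * fold_1 .. fold_4 above all apply the same basic step to one, two, three or
 * four of the accumulators and move the untouched accumulators into their new
 * positions.  The two 64-bit halves of xmm_fold4 are the precomputed folding
 * constants from the white paper referenced at the top of this file for
 * advancing a 128-bit chunk of CRC state by 64 bytes (one iteration of the
 * main copy loop).  A hedged sketch of the single-register step that each of
 * them repeats -- a hypothetical helper, not used by this file:
 *
 *     static __m128i fold_step(__m128i crc, __m128i fold4)
 *     {
 *         __m128i lo = _mm_clmulepi64_si128(crc, fold4, 0x01);
 *         __m128i hi = _mm_clmulepi64_si128(crc, fold4, 0x10);
 *         return _mm_xor_si128(lo, hi);
 *     }
 *
 * The production code performs the final XOR through _mm_xor_ps on values
 * cast to __m128, presumably a scheduling/encoding choice on the original
 * target; the result is bitwise identical to _mm_xor_si128.
 */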

local const unsigned zalign(32) pshufb_shf_table[60] = {
    0x84838281,0x88878685,0x8c8b8a89,0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
    0x85848382,0x89888786,0x8d8c8b8a,0x01008f8e, /* shl 14 (16 - 2)/shr2 */
    0x86858483,0x8a898887,0x8e8d8c8b,0x0201008f, /* shl 13 (16 - 3)/shr3 */
    0x87868584,0x8b8a8988,0x8f8e8d8c,0x03020100, /* shl 12 (16 - 4)/shr4 */
    0x88878685,0x8c8b8a89,0x008f8e8d,0x04030201, /* shl 11 (16 - 5)/shr5 */
    0x89888786,0x8d8c8b8a,0x01008f8e,0x05040302, /* shl 10 (16 - 6)/shr6 */
    0x8a898887,0x8e8d8c8b,0x0201008f,0x06050403, /* shl  9 (16 - 7)/shr7 */
    0x8b8a8988,0x8f8e8d8c,0x03020100,0x07060504, /* shl  8 (16 - 8)/shr8 */
    0x8c8b8a89,0x008f8e8d,0x04030201,0x08070605, /* shl  7 (16 - 9)/shr9 */
    0x8d8c8b8a,0x01008f8e,0x05040302,0x09080706, /* shl  6 (16 -10)/shr10 */
    0x8e8d8c8b,0x0201008f,0x06050403,0x0a090807, /* shl  5 (16 -11)/shr11 */
    0x8f8e8d8c,0x03020100,0x07060504,0x0b0a0908, /* shl  4 (16 -12)/shr12 */
    0x008f8e8d,0x04030201,0x08070605,0x0c0b0a09, /* shl  3 (16 -13)/shr13 */
    0x01008f8e,0x05040302,0x09080706,0x0d0c0b0a, /* shl  2 (16 -14)/shr14 */
    0x0201008f,0x06050403,0x0a090807,0x0e0d0c0b  /* shl  1 (16 -15)/shr15 */
};
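
/*
 * Layout of pshufb_shf_table: fifteen 16-byte rows.  Row (len - 1) is a
 * PSHUFB mask that shifts a register left by (16 - len) bytes; entries with
 * the high bit set (0x80..0x8f) select zero.  XORing every byte of that row
 * with 0x80 -- which partial_fold() does via xmm_mask3 -- yields the
 * complementary mask that shifts right by len bytes.  The "shl"/"shr"
 * comment on each row records exactly that pair of shift amounts.
 */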

local void partial_fold(deflate_state *const s, const size_t len,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3,
        __m128i *xmm_crc_part)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);
    const __m128i xmm_mask3 = _mm_set1_epi32(0x80808080);

    __m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
    __m128i xmm_a0_0, xmm_a0_1;
    __m128 ps_crc3, psa0_0, psa0_1, ps_res;

    xmm_shl = _mm_load_si128((__m128i *)pshufb_shf_table + (len - 1));
    xmm_shr = xmm_shl;
    xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);

    xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);

    *xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
    xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
    *xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);

    *xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
    xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
    *xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);

    *xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
    xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
    *xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);

    *xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
    *xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
    *xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);

    xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
    xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);

    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    psa0_0 = _mm_castsi128_ps(xmm_a0_0);
    psa0_1 = _mm_castsi128_ps(xmm_a0_1);

    ps_res = _mm_xor_ps(ps_crc3, psa0_0);
    ps_res = _mm_xor_ps(ps_res, psa0_1);

    *xmm_crc3 = _mm_castps_si128(ps_res);
}
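
/*
 * partial_fold() absorbs 1..15 bytes of input: the four crc registers plus
 * xmm_crc_part (whose low `len` bytes hold the new input) are treated as one
 * long value and shifted down by `len` bytes with the shuffle masks above,
 * while the `len` bytes that fall off the bottom of xmm_crc0 (xmm_a0_0) are
 * folded with the same xmm_fold4 constants and XORed back into xmm_crc3.  It
 * is used both for the unaligned prologue and for the final tail of
 * crc_fold_copy().
 */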

ZLIB_INTERNAL void crc_fold_copy(deflate_state *const s,
        unsigned char *dst, const unsigned char *src, long len)
{
    unsigned long algn_diff;
    __m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;

    CRC_LOAD(s)

    if (len < 16) {
        if (len == 0)
            return;
        goto partial;
    }

    /* Consume the first few bytes so that src is 16-byte aligned for the
     * aligned loads in the main loop below. */
    algn_diff = (0 - (uintptr_t)src) & 0xF;
    if (algn_diff) {
        xmm_crc_part = _mm_loadu_si128((__m128i *)src);
        _mm_storeu_si128((__m128i *)dst, xmm_crc_part);

        dst += algn_diff;
        src += algn_diff;
        len -= algn_diff;

        partial_fold(s, algn_diff, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
            &xmm_crc_part);
    }

    /* Main loop: copy and fold 64 bytes per iteration. */
    while ((len -= 64) >= 0) {
        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);
        xmm_t3 = _mm_load_si128((__m128i *)src + 3);

        fold_4(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);

        xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0);
        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3);

        src += 64;
        dst += 64;
    }

    /*
     * Here len == (number of bytes left) - 64, i.e. it is negative; the
     * branches below handle the 48-, 32- and 16-byte tails before any
     * final partial block.
     */
    if (len + 16 >= 0) {
        len += 16;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);

        fold_3(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);

        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2);

        if (len == 0)
            goto done;

        dst += 48;
        src += 48;
    } else if (len + 32 >= 0) {
        len += 32;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);

        fold_2(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);

        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1);

        if (len == 0)
            goto done;

        dst += 32;
        src += 32;
    } else if (len + 48 >= 0) {
        len += 48;

        xmm_t0 = _mm_load_si128((__m128i *)src);

        fold_1(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);

        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);

        if (len == 0)
            goto done;

        dst += 16;
        src += 16;
    } else {
        len += 64;
        if (len == 0)
            goto done;
    }

partial:

#if defined(_MSC_VER)
    /* VS does not permit the use of _mm_set_epi64x in 32-bit builds */
    {
        int32_t parts[4] = {0, 0, 0, 0};
        memcpy(&parts, src, len);
        xmm_crc_part = _mm_set_epi32(parts[3], parts[2], parts[1], parts[0]);
    }
#else
    {
        int64_t parts[2] = {0, 0};
        memcpy(&parts, src, len);
        xmm_crc_part = _mm_set_epi64x(parts[1], parts[0]);
    }
#endif

    _mm_storeu_si128((__m128i *)dst, xmm_crc_part);
    partial_fold(s, len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
        &xmm_crc_part);
done:
    CRC_SAVE(s)
}
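
/*
 * A hedged self-check sketch, illustrative only: the folded CRC must equal
 * zlib's scalar crc32() over the same bytes.  `in` and `n` are hypothetical,
 * and `out` is deliberately larger than `n` because the copy path stores
 * whole 16-byte vectors:
 *
 *     unsigned char out[4096];
 *     crc_fold_init(s);
 *     crc_fold_copy(s, out, in, n);   // n <= sizeof(out) - 16
 *     assert(crc_fold_512to32(s) == crc32(crc32(0L, Z_NULL, 0), in, n));
 */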

local const unsigned zalign(16) crc_k[] = {
    0xccaa009e, 0x00000000, /* rk1 */
    0x751997d0, 0x00000001, /* rk2 */
    0xccaa009e, 0x00000000, /* rk5 */
    0x63cd6124, 0x00000001, /* rk6 */
    0xf7011640, 0x00000001, /* rk7 */
    0xdb710640, 0x00000001  /* rk8 */
};

local const unsigned zalign(16) crc_mask[4] = {
    0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
};

local const unsigned zalign(16) crc_mask2[4] = {
    0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
};
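
/*
 * crc_k holds the reduction constants named rk1/rk2, rk5/rk6 and rk7/rk8 in
 * the white paper (rk3/rk4 are not needed here): rk1/rk2 fold the four
 * 128-bit accumulators down to a single register, rk5/rk6 reduce 128 bits to
 * 64, and rk7/rk8 perform the final reduction (Barrett reduction in the
 * paper's terms) to 32 bits.  crc_mask and crc_mask2 select the low 64 and
 * high 96 bits of a register during those last steps.
 */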

unsigned ZLIB_INTERNAL crc_fold_512to32(deflate_state *const s)
{
    const __m128i xmm_mask  = _mm_load_si128((__m128i *)crc_mask);
    const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);

    unsigned crc;
    __m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;

    CRC_LOAD(s)

    /*
     * k1, k2: fold the four 128-bit accumulators down into xmm_crc3.
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k);

    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);

    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);

    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);

    /*
     * k5, k6: reduce the remaining 128 bits to 64 bits.
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k + 1);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);

    /*
     * k7, k8: final reduction from 64 bits to the 32-bit CRC.
     */
    xmm_crc1 = xmm_crc3;
    xmm_crc2 = xmm_crc3;
    crc_fold = _mm_load_si128((__m128i *)crc_k + 2);

    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);

    xmm_crc2 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);

    crc = _mm_extract_epi32(xmm_crc3, 2);
    return ~crc;
    /*
     * The stores in CRC_SAVE are unreachable after the return; the macro is
     * kept only because it supplies the `} while (0);` that closes the block
     * opened by CRC_LOAD.
     */
    CRC_SAVE(s)
}

#endif /* CRC32_SIMD_SSE42_PCLMUL */
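
/*
 * Note: this translation unit uses SSSE3, SSE4.1 and PCLMULQDQ intrinsics, so
 * it is expected to be compiled with matching target flags (for example
 * -mssse3 -msse4.2 -mpclmul with gcc/clang); the CRC32_SIMD_SSE42_PCLMUL
 * guard above keeps the body out of builds that do not enable it.
 */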