/* ******************************************************************
 * Common functions of New Generation Entropy library
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* *************************************
*  Dependencies
***************************************/
#include "mem.h"
#include "error_private.h"       /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
#include "huf.h"


/*===   Version   ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }


/*===   Error Management   ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }

unsigned HUF_isError(size_t code) { return ERR_isError(code); }
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
static U32 FSE_ctz(U32 val)
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
        unsigned long r = 0;
        return _BitScanForward(&r, val) ? (unsigned)r : 0;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
        return __builtin_ctz(val);
#   elif defined(__ICCARM__)    /* IAR Intrinsic */
        return __CTZ(val);
#   else   /* Software version */
        U32 count = 0;
        while ((val & 1) == 0) {
            val >>= 1;
            ++count;
        }
        return count;
#   endif
    }
}
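/* Note : FSE_ctz() returns the index of the lowest set bit,
 * e.g. FSE_ctz(0x0C) == 2. FSE_readNCount_body() below relies on
 * FSE_ctz(~bitStream | 0x80000000) >> 1 to count how many consecutive
 * 2-bit fields at the bottom of bitStream equal 0b11 (the repeat code). */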

FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                           const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;

    if (hbSize < 8) {
        /* This function only works when hbSize >= 8 */
        char buffer[8] = {0};
        ZSTD_memcpy(buffer, headerBuffer, hbSize);
        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                                    buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 8);

    /* init */
    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

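    /* Main decoding loop. `remaining` starts at 2^tableLog + 1 and is reduced
     * by the magnitude of each decoded count; a valid header ends with
     * `remaining == 1` (checked after the loop). `threshold` and `nbBits`
     * shrink as `remaining` shrinks, so later counts are read with fewer bits.
     */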
    for (;;) {
        if (previous0) {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
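            /* For example, if the low bits of bitStream are 0b101111
             * (read two bits at a time from the least-significant end:
             * 0b11, 0b11, then 0b10), the two 0b11 fields give repeats == 2,
             * i.e. 6 zero symbols, and the final 0b10 field below adds 2 more.
             */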
            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (LIKELY(ip <= iend-7)) {
                    ip += 3;
                } else {
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* Add the final repeat which isn't 0b11. */
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;

            /* This is an error, but break and return an error
             * at the end, because returning out of a loop makes
             * it harder for the compiler to optimize.
             */
            if (charnum >= maxSV1) break;

            /* We don't need to set the normalized count to 0
             * because we already memset the whole buffer to 0.
             */

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                assert((bitCount >> 3) <= 3); /* For first condition to work */
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
        }
        {
            int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            /* When it matters (small blocks), this is a
             * predictable branch, because we don't use -1.
             */
            if (count >= 0) {
                remaining -= count;
            } else {
                assert(count == -1);
                remaining += count;
            }
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;

            assert(threshold > 1);
            if (remaining < threshold) {
                /* This branch can be folded into the
                 * threshold update condition because we
                 * know that threshold > 1.
                 */
                if (remaining <= 1) break;
                nbBits = BIT_highbit32(remaining) + 1;
                threshold = 1 << (nbBits - 1);
            }
            if (charnum >= maxSV1) break;

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
    }   }
    if (remaining != 1) return ERROR(corruption_detected);
    /* Only possible when there are too many zeros. */
    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    return ip-istart;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_readNCount_body_default(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

#if DYNAMIC_BMI2
TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif

size_t FSE_readNCount_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
    }
#endif
    (void)bmi2;
    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

size_t FSE_readNCount(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}
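
/* Usage sketch (illustrative only; buffer sizes are the caller's choice):
 *
 *     short ncount[FSE_MAX_SYMBOL_VALUE+1];
 *     unsigned maxSV = FSE_MAX_SYMBOL_VALUE;
 *     unsigned tableLog;
 *     size_t const hSize = FSE_readNCount(ncount, &maxSV, &tableLog, src, srcSize);
 *     if (FSE_isError(hSize)) { handle the error }
 *
 * On success, hSize is the number of header bytes consumed, maxSV is lowered
 * to the last symbol actually present, and tableLog receives the table log.
 */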


/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
    @return : size read from `src`, or an error code.
    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
}

FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                   U32* nbSymbolsPtr, U32* tableLogPtr,
                   const void* src, size_t srcSize,
                   void* workSpace, size_t wkspSize,
                   int bmi2)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* ZSTD_memset(huffWeight, 0, hwSize); */   /* is not necessary, even though some analyzer complain ... */

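    /* The first byte (iSize) selects between two encodings, as handled below :
     * values >= 128 mean the weights are stored directly, two 4-bit weights
     * per byte (iSize-127 weights in total); smaller values mean the next
     * iSize bytes are an FSE-compressed stream of weights. */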
    if (iSize >= 128) {  /* special header */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        /* max (hwSize-1) values decoded, as last one is implied */
        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << BIT_highbit32(rest);
            U32 const lastWeight = BIT_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }
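    /* Worked example of the block above (illustrative): if the decoded
     * weights sum to weightTotal == 12, then tableLog == 4, total == 16,
     * rest == 4, and the implied last weight is 3, since (1 << 3) >> 1 == 4
     * completes the sum to a clean power of 2. */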

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);  /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
}

#if DYNAMIC_BMI2
static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
}
#endif

size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize,
                     int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
    }
#endif
    (void)bmi2;
    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}