Nick Terrell | ac58c8d | 2020-03-26 15:19:05 -0700 | [diff] [blame] | 1 | /* ****************************************************************** |
| 2 | * Common functions of New Generation Entropy library |
| 3 | * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. |
| 4 | * |
| 5 | * You can contact the author at : |
| 6 | * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| 7 | * - Public forum : https://groups.google.com/forum/#!forum/lz4c |
| 8 | * |
| 9 | * This source code is licensed under both the BSD-style license (found in the |
| 10 | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
| 11 | * in the COPYING file in the root directory of this source tree). |
| 12 | * You may select, at your option, one of the above-listed licenses. |
| 13 | ****************************************************************** */ |
inikep | 63ecd74 | 2016-05-13 11:27:56 +0200 | [diff] [blame] | 14 | |
| 15 | /* ************************************* |
| 16 | * Dependencies |
| 17 | ***************************************/ |
inikep | 63ecd74 | 2016-05-13 11:27:56 +0200 | [diff] [blame] | 18 | #include "mem.h" |
Yann Collet | a91ca62 | 2016-06-05 01:33:55 +0200 | [diff] [blame] | 19 | #include "error_private.h" /* ERR_*, ERROR */ |
Yann Collet | d0e2cd1 | 2016-06-05 00:58:01 +0200 | [diff] [blame] | 20 | #define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ |
Yann Collet | 38b75dd | 2016-07-24 15:35:59 +0200 | [diff] [blame] | 21 | #include "fse.h" |
Yann Collet | a91ca62 | 2016-06-05 01:33:55 +0200 | [diff] [blame] | 22 | #define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ |
Yann Collet | 38b75dd | 2016-07-24 15:35:59 +0200 | [diff] [blame] | 23 | #include "huf.h" |
inikep | 63ecd74 | 2016-05-13 11:27:56 +0200 | [diff] [blame] | 24 | |
| 25 | |
Yann Collet | 1f2c95c | 2017-03-05 21:07:20 -0800 | [diff] [blame] | 26 | /*=== Version ===*/ |
Yann Collet | 4596037 | 2017-02-15 12:00:03 -0800 | [diff] [blame] | 27 | unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } |
| 28 | |
| 29 | |
Yann Collet | 1f2c95c | 2017-03-05 21:07:20 -0800 | [diff] [blame] | 30 | /*=== Error Management ===*/ |
/*! FSE_isError() :
 *  tells if a size_t result returned by an FSE function is an error code */
unsigned FSE_isError(size_t code)
{
    return ERR_isError(code);
}

/*! FSE_getErrorName() :
 *  provides a readable string describing an FSE error code */
const char* FSE_getErrorName(size_t code)
{
    return ERR_getErrorName(code);
}
| 33 | |
/*! HUF_isError() :
 *  tells if a size_t result returned by a HUF function is an error code */
unsigned HUF_isError(size_t code)
{
    return ERR_isError(code);
}

/*! HUF_getErrorName() :
 *  provides a readable string describing a HUF error code */
const char* HUF_getErrorName(size_t code)
{
    return ERR_getErrorName(code);
}
| 36 | |
| 37 | |
| 38 | /*-************************************************************** |
| 39 | * FSE NCount encoding-decoding |
| 40 | ****************************************************************/ |
/*! FSE_readNCount() :
    Decodes a serialized table of normalized counters ("NCount" header).
    Fills `normalizedCounter` with one short per symbol; symbols absent from
    the stream keep a frequency of 0 (the buffer is memset first).
    On input, *maxSVPtr is the max symbol value that fits in normalizedCounter;
    on success it is updated to the last symbol value actually decoded.
    *tableLogPtr receives the table's accuracy log, read from the header.
    @return : number of bytes consumed from headerBuffer,
              or an error code, testable with FSE_isError().
*/
size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                 const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;   /* count still to distribute; must reach exactly 1 at the end */
    int threshold;
    U32 bitStream;
    int bitCount;    /* nb of bits of bitStream already consumed */
    unsigned charnum = 0;
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;

    if (hbSize < 4) {
        /* This function only works when hbSize >= 4 :
         * copy input into a zero-padded 4-byte buffer and retry */
        char buffer[4] = {0};
        memcpy(buffer, headerBuffer, hbSize);
        { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                            buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            /* a header claiming to consume more bytes than were provided is corrupt */
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 4);

    /* init */
    memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog from the low 4 bits */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;   /* counts sum to 1<<tableLog ; +1 accounts for the "extra accuracy" offset below */
    threshold = 1<<nbBits;
    nbBits++;

    for (;;) {
        if (previous0) {
            /* Previous count was 0 : the stream now holds a run of 2-bit flags.
             * Each '11' flag adds 3 more zero-count symbols; the run is
             * terminated by a flag in 0..2 giving that many extra zeros. */
            // TODO: Generalize to FSE_countTrailingZeros() or something
            int repeats = __builtin_ctz(~bitStream) >> 1;   /* nb of consecutive '11' pairs buffered */
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (ip < iend-6) {
                    ip += 3;
                    bitStream = MEM_readLE32(ip) >> bitCount;
                } else {
                    /* not enough input left to reload : drain the buffered bits instead */
                    bitStream >>= 24;
                    bitCount += 24;
                }
                repeats = __builtin_ctz(~bitStream) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* consume the terminating flag (value 0..2 : '11' is excluded by the repeat scan) */
            assert(bitCount < 30 && (bitStream & 3) != 3);
            charnum += bitStream & 3;
            bitCount += 2;

            /* This is an error, but break and return an error
             * at the end, because returning out of a loop makes
             * it harder for the compiler to optimize.
             */
            if (charnum >= maxSV1) break;

            /* We don't need to set the normalized count to 0
             * because we already memset the whole buffer to 0.
             */

            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                assert((bitCount >> 3) <= 3); /* For first condition to work */
                ip += bitCount>>3;
                bitCount &= 7;
                bitStream = MEM_readLE32(ip) >> bitCount;
            } else {
                bitStream >>= 2;
            }
        }
        {
            /* values below `max` fit in nbBits-1 bits; others need the full nbBits */
            int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy : stored value is count+1, so -1 is representable */
            /* When it matters (small blocks), this is a
             * predictable branch, because we don't use -1.
             */
            if (count >= 0) {
                remaining -= count;
            } else {
                assert(count == -1);
                remaining += count;
            }
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;

            assert(threshold > 1);
            if (remaining < threshold) {
                /* This branch can be folded into the
                 * threshold update condition because we
                 * know that threshold > 1.
                 */
                if (remaining <= 1) break;
                nbBits = BIT_highbit32(remaining) + 1;
                threshold = 1 << (nbBits - 1);
            }
            if (charnum >= maxSV1) break;

            /* reload : advance ip by whole consumed bytes when enough input remains,
             * otherwise pin ip to the last 4 readable bytes and keep counting bits */
            if (LIKELY((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
    }   }
    if (remaining != 1) return ERROR(corruption_detected);
    /* Only possible when there are too many zeros. */
    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;   /* round consumed bits up to whole bytes */
    return ip-istart;
}
Yann Collet | a91ca62 | 2016-06-05 01:33:55 +0200 | [diff] [blame] | 178 | |
/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer; it must hold one slot more than the
    nb of explicit weights decoded (the last weight is implied, see below).
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32;
    on return, rankStats[w] = nb of symbols carrying weight w.
    *nbSymbolsPtr receives the total nb of symbols (incl. the implied last one);
    *tableLogPtr receives the reconstructed tree depth.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */

    if (iSize >= 128) {   /* special header : weights stored raw, 2 per byte (4 bits each) */
        oSize = iSize - 127;       /* nb of weights */
        iSize = ((oSize+1)/2);     /* nb of payload bytes */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        { U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else {   /* header compressed with FSE (normal case) */
        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    { U32 n; for (n=0; n<oSize; n++) {
        if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        rankStats[huffWeight[n]]++;
        weightTotal += (1 << huffWeight[n]) >> 1;   /* weight w contributes 2^(w-1) ; weight 0 contributes nothing */
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        { U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;   /* contribution the implied last symbol must supply */
            U32 const verif = 1 << BIT_highbit32(rest);
            U32 const lastWeight = BIT_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;   /* safe : oSize < hwSize enforced above / by decompress capacity */
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}