/*
   zstd - standard compression library
   Copyright (C) 2014-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
   * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - zstd source repository : https://github.com/Cyan4973/zstd
   - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/

/* ***************************************************************
*  Tuning parameters
*****************************************************************/
/*!
 * MEMORY_USAGE :
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
 * Increasing memory usage improves compression ratio.
 * Reducing memory usage can improve speed, thanks to cache effects.
 */
#define ZSTD_MEMORY_USAGE 16

/*!
 * HEAPMODE :
 * Selects how the default compression functions allocate memory for their hash table :
 * on the stack (0, fastest) or on the heap (1, requires malloc()).
 * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
 */
#ifndef ZSTD_HEAPMODE
#  define ZSTD_HEAPMODE 1
#endif   /* ZSTD_HEAPMODE */

/*!
 * LEGACY_SUPPORT :
 * The decompressor can decode older formats (starting from Zstd 0.1+).
 */
#ifndef ZSTD_LEGACY_SUPPORT
#  define ZSTD_LEGACY_SUPPORT 1
#endif


/* *******************************************************
*  Includes
*********************************************************/
#include <stdlib.h>        /* calloc */
#include <string.h>        /* memcpy, memmove */
#include <stdio.h>         /* debug : printf */
#include "mem.h"           /* low level memory routines */
#include "zstd_static.h"
#include "zstd_internal.h"
#include "fse_static.h"
#include "huff0.h"

#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
#  include "zstd_legacy.h"
#endif


/* *******************************************************
*  Compiler specifics
*********************************************************/
#ifdef __AVX2__
#  include <immintrin.h>   /* AVX2 intrinsics */
#endif

#ifdef _MSC_VER    /* Visual Studio */
#  define FORCE_INLINE static __forceinline
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
#else
#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#  ifdef __GNUC__
#    define FORCE_INLINE static inline __attribute__((always_inline))
#  else
#    define FORCE_INLINE static inline
#  endif
#endif


/* *******************************************************
*  Constants
*********************************************************/
#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
#define HASH_TABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASH_TABLESIZE - 1)
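/* note : with the default ZSTD_MEMORY_USAGE of 16, HASH_LOG is 14,
 *        i.e. 16384 hash table entries (64 KB assuming 4-byte entries),
 *        consistent with the "16 -> 64KB" example given above */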

#define KNUTH 2654435761

#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define BLOCKSIZE (128 KB)   /* define, for static allocation */
#define IS_RAW BIT0
#define IS_RLE BIT1

static const U32 g_maxDistance = 4 * BLOCKSIZE;
static const U32 g_maxLimit = 1 GB;

#define WORKPLACESIZE (BLOCKSIZE*3)
#define MINMATCH 4
#define LitFSELog  11
#define MLFSELog   10
#define LLFSELog   10
#define OffFSELog   9
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MaxSeq MAX(MaxLL, MaxML)

#define LITERAL_NOENTROPY 63
#define COMMAND_NOENTROPY 7   /* to remove */

static const size_t ZSTD_blockHeaderSize = 3;
static const size_t ZSTD_frameHeaderSize = 4;


/* *******************************************************
*  Memory operations
**********************************************************/
static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }


/* **************************************
*  Local structures
****************************************/
void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
    ssPtr->offset = ssPtr->offsetStart;
    ssPtr->lit = ssPtr->litStart;
    ssPtr->litLength = ssPtr->litLengthStart;
    ssPtr->matchLength = ssPtr->matchLengthStart;
    ssPtr->dumps = ssPtr->dumpsStart;
}


/* *************************************
*  Error Management
***************************************/
/*! ZSTD_isError
*   tells if a return value is an error code */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }

/*! ZSTD_getErrorName
*   provides error code string (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }


/* *************************************
*  Tool functions
***************************************/
unsigned ZSTD_versionNumber (void) { return ZSTD_VERSION_NUMBER; }


/* *******************************************************
*  Compression
*********************************************************/
size_t ZSTD_compressBound(size_t srcSize)   /* maximum compressed size */
{
    return FSE_compressBound(srcSize) + 12;
}


size_t ZSTD_noCompressBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;

    if (srcSize + ZSTD_blockHeaderSize > maxDstSize) return ERROR(dstSize_tooSmall);
    memcpy(ostart + ZSTD_blockHeaderSize, src, srcSize);

    /* Build header */
    ostart[0]  = (BYTE)(srcSize>>16);
    ostart[1]  = (BYTE)(srcSize>>8);
    ostart[2]  = (BYTE) srcSize;
    ostart[0] += (BYTE)(bt_raw<<6);   /* is a raw (uncompressed) block */

    return ZSTD_blockHeaderSize+srcSize;
}


static size_t ZSTD_compressRawLiteralsBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;

    if (srcSize + 3 > maxDstSize) return ERROR(dstSize_tooSmall);

    MEM_writeLE32(dst, ((U32)srcSize << 2) | IS_RAW);
    memcpy(ostart + 3, src, srcSize);
    return srcSize + 3;
}

static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;

    (void)maxDstSize;
    MEM_writeLE32(dst, ((U32)srcSize << 2) | IS_RLE);   /* note : maxDstSize > litHeaderSize > 4 */
    ostart[3] = *(const BYTE*)src;
    return 4;
}

size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 1; }

static size_t ZSTD_compressLiterals (void* dst, size_t maxDstSize,
                               const void* src, size_t srcSize)
{
    const size_t minGain = ZSTD_minGain(srcSize);
    BYTE* const ostart = (BYTE*)dst;
    size_t hsize;
    static const size_t litHeaderSize = 5;

    if (maxDstSize < litHeaderSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */

    hsize = HUF_compress(ostart+litHeaderSize, maxDstSize-litHeaderSize, src, srcSize);

    if ((hsize==0) || (hsize >= srcSize - minGain)) return ZSTD_compressRawLiteralsBlock(dst, maxDstSize, src, srcSize);
    if (hsize==1) return ZSTD_compressRleLiteralsBlock(dst, maxDstSize, src, srcSize);

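    /* header layout note (5 bytes, written below and parsed back in ZSTD_decompressLiterals) :
     * bits 0-1 of byte 0 give the literals block type (00 = Huffman-compressed),
     * the regenerated size (srcSize) is packed starting at bit 2,
     * and the compressed size (hsize) is packed starting at bit 5 of byte 2 (absolute bit 21) */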
    /* Build header */
    {
        ostart[0]  = (BYTE)(srcSize << 2);   /* is a block, is compressed */
        ostart[1]  = (BYTE)(srcSize >> 6);
        ostart[2]  = (BYTE)(srcSize >>14);
        ostart[2] += (BYTE)(hsize << 5);
        ostart[3]  = (BYTE)(hsize >> 3);
        ostart[4]  = (BYTE)(hsize >>11);
    }

    return hsize+litHeaderSize;
}


size_t ZSTD_compressSequences(BYTE* dst, size_t maxDstSize,
                              const seqStore_t* seqStorePtr,
                              size_t srcSize)
{
    U32 count[MaxSeq+1];
    S16 norm[MaxSeq+1];
    size_t mostFrequent;
    U32 max = 255;
    U32 tableLog = 11;
    U32 CTable_LitLength  [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL )];
    U32 CTable_OffsetBits [FSE_CTABLE_SIZE_U32(OffFSELog,MaxOff)];
    U32 CTable_MatchLength[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML )];
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const BYTE* const op_lit_start = seqStorePtr->litStart;
    const BYTE* const llTable = seqStorePtr->litLengthStart;
    const BYTE* const llPtr = seqStorePtr->litLength;
    const BYTE* const mlTable = seqStorePtr->matchLengthStart;
    const U32*  const offsetTable = seqStorePtr->offsetStart;
    BYTE* const offCodeTable = seqStorePtr->offCodeStart;
    BYTE* op = dst;
    BYTE* const oend = dst + maxDstSize;
    const size_t nbSeq = llPtr - llTable;
    const size_t minGain = ZSTD_minGain(srcSize);
    const size_t maxCSize = srcSize - minGain;
    BYTE* seqHead;


    /* Compress literals */
    {
        size_t cSize;
        size_t litSize = seqStorePtr->lit - op_lit_start;

        if (litSize <= LITERAL_NOENTROPY)
            cSize = ZSTD_compressRawLiteralsBlock(op, maxDstSize, op_lit_start, litSize);
        else
            cSize = ZSTD_compressLiterals(op, maxDstSize, op_lit_start, litSize);
        if (ZSTD_isError(cSize)) return cSize;
        op += cSize;
    }

    /* Sequences Header */
    if ((oend-op) < MIN_SEQUENCES_SIZE)
        return ERROR(dstSize_tooSmall);
    MEM_writeLE16(op, (U16)nbSeq); op+=2;
    seqHead = op;

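    /* note : the byte at seqHead serves two purposes : its upper 6 bits receive the
     * LL/Offset/ML table types (set further below), while its lower 2 bits encode the
     * dumps-length format : a short form (2 bytes, bit 0 = high bit of the length) when
     * dumpsLength < 512, or a long form (3 bytes, flagged by bit 1) otherwise,
     * mirroring the parsing in ZSTD_decodeSeqHeaders */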
    /* dumps : contains too large lengths */
    {
        size_t dumpsLength = seqStorePtr->dumps - seqStorePtr->dumpsStart;
        if (dumpsLength < 512)
        {
            op[0] = (BYTE)(dumpsLength >> 8);
            op[1] = (BYTE)(dumpsLength);
            op += 2;
        }
        else
        {
            op[0] = 2;
            op[1] = (BYTE)(dumpsLength>>8);
            op[2] = (BYTE)(dumpsLength);
            op += 3;
        }
        if ((size_t)(oend-op) < dumpsLength+6) return ERROR(dstSize_tooSmall);
        memcpy(op, seqStorePtr->dumpsStart, dumpsLength);
        op += dumpsLength;
    }

    /* CTable for Literal Lengths */
    max = MaxLL;
    mostFrequent = FSE_countFast(count, &max, seqStorePtr->litLengthStart, nbSeq);
    if ((mostFrequent == nbSeq) && (nbSeq > 2))
    {
        *op++ = *(seqStorePtr->litLengthStart);
        FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
        LLtype = bt_rle;
    }
    else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (LLbits-1))))
    {
        FSE_buildCTable_raw(CTable_LitLength, LLbits);
        LLtype = bt_raw;
    }
    else
    {
        size_t NCountSize;
        tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
        FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
        NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
        if (FSE_isError(NCountSize)) return ERROR(GENERIC);
        op += NCountSize;
        FSE_buildCTable(CTable_LitLength, norm, max, tableLog);
        LLtype = bt_compressed;
    }

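    /* note : offsets are not FSE-encoded directly; each offset is turned into a small
     * code = highbit(offset) + 1, and the remaining low bits are written raw into the
     * bitstream during the encoding loop below. Code 0 is reserved for
     * "repeat previous offset" (offsetTable[i] == 0). */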
    /* CTable for Offsets codes */
    {
        /* create Offset codes */
        size_t i;
        max = MaxOff;
        for (i=0; i<nbSeq; i++)
        {
            offCodeTable[i] = (BYTE)ZSTD_highbit(offsetTable[i]) + 1;
            if (offsetTable[i]==0) offCodeTable[i]=0;
        }
        mostFrequent = FSE_countFast(count, &max, offCodeTable, nbSeq);
    }
    if ((mostFrequent == nbSeq) && (nbSeq > 2))
    {
        *op++ = *offCodeTable;
        FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
        Offtype = bt_rle;
    }
    else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (Offbits-1))))
    {
        FSE_buildCTable_raw(CTable_OffsetBits, Offbits);
        Offtype = bt_raw;
    }
    else
    {
        size_t NCountSize;
        tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
        FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
        NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
        if (FSE_isError(NCountSize)) return ERROR(GENERIC);
        op += NCountSize;
        FSE_buildCTable(CTable_OffsetBits, norm, max, tableLog);
        Offtype = bt_compressed;
    }

    /* CTable for MatchLengths */
    max = MaxML;
    mostFrequent = FSE_countFast(count, &max, seqStorePtr->matchLengthStart, nbSeq);
    if ((mostFrequent == nbSeq) && (nbSeq > 2))
    {
        *op++ = *seqStorePtr->matchLengthStart;
        FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
        MLtype = bt_rle;
    }
    else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (MLbits-1))))
    {
        FSE_buildCTable_raw(CTable_MatchLength, MLbits);
        MLtype = bt_raw;
    }
    else
    {
        size_t NCountSize;
        tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
        FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
        NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
        if (FSE_isError(NCountSize)) return ERROR(GENERIC);
        op += NCountSize;
        FSE_buildCTable(CTable_MatchLength, norm, max, tableLog);
        MLtype = bt_compressed;
    }

    seqHead[0] += (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    /* Encoding Sequences */
    {
        size_t streamSize, errorCode;
        BIT_CStream_t blockStream;
        FSE_CState_t stateMatchLength;
        FSE_CState_t stateOffsetBits;
        FSE_CState_t stateLitLength;
        int i;

        errorCode = BIT_initCStream(&blockStream, op, oend-op);
        if (ERR_isError(errorCode)) return ERROR(dstSize_tooSmall);   /* not enough space remaining */
        FSE_initCState(&stateMatchLength, CTable_MatchLength);
        FSE_initCState(&stateOffsetBits, CTable_OffsetBits);
        FSE_initCState(&stateLitLength, CTable_LitLength);

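        /* note : sequences are encoded from last to first, because FSE decoding
         * regenerates symbols in the reverse of their encoding order; decoding the
         * bitstream then yields sequences in forward order. The bracketed figures
         * below appear to be worst-case bit counts accumulated before each flush,
         * for 32-bit and 64-bit targets respectively. */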
        for (i=(int)nbSeq-1; i>=0; i--)
        {
            BYTE matchLength = mlTable[i];
            U32  offset = offsetTable[i];
            BYTE offCode = offCodeTable[i];                                   /* 32b*/  /* 64b*/
            U32 nbBits = (offCode-1) * (!!offCode);
            BYTE litLength = llTable[i];                                      /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateMatchLength, matchLength);   /* 17 */  /* 17 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                    /*  7 */
            BIT_addBits(&blockStream, offset, nbBits);                        /* 32 */  /* 42 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                    /*  7 */
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, offCode);        /* 16 */  /* 51 */
            FSE_encodeSymbol(&blockStream, &stateLitLength, litLength);       /* 26 */  /* 61 */
            BIT_flushBits(&blockStream);                                      /*  7 */  /*  7 */
        }

        FSE_flushCState(&blockStream, &stateMatchLength);
        FSE_flushCState(&blockStream, &stateOffsetBits);
        FSE_flushCState(&blockStream, &stateLitLength);

        streamSize = BIT_closeCStream(&blockStream);
        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
        op += streamSize;
    }

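    /* note : returning 0 here means the encoded block would not be smaller than
     * srcSize by at least minGain; the caller can then store the block
     * uncompressed instead (e.g. via ZSTD_noCompressBlock) */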
    /* check compressibility */
    if ((size_t)(op-dst) >= maxCSize) return 0;

    return op - dst;
}




/* *************************************************************
*  Decompression section
***************************************************************/
struct ZSTD_DCtx_s
{
    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
    void* previousDstEnd;
    void* base;
    size_t expected;
    blockType_t bType;
    U32 phase;
    const BYTE* litPtr;
    size_t litBufSize;
    size_t litSize;
    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
};   /* typedef'd to ZSTD_DCtx within "zstd_static.h" */

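/** Block header format note (ZSTD_blockHeaderSize = 3 bytes) :
 *  the 2 upper bits of the first byte give the block type (bt_compressed, bt_raw,
 *  bt_rle or bt_end); the 3 lower bits of that byte plus the next 2 bytes hold the
 *  block size, stored big-endian, as parsed below. */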
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
    const BYTE* const in = (const BYTE* const)src;
    BYTE headerFlags;
    U32 cSize;

    if (srcSize < 3) return ERROR(srcSize_wrong);

    headerFlags = *in;
    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);

    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;

    if (bpPtr->blockType == bt_end) return 0;
    if (bpPtr->blockType == bt_rle) return 1;
    return cSize;
}

static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
    memcpy(dst, src, srcSize);
    return srcSize;
}


/** ZSTD_decompressLiterals
    @return : nb of bytes read from src, or an error code */
static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
                                      const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;

    const size_t litSize  = (MEM_readLE32(src)  & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */

    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);

    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);

    *maxDstSizePtr = litSize;
    return litCSize + 5;
}


/** ZSTD_decodeLiteralsBlock
    @return : nb of bytes read from src (< srcSize) */
size_t ZSTD_decodeLiteralsBlock(void* ctx,
                                const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
{
    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
    const BYTE* const istart = (const BYTE*) src;

    /* any compressed block with a literals segment must be at least this size */
    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);

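    /* the 2 lowest bits of the first byte select the literals block type :
       0 = Huffman-compressed, IS_RAW (1) = stored, IS_RLE (2) = single repeated byte */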
    switch(*istart & 3)
    {
    /* compressed */
    case 0:
        {
            size_t litSize = BLOCKSIZE;
            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
            dctx->litPtr = dctx->litBuffer;
            dctx->litBufSize = BLOCKSIZE+8;
            dctx->litSize = litSize;
            return readSize;   /* works if it's an error too */
        }
    case IS_RAW:
        {
            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
            {
                if (litSize > srcSize-3) return ERROR(corruption_detected);
                memcpy(dctx->litBuffer, istart, litSize);
                dctx->litPtr = dctx->litBuffer;
                dctx->litBufSize = BLOCKSIZE+8;
                dctx->litSize = litSize;
                return litSize+3;
            }
            /* direct reference into compressed stream */
            dctx->litPtr = istart+3;
            dctx->litBufSize = srcSize-3;
            dctx->litSize = litSize;
            return litSize+3;
        }
    case IS_RLE:
        {
            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
            memset(dctx->litBuffer, istart[3], litSize);
            dctx->litPtr = dctx->litBuffer;
            dctx->litBufSize = BLOCKSIZE+8;
            dctx->litSize = litSize;
            return 4;
        }
    default:
        return ERROR(corruption_detected);   /* forbidden nominal case */
    }
}


size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
                             FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
                             const void* src, size_t srcSize)
{
    const BYTE* const istart = (const BYTE* const)src;
    const BYTE* ip = istart;
    const BYTE* const iend = istart + srcSize;
    U32 LLtype, Offtype, MLtype;
    U32 LLlog, Offlog, MLlog;
    size_t dumpsLength;

    /* check */
    if (srcSize < 5) return ERROR(srcSize_wrong);

    /* SeqHead */
    *nbSeq = MEM_readLE16(ip); ip+=2;
    LLtype  = *ip >> 6;
    Offtype = (*ip >> 4) & 3;
    MLtype  = (*ip >> 2) & 3;
    if (*ip & 2)
    {
        dumpsLength  = ip[2];
        dumpsLength += ip[1] << 8;
        ip += 3;
    }
    else
    {
        dumpsLength  = ip[1];
        dumpsLength += (ip[0] & 1) << 8;
        ip += 2;
    }
    *dumpsPtr = ip;
    ip += dumpsLength;
    *dumpsLengthPtr = dumpsLength;

    /* check */
    if (ip > iend-3) return ERROR(srcSize_wrong);   /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */

    /* sequences */
    {
        S16 norm[MaxML+1];   /* assumption : MaxML >= MaxLL and MaxOff */
        size_t headerSize;

        /* Build DTables */
        switch(LLtype)
        {
        U32 max;
        case bt_rle :
            LLlog = 0;
            FSE_buildDTable_rle(DTableLL, *ip++); break;
        case bt_raw :
            LLlog = LLbits;
            FSE_buildDTable_raw(DTableLL, LLbits); break;
        default :
            max = MaxLL;
            headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
            if (FSE_isError(headerSize)) return ERROR(GENERIC);
            if (LLlog > LLFSELog) return ERROR(corruption_detected);
            ip += headerSize;
            FSE_buildDTable(DTableLL, norm, max, LLlog);
        }

        switch(Offtype)
        {
        U32 max;
        case bt_rle :
            Offlog = 0;
            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff);   /* if *ip > MaxOff, data is corrupted */
            break;
        case bt_raw :
            Offlog = Offbits;
            FSE_buildDTable_raw(DTableOffb, Offbits); break;
        default :
            max = MaxOff;
            headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
            if (FSE_isError(headerSize)) return ERROR(GENERIC);
            if (Offlog > OffFSELog) return ERROR(corruption_detected);
            ip += headerSize;
            FSE_buildDTable(DTableOffb, norm, max, Offlog);
        }

        switch(MLtype)
        {
        U32 max;
        case bt_rle :
            MLlog = 0;
            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
            FSE_buildDTable_rle(DTableML, *ip++); break;
        case bt_raw :
            MLlog = MLbits;
            FSE_buildDTable_raw(DTableML, MLbits); break;
        default :
            max = MaxML;
            headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
            if (FSE_isError(headerSize)) return ERROR(GENERIC);
            if (MLlog > MLFSELog) return ERROR(corruption_detected);
            ip += headerSize;
            FSE_buildDTable(DTableML, norm, max, MLlog);
        }
    }

    return ip-istart;
}


typedef struct {
    size_t litLength;
    size_t offset;
    size_t matchLength;
} seq_t;

typedef struct {
    BIT_DStream_t DStream;
    FSE_DState_t stateLL;
    FSE_DState_t stateOffb;
    FSE_DState_t stateML;
    size_t prevOffset;
    const BYTE* dumps;
    const BYTE* dumpsEnd;
} seqState_t;

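/** ZSTD_decodeSequence
 *  Decodes one (litLength, offset, matchLength) triplet.
 *  Literal and match lengths equal to their maximum symbol value (MaxLL / MaxML)
 *  are escape codes : the real length is then read from the "dumps" area, either as
 *  one extra byte, or as a 3-byte little-endian value when that byte is 255.
 *  Offset code 0 means "repeat the previous offset". */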
static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
{
    size_t litLength;
    size_t prevOffset;
    size_t offset;
    size_t matchLength;
    const BYTE* dumps = seqState->dumps;
    const BYTE* const de = seqState->dumpsEnd;

    /* Literal length */
    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
    prevOffset = litLength ? seq->offset : seqState->prevOffset;
    seqState->prevOffset = seq->offset;
    if (litLength == MaxLL)
    {
        U32 add = *dumps++;
        if (add < 255) litLength += add;
        else
        {
            litLength = MEM_readLE32(dumps) & 0xFFFFFF;   /* no pb : dumps is always followed by seq tables > 1 byte */
            dumps += 3;
        }
        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
    }

    /* Offset */
    {
        static const U32 offsetPrefix[MaxOff+1] = {
                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
        U32 offsetCode, nbBits;
        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
        nbBits = offsetCode - 1;
        if (offsetCode==0) nbBits = 0;   /* cmove */
        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
        if (offsetCode==0) offset = prevOffset;   /* cmove */
    }

    /* MatchLength */
    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
    if (matchLength == MaxML)
    {
        U32 add = *dumps++;
        if (add < 255) matchLength += add;
        else
        {
            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;   /* no pb : dumps is always followed by seq tables > 1 byte */
            dumps += 3;
        }
        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
    }
    matchLength += MINMATCH;

    /* save result */
    seq->litLength = litLength;
    seq->offset = offset;
    seq->matchLength = matchLength;
    seqState->dumps = dumps;
}

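/** ZSTD_execSequence
 *  Copies the literals of one sequence, then copies its match from already-decoded
 *  output. For overlapping matches (offset < 8), the first 8 bytes are produced with
 *  a fixed byte-by-byte pattern, and dec32table/dec64table adjust the match pointer
 *  so that the following 8-byte wildcopies only read data that has already been written. */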
static size_t ZSTD_execSequence(BYTE* op,
                                seq_t sequence,
                                const BYTE** litPtr, const BYTE* const litLimit_8,
                                BYTE* const base, BYTE* const oend)
{
    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
    static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11};   /* subtracted */
    const BYTE* const ostart = op;
    BYTE* const oLitEnd = op + sequence.litLength;
    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
    BYTE* const oend_8 = oend-8;
    const BYTE* const litEnd = *litPtr + sequence.litLength;

    /* check */
    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
    if (litEnd > litLimit_8) return ERROR(corruption_detected);   /* risk read beyond lit buffer */

    /* copy Literals */
    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
    op = oLitEnd;
    *litPtr = litEnd;   /* update for next sequence */

    /* copy Match */
    {
        const BYTE* match = op - sequence.offset;

        /* check */
        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */
        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test (this test seems kept by clang optimizer) */
        if (match < base) return ERROR(corruption_detected);

        /* close range match, overlap */
        if (sequence.offset < 8)
        {
            const int dec64 = dec64table[sequence.offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[sequence.offset];
            ZSTD_copy4(op+4, match);
            match -= dec64;
        }
        else
        {
            ZSTD_copy8(op, match);
        }
        op += 8; match += 8;

        if (oMatchEnd > oend-12)
        {
            if (op < oend_8)
            {
                ZSTD_wildcopy(op, match, oend_8 - op);
                match += oend_8 - op;
                op = oend_8;
            }
            while (op < oMatchEnd) *op++ = *match++;
        }
        else
        {
            ZSTD_wildcopy(op, match, sequence.matchLength-8);   /* works even if matchLength < 8 */
        }
    }

    return oMatchEnd - ostart;
}

static size_t ZSTD_decompressSequences(
                                void* ctx,
                                void* dst, size_t maxDstSize,
                          const void* seqStart, size_t seqSize)
{
    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t errorCode, dumpsLength;
    const BYTE* litPtr = dctx->litPtr;
    const BYTE* const litLimit_8 = litPtr + dctx->litBufSize - 8;
    const BYTE* const litEnd = litPtr + dctx->litSize;
    int nbSeq;
    const BYTE* dumps;
    U32* DTableLL = dctx->LLTable;
    U32* DTableML = dctx->MLTable;
    U32* DTableOffb = dctx->OffTable;
    BYTE* const base = (BYTE*) (dctx->base);

    /* Build Decoding Tables */
    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
                                      DTableLL, DTableML, DTableOffb,
                                      ip, iend-ip);
    if (ZSTD_isError(errorCode)) return errorCode;
    ip += errorCode;

    /* Regen sequences */
    {
        seq_t sequence;
        seqState_t seqState;

        memset(&sequence, 0, sizeof(sequence));
        sequence.offset = 4;
        seqState.dumps = dumps;
        seqState.dumpsEnd = dumps + dumpsLength;
        seqState.prevOffset = 4;
        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);

        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
        {
            size_t oneSeqSize;
            nbSeq--;
            ZSTD_decodeSequence(&sequence, &seqState);
            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litLimit_8, base, oend);
            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
            op += oneSeqSize;
        }

        /* check if reached exact end */
        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */

        /* last literal segment */
        {
            size_t lastLLSize = litEnd - litPtr;
            if (litPtr > litEnd) return ERROR(corruption_detected);
            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
            if (op != litPtr) memcpy(op, litPtr, lastLLSize);
            op += lastLLSize;
        }
    }

    return op-ostart;
}


static size_t ZSTD_decompressBlock(
                                void* ctx,
                                void* dst, size_t maxDstSize,
                          const void* src, size_t srcSize)
{
    /* blockType == blockCompressed */
    const BYTE* ip = (const BYTE*)src;

    /* Decode literals sub-block */
    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
    if (ZSTD_isError(litCSize)) return litCSize;
    ip += litCSize;
    srcSize -= litCSize;

    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
}


size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* iend = ip + srcSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t remainingSize = srcSize;
    U32 magicNumber;
    blockProperties_t blockProperties;

    /* Frame Header */
    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
    magicNumber = MEM_readLE32(src);
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
    if (ZSTD_isLegacy(magicNumber))
        return ZSTD_decompressLegacy(dst, maxDstSize, src, srcSize, magicNumber);
#endif
    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

    /* Loop on each block */
    while (1)
    {
        size_t decodedSize=0;
        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
        if (ZSTD_isError(cBlockSize)) return cBlockSize;

        ip += ZSTD_blockHeaderSize;
        remainingSize -= ZSTD_blockHeaderSize;
        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

        switch(blockProperties.blockType)
        {
        case bt_compressed:
            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
            break;
        case bt_raw :
            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet supported */
            break;
        case bt_end :
            /* end of frame */
            if (remainingSize) return ERROR(srcSize_wrong);
            break;
        default:
            return ERROR(GENERIC);   /* impossible */
        }
        if (cBlockSize == 0) break;   /* bt_end */

        if (ZSTD_isError(decodedSize)) return decodedSize;
        op += decodedSize;
        ip += cBlockSize;
        remainingSize -= cBlockSize;
    }

    return op-ostart;
}

size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    ZSTD_DCtx ctx;
    ctx.base = dst;
    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}


/* ******************************
*  Streaming Decompression API
********************************/
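
/* Typical usage sketch (caller-side pseudo-code, for illustration only; `inBuff`,
 * `outPtr`, `outRoom` and the input-reading step are hypothetical) :
 *
 *     ZSTD_DCtx* dctx = ZSTD_createDCtx();
 *     for (;;)
 *     {
 *         size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
 *         if (toRead == 0) break;                            // frame completed
 *         // read exactly `toRead` compressed bytes into inBuff ...
 *         size_t produced = ZSTD_decompressContinue(dctx, outPtr, outRoom, inBuff, toRead);
 *         if (ZSTD_isError(produced)) break;                 // handle error
 *         outPtr += produced;                                // 0 for header steps
 *     }
 *     ZSTD_freeDCtx(dctx);
 */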

size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
{
    dctx->expected = ZSTD_frameHeaderSize;
    dctx->phase = 0;
    dctx->previousDstEnd = NULL;
    dctx->base = NULL;
    return 0;
}

ZSTD_DCtx* ZSTD_createDCtx(void)
{
    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
    if (dctx==NULL) return NULL;
    ZSTD_resetDCtx(dctx);
    return dctx;
}

size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
    free(dctx);
    return 0;
}

size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
{
    return dctx->expected;
}

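/** ZSTD_decompressContinue
 *  Internal state machine : phase 0 expects the 4-byte frame header, phase 1 expects
 *  a 3-byte block header, phase 2 expects the block content announced by that header.
 *  `dctx->expected` always holds the number of bytes the next call must receive
 *  (see ZSTD_nextSrcSizeToDecompress); it becomes 0 once the end-of-frame block is seen. */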
size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    /* Sanity check */
    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
    if (dst != ctx->previousDstEnd)   /* not contiguous */
        ctx->base = dst;

    /* Decompress : frame header */
    if (ctx->phase == 0)
    {
        /* Check frame magic header */
        U32 magicNumber = MEM_readLE32(src);
        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        return 0;
    }

    /* Decompress : block header */
    if (ctx->phase == 1)
    {
        blockProperties_t bp;
        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
        if (ZSTD_isError(blockSize)) return blockSize;
        if (bp.blockType == bt_end)
        {
            ctx->expected = 0;
            ctx->phase = 0;
        }
        else
        {
            ctx->expected = blockSize;
            ctx->bType = bp.blockType;
            ctx->phase = 2;
        }

        return 0;
    }

    /* Decompress : block content */
    {
        size_t rSize;
        switch(ctx->bType)
        {
        case bt_compressed:
            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
            break;
        case bt_raw :
            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet handled */
            break;
        case bt_end :   /* should never happen (filtered at phase 1) */
            rSize = 0;
            break;
        default:
            return ERROR(GENERIC);
        }
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
        return rSize;
    }
}