| From d2626127c6d6e60e940dd9a3ed58323bdcdc4930 Mon Sep 17 00:00:00 2001 |
| From: Nick Terrell <terrelln@fb.com> |
| Date: Tue, 16 May 2017 14:55:36 -0700 |
| Subject: [PATCH v2 2/4] lib: Add zstd modules |
| |
| Add zstd compression and decompression kernel modules. |
| zstd offers a wide variety of compression speed and quality trade-offs. |
| It can compress at speeds approaching lz4, and quality approaching lzma. |
| zstd decompresses at speeds more than twice as fast as zlib, and |
| decompression speed remains roughly the same across all compression levels. |
| |
| The code was ported from the upstream zstd source repository. The |
| `linux/zstd.h` header was modified to match Linux kernel style. The |
| cross-platform and allocation code was stripped out. Instead, zstd |
| requires the caller to pass a preallocated workspace. The source files |
| were clang-formatted [1] to match the Linux kernel style as much as |
| possible. Otherwise, the code was left unmodified. We would like to |
| avoid further manual modification of the source code as much as |
| possible, so that it is easier to keep the kernel zstd up to date. |
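| |
| For illustration, a kernel caller might use the one-shot compression API |
| with a preallocated workspace roughly as follows. The vmalloc() workspace, |
| compression level 3, and the buffer names are only placeholders, and error |
| handling is abbreviated: |
| |
|   ZSTD_parameters params = ZSTD_getParams(3, src_len, 0); |
|   size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams); |
|   void *wksp = vmalloc(wksp_size); |
|   ZSTD_CCtx *cctx = ZSTD_initCCtx(wksp, wksp_size); |
|   size_t dst_len = ZSTD_compressCCtx(cctx, dst, dst_capacity, |
|                                      src, src_len, params); |
| |
|   if (ZSTD_isError(dst_len)) |
|           dst_len = 0; /* compression failed, error handling elided */ |
|   vfree(wksp); |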
| |
| I benchmarked zstd compression as a special character device. I ran zstd |
| and zlib compression at several levels, as well as performing no |
| compression, which measures the time spent copying the data to kernel |
| space. Data is passed to the compressor 4096 B at a time. The benchmark |
| file is located in the upstream zstd source repository under |
| `contrib/linux-kernel/zstd_compress_test.c` [2]. |
| |
| I ran the benchmarks on an Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM. |
| The VM is running on a MacBook Pro with a 3.1 GHz Intel Core i7 processor, |
| 16 GB of RAM, and an SSD. I benchmarked using `silesia.tar` [3], which is |
| 211,988,480 B in size. Run the following commands for the benchmark: |
| |
| sudo modprobe zstd_compress_test |
| sudo mknod zstd_compress_test c 245 0 |
| sudo cp silesia.tar zstd_compress_test |
| |
| The time reported is the time of the userland `cp` command. |
| The MB/s is computed with |
| |
| 211,988,480 B / time(buffer size, method) |
| |
| which includes the time to copy from userland. |
| The Adjusted MB/s is computed with |
| |
| 211,988,480 B / (time(buffer size, method) - time(buffer size, none)). |
| |
| The memory reported is the amount of memory the compressor requests. |
| |
| | Method | Size (B) | Time (s) | Ratio | MB/s | Adj MB/s | Mem (MB) | |
| |----------|----------|----------|-------|---------|----------|----------| |
| | none | 211988480 | 0.100 | 1 | 2119.88 | - | - | |
| | zstd -1 | 73645762 | 1.044 | 2.878 | 203.05 | 224.56 | 1.23 | |
| | zstd -3 | 66988878 | 1.761 | 3.165 | 120.38 | 127.63 | 2.47 | |
| | zstd -5 | 65001259 | 2.563 | 3.261 | 82.71 | 86.07 | 2.86 | |
| | zstd -10 | 60165346 | 13.242 | 3.523 | 16.01 | 16.13 | 13.22 | |
| | zstd -15 | 58009756 | 47.601 | 3.654 | 4.45 | 4.46 | 21.61 | |
| | zstd -19 | 54014593 | 102.835 | 3.925 | 2.06 | 2.06 | 60.15 | |
| | zlib -1 | 77260026 | 2.895 | 2.744 | 73.23 | 75.85 | 0.27 | |
| | zlib -3 | 72972206 | 4.116 | 2.905 | 51.50 | 52.79 | 0.27 | |
| | zlib -6 | 68190360 | 9.633 | 3.109 | 22.01 | 22.24 | 0.27 | |
| | zlib -9 | 67613382 | 22.554 | 3.135 | 9.40 | 9.44 | 0.27 | |
| |
| I benchmarked zstd decompression using the same method on the same machine. |
| The benchmark file is located in the upstream zstd repo under |
| `contrib/linux-kernel/zstd_decompress_test.c` [4]. The memory reported is |
| the amount of memory required to decompress data compressed with the given |
| compression level. If you know the maximum size of your input, you can |
| reduce the memory usage of decompression irrespective of the compression |
| level. |
| |
| | Method | Time (s) | MB/s | Adjusted MB/s | Memory (MB) | |
| |----------|----------|---------|---------------|-------------| |
| | none | 0.025 | 8479.54 | - | - | |
| | zstd -1 | 0.358 | 592.15 | 636.60 | 0.84 | |
| | zstd -3 | 0.396 | 535.32 | 571.40 | 1.46 | |
| | zstd -5 | 0.396 | 535.32 | 571.40 | 1.46 | |
| | zstd -10 | 0.374 | 566.81 | 607.42 | 2.51 | |
| | zstd -15 | 0.379 | 559.34 | 598.84 | 4.61 | |
| | zstd -19 | 0.412 | 514.54 | 547.77 | 8.80 | |
| | zlib -1 | 0.940 | 225.52 | 231.68 | 0.04 | |
| | zlib -3 | 0.883 | 240.08 | 247.07 | 0.04 | |
| | zlib -6 | 0.844 | 251.17 | 258.84 | 0.04 | |
| | zlib -9 | 0.837 | 253.27 | 287.64 | 0.04 | |
| |
| Tested in userland using the test-suite in the zstd repo under |
| `contrib/linux-kernel/test/UserlandTest.cpp` [5] by mocking the kernel |
| functions. Fuzz tested using libfuzzer [6] with the fuzz harnesses under |
| `contrib/linux-kernel/test/{RoundTripCrash.c,DecompressCrash.c}` [7] [8] |
| with ASAN, UBSAN, and MSAN. Additionally, it was tested while testing the |
| BtrFS and SquashFS patches coming next. |
| |
| [1] https://clang.llvm.org/docs/ClangFormat.html |
| [2] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/zstd_compress_test.c |
| [3] http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia |
| [4] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/zstd_decompress_test.c |
| [5] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/test/UserlandTest.cpp |
| [6] http://llvm.org/docs/LibFuzzer.html |
| [7] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/test/RoundTripCrash.c |
| [8] https://github.com/facebook/zstd/blob/dev/contrib/linux-kernel/test/DecompressCrash.c |
| |
| zstd source repository: https://github.com/facebook/zstd |
| |
| Signed-off-by: Nick Terrell <terrelln@fb.com> |
| --- |
| v1 -> v2: |
| - Use div_u64() for division of u64s |
| - Reduce stack usage of ZSTD_compressSequences(), ZSTD_buildSeqTable(), |
| ZSTD_decompressSequencesLong(), FSE_buildDTable(), FSE_decompress_wksp(), |
| HUF_writeCTable(), HUF_readStats(), HUF_readCTable(), |
| HUF_compressWeights(), HUF_readDTableX2(), and HUF_readDTableX4() |
| - No function uses more than 400 B of stack space |
| |
| include/linux/zstd.h | 1157 +++++++++++++++ |
| lib/Kconfig | 8 + |
| lib/Makefile | 2 + |
| lib/zstd/Makefile | 18 + |
| lib/zstd/bitstream.h | 374 +++++ |
| lib/zstd/compress.c | 3479 +++++++++++++++++++++++++++++++++++++++++++++ |
| lib/zstd/decompress.c | 2526 ++++++++++++++++++++++++++++++++ |
| lib/zstd/entropy_common.c | 243 ++++ |
| lib/zstd/error_private.h | 53 + |
| lib/zstd/fse.h | 575 ++++++++ |
| lib/zstd/fse_compress.c | 795 +++++++++++ |
| lib/zstd/fse_decompress.c | 332 +++++ |
| lib/zstd/huf.h | 212 +++ |
| lib/zstd/huf_compress.c | 771 ++++++++++ |
| lib/zstd/huf_decompress.c | 960 +++++++++++++ |
| lib/zstd/mem.h | 151 ++ |
| lib/zstd/zstd_common.c | 75 + |
| lib/zstd/zstd_internal.h | 269 ++++ |
| lib/zstd/zstd_opt.h | 1014 +++++++++++++ |
| 19 files changed, 13014 insertions(+) |
| create mode 100644 include/linux/zstd.h |
| create mode 100644 lib/zstd/Makefile |
| create mode 100644 lib/zstd/bitstream.h |
| create mode 100644 lib/zstd/compress.c |
| create mode 100644 lib/zstd/decompress.c |
| create mode 100644 lib/zstd/entropy_common.c |
| create mode 100644 lib/zstd/error_private.h |
| create mode 100644 lib/zstd/fse.h |
| create mode 100644 lib/zstd/fse_compress.c |
| create mode 100644 lib/zstd/fse_decompress.c |
| create mode 100644 lib/zstd/huf.h |
| create mode 100644 lib/zstd/huf_compress.c |
| create mode 100644 lib/zstd/huf_decompress.c |
| create mode 100644 lib/zstd/mem.h |
| create mode 100644 lib/zstd/zstd_common.c |
| create mode 100644 lib/zstd/zstd_internal.h |
| create mode 100644 lib/zstd/zstd_opt.h |
| |
| diff --git a/include/linux/zstd.h b/include/linux/zstd.h |
| new file mode 100644 |
| index 0000000..249575e |
| --- /dev/null |
| +++ b/include/linux/zstd.h |
| @@ -0,0 +1,1157 @@ |
| +/* |
| + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +#ifndef ZSTD_H |
| +#define ZSTD_H |
| + |
| +/* ====== Dependency ======*/ |
| +#include <linux/types.h> /* size_t */ |
| + |
| + |
| +/*-***************************************************************************** |
| + * Introduction |
| + * |
| + * zstd, short for Zstandard, is a fast lossless compression algorithm, |
| + * targeting real-time compression scenarios at zlib-level and better |
| + * compression ratios. The zstd compression library provides in-memory |
| + * compression and decompression functions. The library supports compression |
| + * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled |
| + * ultra, should be used with caution, as they require more memory. |
| + * Compression can be done in: |
| + * - a single step, reusing a context (described as Explicit memory management) |
| + * - unbounded multiple steps (described as Streaming compression) |
| + * The compression ratio achievable on small data can be highly improved using |
| + * compression with a dictionary in: |
| + * - a single step (described as Simple dictionary API) |
| + * - a single step, reusing a dictionary (described as Fast dictionary API) |
| + ******************************************************************************/ |
| + |
| +/*====== Helper functions ======*/ |
| + |
| +/** |
| + * enum ZSTD_ErrorCode - zstd error codes |
| + * |
| + * Functions that return size_t can be checked for errors using ZSTD_isError() |
| + * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode(). |
| + */ |
| +typedef enum { |
| + ZSTD_error_no_error, |
| + ZSTD_error_GENERIC, |
| + ZSTD_error_prefix_unknown, |
| + ZSTD_error_version_unsupported, |
| + ZSTD_error_parameter_unknown, |
| + ZSTD_error_frameParameter_unsupported, |
| + ZSTD_error_frameParameter_unsupportedBy32bits, |
| + ZSTD_error_frameParameter_windowTooLarge, |
| + ZSTD_error_compressionParameter_unsupported, |
| + ZSTD_error_init_missing, |
| + ZSTD_error_memory_allocation, |
| + ZSTD_error_stage_wrong, |
| + ZSTD_error_dstSize_tooSmall, |
| + ZSTD_error_srcSize_wrong, |
| + ZSTD_error_corruption_detected, |
| + ZSTD_error_checksum_wrong, |
| + ZSTD_error_tableLog_tooLarge, |
| + ZSTD_error_maxSymbolValue_tooLarge, |
| + ZSTD_error_maxSymbolValue_tooSmall, |
| + ZSTD_error_dictionary_corrupted, |
| + ZSTD_error_dictionary_wrong, |
| + ZSTD_error_dictionaryCreation_failed, |
| + ZSTD_error_maxCode |
| +} ZSTD_ErrorCode; |
| + |
| +/** |
| + * ZSTD_maxCLevel() - maximum compression level available |
| + * |
| + * Return: Maximum compression level available. |
| + */ |
| +int ZSTD_maxCLevel(void); |
| +/** |
| + * ZSTD_compressBound() - maximum compressed size in worst case scenario |
| + * @srcSize: The size of the data to compress. |
| + * |
| + * Return: The maximum compressed size in the worst case scenario. |
| + */ |
| +size_t ZSTD_compressBound(size_t srcSize); |
| +/** |
| + * ZSTD_isError() - tells if a size_t function result is an error code |
| + * @code: The function result to check for error. |
| + * |
| + * Return: Non-zero iff the code is an error. |
| + */ |
| +static __attribute__((unused)) unsigned int ZSTD_isError(size_t code) |
| +{ |
| + return code > (size_t)-ZSTD_error_maxCode; |
| +} |
| +/** |
| + * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode |
| + * @functionResult: The result of a function for which ZSTD_isError() is true. |
| + * |
| + * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0 |
| + * if the functionResult isn't an error. |
| + */ |
| +static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode( |
| + size_t functionResult) |
| +{ |
| + if (!ZSTD_isError(functionResult)) |
| + return (ZSTD_ErrorCode)0; |
| + return (ZSTD_ErrorCode)(0 - functionResult); |
| +} |
| + |
| +/** |
| + * enum ZSTD_strategy - zstd compression search strategy |
| + * |
| + * From faster to stronger. |
| + */ |
| +typedef enum { |
| + ZSTD_fast, |
| + ZSTD_dfast, |
| + ZSTD_greedy, |
| + ZSTD_lazy, |
| + ZSTD_lazy2, |
| + ZSTD_btlazy2, |
| + ZSTD_btopt, |
| + ZSTD_btopt2 |
| +} ZSTD_strategy; |
| + |
| +/** |
| + * struct ZSTD_compressionParameters - zstd compression parameters |
| + * @windowLog: Log of the largest match distance. Larger means more |
| + * compression, and more memory needed during decompression. |
| + * @chainLog: Fully searched segment. Larger means more compression, slower, |
| + * and more memory (useless for fast). |
| + * @hashLog: Dispatch table. Larger means more compression, |
| + * slower, and more memory. |
| + * @searchLog: Number of searches. Larger means more compression and slower. |
| + * @searchLength: Match length searched. Larger means faster decompression, |
| + * sometimes less compression. |
| + * @targetLength: Acceptable match size for optimal parser (only). Larger means |
| + * more compression, and slower. |
| + * @strategy: The zstd compression strategy. |
| + */ |
| +typedef struct { |
| + unsigned int windowLog; |
| + unsigned int chainLog; |
| + unsigned int hashLog; |
| + unsigned int searchLog; |
| + unsigned int searchLength; |
| + unsigned int targetLength; |
| + ZSTD_strategy strategy; |
| +} ZSTD_compressionParameters; |
| + |
| +/** |
| + * struct ZSTD_frameParameters - zstd frame parameters |
| + * @contentSizeFlag: Controls whether content size will be present in the frame |
| + * header (when known). |
| + * @checksumFlag: Controls whether a 32-bit checksum is generated at the end |
| + * of the frame for error detection. |
| + * @noDictIDFlag: Controls whether dictID will be saved into the frame header |
| + * when using dictionary compression. |
| + * |
| + * The default value is all fields set to 0. |
| + */ |
| +typedef struct { |
| + unsigned int contentSizeFlag; |
| + unsigned int checksumFlag; |
| + unsigned int noDictIDFlag; |
| +} ZSTD_frameParameters; |
| + |
| +/** |
| + * struct ZSTD_parameters - zstd parameters |
| + * @cParams: The compression parameters. |
| + * @fParams: The frame parameters. |
| + */ |
| +typedef struct { |
| + ZSTD_compressionParameters cParams; |
| + ZSTD_frameParameters fParams; |
| +} ZSTD_parameters; |
| + |
| +/** |
| + * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level |
| + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). |
| + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. |
| + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. |
| + * |
| + * Return: The selected ZSTD_compressionParameters. |
| + */ |
| +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, |
| + unsigned long long estimatedSrcSize, size_t dictSize); |
| + |
| +/** |
| + * ZSTD_getParams() - returns ZSTD_parameters for selected level |
| + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). |
| + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. |
| + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. |
| + * |
| + * The same as ZSTD_getCParams() except also selects the default frame |
| + * parameters (all zero). |
| + * |
| + * Return: The selected ZSTD_parameters. |
| + */ |
| +ZSTD_parameters ZSTD_getParams(int compressionLevel, |
| + unsigned long long estimatedSrcSize, size_t dictSize); |
| + |
| +/*-************************************* |
| + * Explicit memory management |
| + **************************************/ |
| + |
| +/** |
| + * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx |
| + * @cParams: The compression parameters to be used for compression. |
| + * |
| + * If multiple compression parameters might be used, the caller must call |
| + * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum |
| + * size. |
| + * |
| + * Return: A lower bound on the size of the workspace that is passed to |
| + * ZSTD_initCCtx(). |
| + */ |
| +size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams); |
| + |
| +/** |
| + * struct ZSTD_CCtx - the zstd compression context |
| + * |
| + * When compressing many times it is recommended to allocate a context just once |
| + * and reuse it for each successive compression operation. |
| + */ |
| +typedef struct ZSTD_CCtx_s ZSTD_CCtx; |
| +/** |
| + * ZSTD_initCCtx() - initialize a zstd compression context |
| + * @workspace: The workspace to emplace the context into. It must outlive |
| + * the returned context. |
| + * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to |
| + * determine how large the workspace must be. |
| + * |
| + * Return: A compression context emplaced into workspace. |
| + */ |
| +ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize); |
| + |
| +/** |
| + * ZSTD_compressCCtx() - compress src into dst |
| + * @ctx: The context. Must have been initialized with a workspace at |
| + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). |
| + * @dst: The buffer to compress src into. |
| + * @dstCapacity: The size of the destination buffer. May be any size, but |
| + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. |
| + * @src: The data to compress. |
| + * @srcSize: The size of the data to compress. |
| + * @params: The parameters to use for compression. See ZSTD_getParams(). |
| + * |
| + * Return: The compressed size or an error, which can be checked using |
| + * ZSTD_isError(). |
| + */ |
| +size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize, ZSTD_parameters params); |
| + |
| +/** |
| + * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx |
| + * |
| + * Return: A lower bound on the size of the workspace that is passed to |
| + * ZSTD_initDCtx(). |
| + */ |
| +size_t ZSTD_DCtxWorkspaceBound(void); |
| + |
| +/** |
| + * struct ZSTD_DCtx - the zstd decompression context |
| + * |
| + * When decompressing many times it is recommended to allocate a context just |
| + * once and reuse it for each successive decompression operation. |
| + */ |
| +typedef struct ZSTD_DCtx_s ZSTD_DCtx; |
| +/** |
| + * ZSTD_initDCtx() - initialize a zstd decompression context |
| + * @workspace: The workspace to emplace the context into. It must outlive |
| + * the returned context. |
| + * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to |
| + * determine how large the workspace must be. |
| + * |
| + * Return: A decompression context emplaced into workspace. |
| + */ |
| +ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize); |
| + |
| +/** |
| + * ZSTD_decompressDCtx() - decompress zstd compressed src into dst |
| + * @ctx: The decompression context. |
| + * @dst: The buffer to decompress src into. |
| + * @dstCapacity: The size of the destination buffer. Must be at least as large |
| + * as the decompressed size. If the caller cannot upper bound the |
| + * decompressed size, then it's better to use the streaming API. |
| + * @src: The zstd compressed data to decompress. Multiple concatenated |
| + * frames and skippable frames are allowed. |
| + * @srcSize: The exact size of the data to decompress. |
| + * |
| + * Return: The decompressed size or an error, which can be checked using |
| + * ZSTD_isError(). |
| + */ |
| +size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize); |
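| + |
| +/* |
| + * Illustrative example (sketch only, not part of the zstd API itself): |
| + * one-shot decompression using a caller-provided workspace. The vmalloc() |
| + * allocation and the `frame`, `frameSize`, `dst`, and `dstCapacity` |
| + * variables are hypothetical; NULL checks and error handling are elided. |
| + * |
| + *	size_t const wkspSize = ZSTD_DCtxWorkspaceBound(); |
| + *	void *const wksp = vmalloc(wkspSize); |
| + *	ZSTD_DCtx *const dctx = ZSTD_initDCtx(wksp, wkspSize); |
| + *	size_t const n = ZSTD_decompressDCtx(dctx, dst, dstCapacity, |
| + *					     frame, frameSize); |
| + * |
| + *	if (ZSTD_isError(n)) |
| + *		pr_debug("zstd decompression failed\n"); |
| + *	vfree(wksp); |
| + */ |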
| + |
| +/*-************************ |
| + * Simple dictionary API |
| + **************************/ |
| + |
| +/** |
| + * ZSTD_compress_usingDict() - compress src into dst using a dictionary |
| + * @ctx: The context. Must have been initialized with a workspace at |
| + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). |
| + * @dst: The buffer to compress src into. |
| + * @dstCapacity: The size of the destination buffer. May be any size, but |
| + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. |
| + * @src: The data to compress. |
| + * @srcSize: The size of the data to compress. |
| + * @dict: The dictionary to use for compression. |
| + * @dictSize: The size of the dictionary. |
| + * @params: The parameters to use for compression. See ZSTD_getParams(). |
| + * |
| + * Compression using a predefined dictionary. The same dictionary must be used |
| + * during decompression. |
| + * |
| + * Return: The compressed size or an error, which can be checked using |
| + * ZSTD_isError(). |
| + */ |
| +size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize, const void *dict, size_t dictSize, |
| + ZSTD_parameters params); |
| + |
| +/** |
| + * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary |
| + * @ctx: The decompression context. |
| + * @dst: The buffer to decompress src into. |
| + * @dstCapacity: The size of the destination buffer. Must be at least as large |
| + * as the decompressed size. If the caller cannot upper bound the |
| + * decompressed size, then it's better to use the streaming API. |
| + * @src: The zstd compressed data to decompress. Multiple concatenated |
| + * frames and skippable frames are allowed. |
| + * @srcSize: The exact size of the data to decompress. |
| + * @dict: The dictionary to use for decompression. The same dictionary |
| + * must've been used to compress the data. |
| + * @dictSize: The size of the dictionary. |
| + * |
| + * Return: The decompressed size or an error, which can be checked using |
| + * ZSTD_isError(). |
| + */ |
| +size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize, const void *dict, size_t dictSize); |
| + |
| +/*-************************** |
| + * Fast dictionary API |
| + ***************************/ |
| + |
| +/** |
| + * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict |
| + * @cParams: The compression parameters to be used for compression. |
| + * |
| + * Return: A lower bound on the size of the workspace that is passed to |
| + * ZSTD_initCDict(). |
| + */ |
| +size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams); |
| + |
| +/** |
| + * struct ZSTD_CDict - a digested dictionary to be used for compression |
| + */ |
| +typedef struct ZSTD_CDict_s ZSTD_CDict; |
| + |
| +/** |
| + * ZSTD_initCDict() - initialize a digested dictionary for compression |
| + * @dictBuffer: The dictionary to digest. The buffer is referenced by the |
| + * ZSTD_CDict so it must outlive the returned ZSTD_CDict. |
| + * @dictSize: The size of the dictionary. |
| + * @params: The parameters to use for compression. See ZSTD_getParams(). |
| + * @workspace: The workspace. It must outlive the returned ZSTD_CDict. |
| + * @workspaceSize: The workspace size. Must be at least |
| + * ZSTD_CDictWorkspaceBound(params.cParams). |
| + * |
| + * When compressing multiple messages / blocks with the same dictionary it is |
| + * recommended to load it just once. The ZSTD_CDict merely references the |
| + * dictBuffer, so it must outlive the returned ZSTD_CDict. |
| + * |
| + * Return: The digested dictionary emplaced into workspace. |
| + */ |
| +ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize, |
| + ZSTD_parameters params, void *workspace, size_t workspaceSize); |
| + |
| +/** |
| + * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict |
| + * @cctx: The context. Must have been initialized with a workspace at |
| + * least as large as ZSTD_CCtxWorkspaceBound(cParams) where |
| + * cParams are the compression parameters used to initialize the |
| + * cdict. |
| + * @dst: The buffer to compress src into. |
| + * @dstCapacity: The size of the destination buffer. May be any size, but |
| + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. |
| + * @src: The data to compress. |
| + * @srcSize: The size of the data to compress. |
| + * @cdict: The digested dictionary to use for compression. |
| + * |
| + * Compression using a digested dictionary. The same dictionary must be used |
| + * during decompression. |
| + * |
| + * Return: The compressed size or an error, which can be checked using |
| + * ZSTD_isError(). |
| + */ |
| +size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize, const ZSTD_CDict *cdict); |
| + |
| + |
| +/** |
| + * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict |
| + * |
| + * Return: A lower bound on the size of the workspace that is passed to |
| + * ZSTD_initDDict(). |
| + */ |
| +size_t ZSTD_DDictWorkspaceBound(void); |
| + |
| +/** |
| + * struct ZSTD_DDict - a digested dictionary to be used for decompression |
| + */ |
| +typedef struct ZSTD_DDict_s ZSTD_DDict; |
| + |
| +/** |
| + * ZSTD_initDDict() - initialize a digested dictionary for decompression |
| + * @dictBuffer: The dictionary to digest. The buffer is referenced by the |
| + * ZSTD_DDict so it must outlive the returned ZSTD_DDict. |
| + * @dictSize: The size of the dictionary. |
| + * @workspace: The workspace. It must outlive the returned ZSTD_DDict. |
| + * @workspaceSize: The workspace size. Must be at least |
| + * ZSTD_DDictWorkspaceBound(). |
| + * |
| + * When decompressing multiple messages / blocks with the same dictionary it is |
| + * recommended to load it just once. The ZSTD_DDict merely references the |
| + * dictBuffer, so it must outlive the returned ZSTD_DDict. |
| + * |
| + * Return: The digested dictionary emplaced into workspace. |
| + */ |
| +ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize, |
| + void *workspace, size_t workspaceSize); |
| + |
| +/** |
| + * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict |
| + * @dctx: The decompression context. |
| + * @dst: The buffer to decompress src into. |
| + * @dstCapacity: The size of the destination buffer. Must be at least as large |
| + * as the decompressed size. If the caller cannot upper bound the |
| + * decompressed size, then it's better to use the streaming API. |
| + * @src: The zstd compressed data to decompress. Multiple concatenated |
| + * frames and skippable frames are allowed. |
| + * @srcSize: The exact size of the data to decompress. |
| + * @ddict: The digested dictionary to use for decompression. The same |
| + * dictionary must've been used to compress the data. |
| + * |
| + * Return: The decompressed size or an error, which can be checked using |
| + * ZSTD_isError(). |
| + */ |
| +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, |
| + size_t dstCapacity, const void *src, size_t srcSize, |
| + const ZSTD_DDict *ddict); |
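| + |
| +/* |
| + * Illustrative example (sketch only, not part of the zstd API itself): |
| + * compressing many small messages with one digested dictionary. The |
| + * `dict`, `dictSize`, `cdictWksp`, and buffer variables are hypothetical; |
| + * error handling is elided. |
| + * |
| + *	ZSTD_parameters const params = ZSTD_getParams(3, 0, dictSize); |
| + *	size_t const cdictWkspSize = ZSTD_CDictWorkspaceBound(params.cParams); |
| + *	ZSTD_CDict *const cdict = ZSTD_initCDict(dict, dictSize, params, |
| + *						 cdictWksp, cdictWkspSize); |
| + * |
| + *	(reuse the same cctx and cdict for every message) |
| + *	size_t const n = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, |
| + *						  src, srcSize, cdict); |
| + */ |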
| + |
| + |
| +/*-************************** |
| + * Streaming |
| + ***************************/ |
| + |
| +/** |
| + * struct ZSTD_inBuffer - input buffer for streaming |
| + * @src: Start of the input buffer. |
| + * @size: Size of the input buffer. |
| + * @pos: Position where reading stopped. Will be updated. |
| + * Necessarily 0 <= pos <= size. |
| + */ |
| +typedef struct ZSTD_inBuffer_s { |
| + const void *src; |
| + size_t size; |
| + size_t pos; |
| +} ZSTD_inBuffer; |
| + |
| +/** |
| + * struct ZSTD_outBuffer - output buffer for streaming |
| + * @dst: Start of the output buffer. |
| + * @size: Size of the output buffer. |
| + * @pos: Position where writing stopped. Will be updated. |
| + * Necessarily 0 <= pos <= size. |
| + */ |
| +typedef struct ZSTD_outBuffer_s { |
| + void *dst; |
| + size_t size; |
| + size_t pos; |
| +} ZSTD_outBuffer; |
| + |
| + |
| + |
| +/*-***************************************************************************** |
| + * Streaming compression - HowTo |
| + * |
| + * A ZSTD_CStream object is required to track streaming operation. |
| + * Use ZSTD_initCStream() to initialize a ZSTD_CStream object. |
| + * ZSTD_CStream objects can be reused multiple times on consecutive compression |
| + * operations. It is recommended to re-use ZSTD_CStream in situations where many |
| + * streaming operations will be achieved consecutively. Use one separate |
| + * ZSTD_CStream per thread for parallel execution. |
| + * |
| + * Use ZSTD_compressStream() repetitively to consume input stream. |
| + * The function will automatically update both `pos` fields. |
| + * Note that it may not consume the entire input, in which case `pos < size`, |
| + * and it's up to the caller to present the remaining data again. |
| + * It returns a hint for the preferred number of bytes to use as an input for |
| + * the next function call. |
| + * |
| + * At any moment, it's possible to flush whatever data remains within internal |
| + * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might |
| + * still be some content left within the internal buffer if `output->size` is |
| + * too small. It returns the number of bytes left in the internal buffer and |
| + * must be called until it returns 0. |
| + * |
| + * ZSTD_endStream() instructs to finish a frame. It will perform a flush and |
| + * write frame epilogue. The epilogue is required for decoders to consider a |
| + * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush |
| + * the full content if `output->size` is too small. In that case, call |
| + * ZSTD_endStream() again to complete the flush. It returns the number of bytes |
| + * left in the internal buffer and must be called until it returns 0. |
| + ******************************************************************************/ |
| + |
| +/** |
| + * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream |
| + * @cParams: The compression parameters to be used for compression. |
| + * |
| + * Return: A lower bound on the size of the workspace that is passed to |
| + * ZSTD_initCStream() and ZSTD_initCStream_usingCDict(). |
| + */ |
| +size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams); |
| + |
| +/** |
| + * struct ZSTD_CStream - the zstd streaming compression context |
| + */ |
| +typedef struct ZSTD_CStream_s ZSTD_CStream; |
| + |
| +/*===== ZSTD_CStream management functions =====*/ |
| +/** |
| + * ZSTD_initCStream() - initialize a zstd streaming compression context |
| + * @params: The zstd compression parameters. |
| + * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must |
| + * pass the source size (zero means empty source). Otherwise, |
| + * the caller may optionally pass the source size, or zero if |
| + * unknown. |
| + * @workspace: The workspace to emplace the context into. It must outlive |
| + * the returned context. |
| + * @workspaceSize: The size of workspace. |
| + * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine |
| + * how large the workspace must be. |
| + * |
| + * Return: The zstd streaming compression context. |
| + */ |
| +ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, |
| + unsigned long long pledgedSrcSize, void *workspace, |
| + size_t workspaceSize); |
| + |
| +/** |
| + * ZSTD_initCStream_usingCDict() - initialize a streaming compression context |
| + * @cdict: The digested dictionary to use for compression. |
| + * @pledgedSrcSize: Optionally the source size, or zero if unknown. |
| + * @workspace: The workspace to emplace the context into. It must outlive |
| + * the returned context. |
| + * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound() |
| + * with the cParams used to initialize the cdict to determine |
| + * how large the workspace must be. |
| + * |
| + * Return: The zstd streaming compression context. |
| + */ |
| +ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, |
| + unsigned long long pledgedSrcSize, void *workspace, |
| + size_t workspaceSize); |
| + |
| +/*===== Streaming compression functions =====*/ |
| +/** |
| + * ZSTD_resetCStream() - reset the context using parameters from creation |
| + * @zcs: The zstd streaming compression context to reset. |
| + * @pledgedSrcSize: Optionally the source size, or zero if unknown. |
| + * |
| + * Resets the context using the parameters from creation. Skips dictionary |
| + * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame |
| + * content size is always written into the frame header. |
| + * |
| + * Return: Zero or an error, which can be checked using ZSTD_isError(). |
| + */ |
| +size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize); |
| +/** |
| + * ZSTD_compressStream() - streaming compress some of input into output |
| + * @zcs: The zstd streaming compression context. |
| + * @output: Destination buffer. `output->pos` is updated to indicate how much |
| + * compressed data was written. |
| + * @input: Source buffer. `input->pos` is updated to indicate how much data was |
| + * read. Note that it may not consume the entire input, in which case |
| + * `input->pos < input->size`, and it's up to the caller to present |
| + * remaining data again. |
| + * |
| + * The `input` and `output` buffers may be any size. Guaranteed to make some |
| + * forward progress if `input` and `output` are not empty. |
| + * |
| + * Return: A hint for the number of bytes to use as the input for the next |
| + * function call or an error, which can be checked using |
| + * ZSTD_isError(). |
| + */ |
| +size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, |
| + ZSTD_inBuffer *input); |
| +/** |
| + * ZSTD_flushStream() - flush internal buffers into output |
| + * @zcs: The zstd streaming compression context. |
| + * @output: Destination buffer. `output->pos` is updated to indicate how much |
| + * compressed data was written. |
| + * |
| + * ZSTD_flushStream() must be called until it returns 0, meaning all the data |
| + * has been flushed. Since ZSTD_flushStream() causes a block to be ended, |
| + * calling it too often will degrade the compression ratio. |
| + * |
| + * Return: The number of bytes still present within internal buffers or an |
| + * error, which can be checked using ZSTD_isError(). |
| + */ |
| +size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); |
| +/** |
| + * ZSTD_endStream() - flush internal buffers into output and end the frame |
| + * @zcs: The zstd streaming compression context. |
| + * @output: Destination buffer. `output->pos` is updated to indicate how much |
| + * compressed data was written. |
| + * |
| + * ZSTD_endStream() must be called until it returns 0, meaning all the data has |
| + * been flushed and the frame epilogue has been written. |
| + * |
| + * Return: The number of bytes still present within internal buffers or an |
| + * error, which can be checked using ZSTD_isError(). |
| + */ |
| +size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); |
| + |
| +/** |
| + * ZSTD_CStreamInSize() - recommended size for the input buffer |
| + * |
| + * Return: The recommended size for the input buffer. |
| + */ |
| +size_t ZSTD_CStreamInSize(void); |
| +/** |
| + * ZSTD_CStreamOutSize() - recommended size for the output buffer |
| + * |
| + * When the output buffer is at least this large, it is guaranteed to be large |
| + * enough to flush at least one complete compressed block. |
| + * |
| + * Return: The recommended size for the output buffer. |
| + */ |
| +size_t ZSTD_CStreamOutSize(void); |
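| + |
| +/* |
| + * Illustrative example (sketch only, not part of the zstd API itself): |
| + * streaming compression of input read chunk by chunk. read_chunk() and |
| + * write_out() are hypothetical helpers that fill `in` and drain `out`; |
| + * error handling is elided. |
| + * |
| + *	ZSTD_CStream *zcs = ZSTD_initCStream(params, 0, wksp, wkspSize); |
| + *	ZSTD_inBuffer in; |
| + *	ZSTD_outBuffer out = { outBuf, outBufSize, 0 }; |
| + * |
| + *	while (read_chunk(&in)) { |
| + *		while (in.pos < in.size) { |
| + *			ZSTD_compressStream(zcs, &out, &in); |
| + *			write_out(&out); |
| + *			out.pos = 0; |
| + *		} |
| + *	} |
| + *	while (ZSTD_endStream(zcs, &out) != 0) { |
| + *		write_out(&out); |
| + *		out.pos = 0; |
| + *	} |
| + *	write_out(&out); |
| + */ |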
| + |
| + |
| + |
| +/*-***************************************************************************** |
| + * Streaming decompression - HowTo |
| + * |
| + * A ZSTD_DStream object is required to track streaming operations. |
| + * Use ZSTD_initDStream() to initialize a ZSTD_DStream object. |
| + * ZSTD_DStream objects can be re-used multiple times. |
| + * |
| + * Use ZSTD_decompressStream() repetitively to consume your input. |
| + * The function will update both `pos` fields. |
| + * If `input->pos < input->size`, some input has not been consumed. |
| + * It's up to the caller to present the remaining data again. |
| + * If `output->pos < output->size`, the decoder has flushed everything it could. |
| + * Returns 0 iff a frame is completely decoded and fully flushed. |
| + * Otherwise it returns a suggested next input size that will never load more |
| + * than the current frame. |
| + ******************************************************************************/ |
| + |
| +/** |
| + * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream |
| + * @maxWindowSize: The maximum window size allowed for compressed frames. |
| + * |
| + * Return: A lower bound on the size of the workspace that is passed to |
| + * ZSTD_initDStream() and ZSTD_initDStream_usingDDict(). |
| + */ |
| +size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize); |
| + |
| +/** |
| + * struct ZSTD_DStream - the zstd streaming decompression context |
| + */ |
| +typedef struct ZSTD_DStream_s ZSTD_DStream; |
| +/*===== ZSTD_DStream management functions =====*/ |
| +/** |
| + * ZSTD_initDStream() - initialize a zstd streaming decompression context |
| + * @maxWindowSize: The maximum window size allowed for compressed frames. |
| + * @workspace: The workspace to emplace the context into. It must outlive |
| + * the returned context. |
| + * @workspaceSize: The size of workspace. |
| + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine |
| + * how large the workspace must be. |
| + * |
| + * Return: The zstd streaming decompression context. |
| + */ |
| +ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, |
| + size_t workspaceSize); |
| +/** |
| + * ZSTD_initDStream_usingDDict() - initialize streaming decompression context |
| + * @maxWindowSize: The maximum window size allowed for compressed frames. |
| + * @ddict: The digested dictionary to use for decompression. |
| + * @workspace: The workspace to emplace the context into. It must outlive |
| + * the returned context. |
| + * @workspaceSize: The size of workspace. |
| + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine |
| + * how large the workspace must be. |
| + * |
| + * Return: The zstd streaming decompression context. |
| + */ |
| +ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, |
| + const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize); |
| + |
| +/*===== Streaming decompression functions =====*/ |
| +/** |
| + * ZSTD_resetDStream() - reset the context using parameters from creation |
| + * @zds: The zstd streaming decompression context to reset. |
| + * |
| + * Resets the context using the parameters from creation. Skips dictionary |
| + * loading, since it can be reused. |
| + * |
| + * Return: Zero or an error, which can be checked using ZSTD_isError(). |
| + */ |
| +size_t ZSTD_resetDStream(ZSTD_DStream *zds); |
| +/** |
| + * ZSTD_decompressStream() - streaming decompress some of input into output |
| + * @zds: The zstd streaming decompression context. |
| + * @output: Destination buffer. `output.pos` is updated to indicate how much |
| + * decompressed data was written. |
| + * @input: Source buffer. `input.pos` is updated to indicate how much data was |
| + * read. Note that it may not consume the entire input, in which case |
| + * `input.pos < input.size`, and it's up to the caller to present |
| + * remaining data again. |
| + * |
| + * The `input` and `output` buffers may be any size. Guaranteed to make some |
| + * forward progress if `input` and `output` are not empty. |
| + * ZSTD_decompressStream() will not consume the last byte of the frame until |
| + * the entire frame is flushed. |
| + * |
| + * Return: Returns 0 iff a frame is completely decoded and fully flushed. |
| + * Otherwise returns a hint for the number of bytes to use as the input |
| + * for the next function call or an error, which can be checked using |
| + * ZSTD_isError(). The size hint will never load more than the frame. |
| + */ |
| +size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, |
| + ZSTD_inBuffer *input); |
| + |
| +/** |
| + * ZSTD_DStreamInSize() - recommended size for the input buffer |
| + * |
| + * Return: The recommended size for the input buffer. |
| + */ |
| +size_t ZSTD_DStreamInSize(void); |
| +/** |
| + * ZSTD_DStreamOutSize() - recommended size for the output buffer |
| + * |
| + * When the output buffer is at least this large, it is guaranteed to be large |
| + * enough to flush at least one complete decompressed block. |
| + * |
| + * Return: The recommended size for the output buffer. |
| + */ |
| +size_t ZSTD_DStreamOutSize(void); |
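| + |
| +/* |
| + * Illustrative example (sketch only, not part of the zstd API itself): |
| + * streaming decompression of a single frame. read_chunk() and write_out() |
| + * are hypothetical helpers; error handling is elided. A return value of 0 |
| + * means the frame is completely decoded and fully flushed. |
| + * |
| + *	ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, wksp, wkspSize); |
| + *	ZSTD_inBuffer in; |
| + *	ZSTD_outBuffer out = { outBuf, outBufSize, 0 }; |
| + *	size_t ret = 1; |
| + * |
| + *	while (ret != 0 && read_chunk(&in)) { |
| + *		while (ret != 0 && in.pos < in.size) { |
| + *			ret = ZSTD_decompressStream(zds, &out, &in); |
| + *			write_out(&out); |
| + *			out.pos = 0; |
| + *		} |
| + *	} |
| + */ |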
| + |
| + |
| +/* --- Constants ---*/ |
| +#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */ |
| +#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U |
| + |
| +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) |
| +#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) |
| + |
| +#define ZSTD_WINDOWLOG_MAX_32 27 |
| +#define ZSTD_WINDOWLOG_MAX_64 27 |
| +#define ZSTD_WINDOWLOG_MAX \ |
| + ((unsigned int)(sizeof(size_t) == 4 \ |
| + ? ZSTD_WINDOWLOG_MAX_32 \ |
| + : ZSTD_WINDOWLOG_MAX_64)) |
| +#define ZSTD_WINDOWLOG_MIN 10 |
| +#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX |
| +#define ZSTD_HASHLOG_MIN 6 |
| +#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1) |
| +#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN |
| +#define ZSTD_HASHLOG3_MAX 17 |
| +#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) |
| +#define ZSTD_SEARCHLOG_MIN 1 |
| +/* only for ZSTD_fast, other strategies are limited to 6 */ |
| +#define ZSTD_SEARCHLENGTH_MAX 7 |
| +/* only for ZSTD_btopt, other strategies are limited to 4 */ |
| +#define ZSTD_SEARCHLENGTH_MIN 3 |
| +#define ZSTD_TARGETLENGTH_MIN 4 |
| +#define ZSTD_TARGETLENGTH_MAX 999 |
| + |
| +/* for static allocation */ |
| +#define ZSTD_FRAMEHEADERSIZE_MAX 18 |
| +#define ZSTD_FRAMEHEADERSIZE_MIN 6 |
| +static const size_t ZSTD_frameHeaderSize_prefix = 5; |
| +static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN; |
| +static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX; |
| +/* magic number + skippable frame length */ |
| +static const size_t ZSTD_skippableHeaderSize = 8; |
| + |
| + |
| +/*-************************************* |
| + * Compressed size functions |
| + **************************************/ |
| + |
| +/** |
| + * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame |
| + * @src: Source buffer. It should point to the start of a zstd encoded frame |
| + * or a skippable frame. |
| + * @srcSize: The size of the source buffer. It must be at least as large as the |
| + * size of the frame. |
| + * |
| + * Return: The compressed size of the frame pointed to by `src` or an error, |
| + * which can be check with ZSTD_isError(). |
| + * Suitable to pass to ZSTD_decompress() or similar functions. |
| + */ |
| +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize); |
| + |
| +/*-************************************* |
| + * Decompressed size functions |
| + **************************************/ |
| +/** |
| + * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header |
| + * @src: It should point to the start of a zstd encoded frame. |
| + * @srcSize: The size of the source buffer. It must be at least as large as the |
| + * frame header. `ZSTD_frameHeaderSize_max` is always large enough. |
| + * |
| + * Return: The frame content size stored in the frame header if known. |
| + * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the |
| + * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input. |
| + */ |
| +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); |
| + |
| +/** |
| + * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames |
| + * @src: It should point to the start of a series of zstd encoded and/or |
| + * skippable frames. |
| + * @srcSize: The exact size of the series of frames. |
| + * |
| + * If any zstd encoded frame in the series doesn't have the frame content size |
| + * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always |
| + * set when using ZSTD_compress(). The decompressed size can be very large. |
| + * If the source is untrusted, the decompressed size could be wrong or |
| + * intentionally modified. Always ensure the result fits within the |
| + * application's authorized limits. ZSTD_findDecompressedSize() handles multiple |
| + * frames, and so it must traverse the input to read each frame header. This is |
| + * efficient as most of the data is skipped, however it does mean that all frame |
| + * data must be present and valid. |
| + * |
| + * Return: Decompressed size of all the data contained in the frames if known. |
| + * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown. |
| + * `ZSTD_CONTENTSIZE_ERROR` if an error occurred. |
| + */ |
| +unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize); |
| + |
| +/*-************************************* |
| + * Advanced compression functions |
| + **************************************/ |
| +/** |
| + * ZSTD_checkCParams() - ensure parameter values remain within authorized range |
| + * @cParams: The zstd compression parameters. |
| + * |
| + * Return: Zero or an error, which can be checked using ZSTD_isError(). |
| + */ |
| +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams); |
| + |
| +/** |
| + * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize |
| + * @cParams: The compression parameters to adjust. |
| + * @srcSize: Optionally the estimated source size, or zero if unknown. |
| + * @dictSize: Optionally the estimated dictionary size, or zero if unknown. |
| + * |
| + * Return: The optimized parameters. |
| + */ |
| +ZSTD_compressionParameters ZSTD_adjustCParams( |
| + ZSTD_compressionParameters cParams, unsigned long long srcSize, |
| + size_t dictSize); |
| + |
| +/*--- Advanced decompression functions ---*/ |
| + |
| +/** |
| + * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame |
| + * @buffer: The source buffer to check. |
| + * @size: The size of the source buffer, must be at least 4 bytes. |
| + * |
| + * Return: True iff the buffer starts with a zstd or skippable frame identifier. |
| + */ |
| +unsigned int ZSTD_isFrame(const void *buffer, size_t size); |
| + |
| +/** |
| + * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary |
| + * @dict: The dictionary buffer. |
| + * @dictSize: The size of the dictionary buffer. |
| + * |
| + * Return: The dictionary id stored within the dictionary or 0 if the |
| + * dictionary is not a zstd dictionary. If it returns 0 the |
| + * dictionary can still be loaded as a content-only dictionary. |
| + */ |
| +unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize); |
| + |
| +/** |
| + * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict |
| + * @ddict: The ddict to find the id of. |
| + * |
| + * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not |
| + * a zstd dictionary. If it returns 0 `ddict` will be loaded as a |
| + * content-only dictionary. |
| + */ |
| +unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict); |
| + |
| +/** |
| + * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame |
| + * @src: Source buffer. It must be a zstd encoded frame. |
| + * @srcSize: The size of the source buffer. It must be at least as large as the |
| + * frame header. `ZSTD_frameHeaderSize_max` is always large enough. |
| + * |
| + * Return: The dictionary id required to decompress the frame stored within |
| + * `src` or 0 if the dictionary id could not be decoded. It can return |
| + * 0 if the frame does not require a dictionary, the dictionary id |
| + * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize` |
| + * is too small. |
| + */ |
| +unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize); |
| + |
| +/** |
| + * struct ZSTD_frameParams - zstd frame parameters stored in the frame header |
| + * @frameContentSize: The frame content size, or 0 if not present. |
| + * @windowSize: The window size, or 0 if the frame is a skippable frame. |
| + * @dictID: The dictionary id, or 0 if not present. |
| + * @checksumFlag: Whether a checksum was used. |
| + */ |
| +typedef struct { |
| + unsigned long long frameContentSize; |
| + unsigned int windowSize; |
| + unsigned int dictID; |
| + unsigned int checksumFlag; |
| +} ZSTD_frameParams; |
| + |
| +/** |
| + * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame |
| + * @fparamsPtr: On success the frame parameters are written here. |
| + * @src: The source buffer. It must point to a zstd or skippable frame. |
| + * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is |
| + * always large enough to succeed. |
| + * |
| + * Return: 0 on success. If more data is required it returns how many bytes |
| + * must be provided to make forward progress. Otherwise it returns |
| + * an error, which can be checked using ZSTD_isError(). |
| + */ |
| +size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, |
| + size_t srcSize); |
| + |
| +/*-***************************************************************************** |
| + * Buffer-less and synchronous inner streaming functions |
| + * |
| + * This is an advanced API, giving full control over buffer management, for |
| + * users who need direct control over memory. |
| + * But it's also a complex one, with many restrictions (documented below). |
| + * Prefer using the normal streaming API for an easier experience. |
| + ******************************************************************************/ |
| + |
| +/*-***************************************************************************** |
| + * Buffer-less streaming compression (synchronous mode) |
| + * |
| + * A ZSTD_CCtx object is required to track streaming operations. |
| + * Use ZSTD_initCCtx() to initialize a context. |
| + * ZSTD_CCtx object can be re-used multiple times within successive compression |
| + * operations. |
| + * |
| + * Start by initializing a context. |
| + * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary |
| + * compression, |
| + * or ZSTD_compressBegin_advanced(), for finer parameter control. |
| + * It's also possible to duplicate a reference context which has already been |
| + * initialized, using ZSTD_copyCCtx(). |
| + * |
| + * Then, consume your input using ZSTD_compressContinue(). |
| + * There are some important considerations to keep in mind when using this |
| + * advanced function : |
| + * - ZSTD_compressContinue() has no internal buffer. It uses externally provided |
| + * buffer only. |
| + * - The interface is synchronous: input is consumed entirely and produces |
| + *   one or more compressed blocks. |
| + * - Caller must ensure there is enough space in `dst` to store compressed data |
| + * under worst case scenario. Worst case evaluation is provided by |
| + * ZSTD_compressBound(). |
| + * ZSTD_compressContinue() doesn't guarantee recovery after a failed |
| + * compression. |
| + * - ZSTD_compressContinue() presumes prior input ***is still accessible and |
| + * unmodified*** (up to maximum distance size, see WindowLog). |
| + * It remembers all previous contiguous blocks, plus one separated memory |
| + * segment (which can itself consist of multiple contiguous blocks). |
| + * - ZSTD_compressContinue() detects that prior input has been overwritten when |
| + * `src` buffer overlaps. In which case, it will "discard" the relevant memory |
| + * section from its history. |
| + * |
| + * Finish a frame with ZSTD_compressEnd(), which will write the last block(s) |
| + * and optional checksum. It's possible to use srcSize==0, in which case, it |
| + * will write a final empty block to end the frame. Without last block mark, |
| + * frames will be considered unfinished (corrupted) by decoders. |
| + * |
| + * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new |
| + * frame. |
| + ******************************************************************************/ |
| + |
| +/*===== Buffer-less streaming compression functions =====*/ |
| +size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel); |
| +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, |
| + size_t dictSize, int compressionLevel); |
| +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, |
| + size_t dictSize, ZSTD_parameters params, |
| + unsigned long long pledgedSrcSize); |
| +size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx, |
| + unsigned long long pledgedSrcSize); |
| +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, |
| + unsigned long long pledgedSrcSize); |
| +size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize); |
| +size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize); |
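| + |
| +/* |
| + * Illustrative example (sketch only, not part of the zstd API itself): |
| + * compressing two contiguous chunks into one frame with the buffer-less |
| + * API. `dst` is assumed large enough for each call (see |
| + * ZSTD_compressBound()); error handling is elided. |
| + * |
| + *	size_t pos = 0; |
| + * |
| + *	ZSTD_compressBegin(cctx, 3); |
| + *	pos += ZSTD_compressContinue(cctx, dst + pos, dstCapacity - pos, |
| + *				     chunk0, chunk0Size); |
| + *	pos += ZSTD_compressEnd(cctx, dst + pos, dstCapacity - pos, |
| + *				chunk1, chunk1Size); |
| + */ |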
| + |
| + |
| + |
| +/*-***************************************************************************** |
| + * Buffer-less streaming decompression (synchronous mode) |
| + * |
| + * A ZSTD_DCtx object is required to track streaming operations. |
| + * Use ZSTD_initDCtx() to initialize a context. |
| + * A ZSTD_DCtx object can be re-used multiple times. |
| + * |
| + * First typical operation is to retrieve frame parameters, using |
| + * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide |
| + * important information to correctly decode the frame, such as the minimum |
| + * rolling buffer size to allocate to decompress data (`windowSize`), and the |
| + * dictionary ID used. |
| + * Note: content size is optional, it may not be present. 0 means unknown. |
| + * Note that these values could be wrong, either because of data malformation, |
| + * or because an attacker is spoofing deliberate false information. As a |
| + * consequence, check that values remain within valid application range, |
| + * especially `windowSize`, before allocation. Each application can set its own |
| + * limit, depending on local restrictions. For extended interoperability, it is |
| + * recommended to support at least 8 MB. |
| + * Frame parameters are extracted from the beginning of the compressed frame. |
| + * Data fragment must be large enough to ensure successful decoding, typically |
| + * `ZSTD_frameHeaderSize_max` bytes. |
| + * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled. |
| + * >0: `srcSize` is too small, provide at least this many bytes. |
| + * Otherwise it is an error code, which can be tested using ZSTD_isError(). |
| + * |
| + * Start decompression, with ZSTD_decompressBegin() or |
| + * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared |
| + * context, using ZSTD_copyDCtx(). |
| + * |
| + * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() |
| + * alternatively. |
| + * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' |
| + * to ZSTD_decompressContinue(). |
| + * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will |
| + * fail. |
| + * |
| + * The result of ZSTD_decompressContinue() is the number of bytes regenerated |
| + * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an |
| + * error; it just means ZSTD_decompressContinue() has decoded some metadata |
| + * item. It can also be an error code, which can be tested with ZSTD_isError(). |
| + * |
| + * ZSTD_decompressContinue() needs previous data blocks during decompression, up |
| + * to `windowSize`. They should preferably be located contiguously, prior to |
| + * current block. Alternatively, a round buffer of sufficient size is also |
| + * possible. Sufficient size is determined by frame parameters. |
| + * ZSTD_decompressContinue() is very sensitive to contiguity; if 2 blocks don't |
| + * follow each other, make sure that either the compressor breaks contiguity at |
| + * the same place, or that previous contiguous segment is large enough to |
| + * properly handle maximum back-reference. |
| + * |
| + * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. |
| + * Context can then be reset to start a new decompression. |
| + * |
| + * Note: it's possible to know whether the next input to provide is a header |
| + * or a block, using ZSTD_nextInputType(). This information is not required |
| + * to properly decode a frame. |
| + * |
| + * == Special case: skippable frames == |
| + * |
| + * Skippable frames allow integration of user-defined data into a flow of |
| + * concatenated frames. Skippable frames will be ignored (skipped) by a |
| + * decompressor. The format of skippable frames is as follows: |
| + * a) Skippable frame ID - 4 Bytes, Little endian format, any value from |
| + * 0x184D2A50 to 0x184D2A5F |
| + * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits |
| + * c) Frame Content - any content (User Data) of length equal to Frame Size |
| + * For skippable frames ZSTD_decompressContinue() always returns 0. |
| + * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0, |
| + * which means that the frame is skippable. |
| + * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might |
| + *       actually be a zstd encoded frame with no content. For purposes of |
| + *       decompression, it is valid in both cases to skip the frame using |
| + *       ZSTD_findFrameCompressedSize() to find its size in bytes. |
| + * It also returns the frame size as fparamsPtr->frameContentSize. |
| + * A byte-level sketch of a skippable frame follows this comment block. |
| + ******************************************************************************/ |
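| + |
| +/* |
| + * Example: byte layout of a minimal skippable frame carrying the 4 user bytes |
| + * "DATA" (illustration only, following the format described above; values are |
| + * shown in little-endian byte order): |
| + * |
| + *     50 2A 4D 18   <- skippable frame ID 0x184D2A50 |
| + *     04 00 00 00   <- frame size = 4 |
| + *     44 41 54 41   <- frame content (the user data "DATA") |
| + */ |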
| + |
| +/*===== Buffer-less streaming decompression functions =====*/ |
| +size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx); |
| +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, |
| + size_t dictSize); |
| +void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx); |
| +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx); |
| +size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize); |
| +typedef enum { |
| + ZSTDnit_frameHeader, |
| + ZSTDnit_blockHeader, |
| + ZSTDnit_block, |
| + ZSTDnit_lastBlock, |
| + ZSTDnit_checksum, |
| + ZSTDnit_skippableFrame |
| +} ZSTD_nextInputType_e; |
| +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx); |
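| + |
| +/* |
| + * Illustrative decompression loop for the API above. This is only a sketch: |
| + * error handling is abbreviated; `src`/`srcSize` are assumed to hold one |
| + * complete frame and `dst`/`dstCapacity` its output buffer; `workspace` and |
| + * `workspaceSize` are provided by the caller (typically sized with |
| + * ZSTD_DCtxWorkspaceBound() declared earlier in this header); and |
| + * MY_WINDOW_LIMIT is a caller-chosen bound, not a zstd symbol, used to reject |
| + * untrusted window sizes before any allocation. |
| + * |
| + *     ZSTD_frameParams params; |
| + *     ZSTD_DCtx *dctx = ZSTD_initDCtx(workspace, workspaceSize); |
| + *     const char *ip = src; |
| + *     char *op = dst; |
| + *     size_t next; |
| + * |
| + *     if (ZSTD_getFrameParams(&params, ip, srcSize) != 0) |
| + *             return -EINVAL;          <- incomplete or malformed header |
| + *     if (params.windowSize > MY_WINDOW_LIMIT) |
| + *             return -EINVAL;          <- window size not acceptable |
| + *     ZSTD_decompressBegin(dctx); |
| + *     while ((next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) { |
| + *             size_t const produced = |
| + *                     ZSTD_decompressContinue(dctx, op, dstCapacity, ip, next); |
| + *             if (ZSTD_isError(produced)) |
| + *                     return -EIO; |
| + *             ip += next;              <- exactly `next` bytes were consumed |
| + *             op += produced; |
| + *             dstCapacity -= produced; |
| + *     } |
| + */ |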
| + |
| +/*-***************************************************************************** |
| + * Block functions |
| + * |
| + * Block functions produce and decode raw zstd blocks, without frame metadata. |
| + * Frame metadata cost is typically ~18 bytes, which can be non-negligible for |
| + * very small blocks (< 100 bytes). The user will have to keep track of the |
| + * information required to regenerate data, such as compressed and content sizes. |
| + * |
| + * A few rules to respect: |
| + * - Compressing and decompressing require a context structure |
| + * + Use ZSTD_initCCtx() and ZSTD_initDCtx() |
| + * - It is necessary to init context before starting |
| + * + compression : ZSTD_compressBegin() |
| + * + decompression : ZSTD_decompressBegin() |
| + * + variants _usingDict() are also allowed |
| + * + copyCCtx() and copyDCtx() work too |
| + * - Block size is limited, it must be <= ZSTD_getBlockSizeMax() |
| + * + If you need to compress more, cut data into multiple blocks |
| + * + Consider using the regular ZSTD_compress() instead, as frame metadata |
| + * costs become negligible when source size is large. |
| + * - When a block is considered not compressible enough, the result of |
| + *   ZSTD_compressBlock() will be zero, in which case nothing is produced into |
| + *   `dst` (see the sketch after the declarations below). |
| + *   + User must test for such an outcome and deal directly with uncompressed data |
| + *   + ZSTD_decompressBlock() doesn't accept uncompressed data as input! |
| + * + In case of multiple successive blocks, decoder must be informed of |
| + * uncompressed block existence to follow proper history. Use |
| + * ZSTD_insertBlock() in such a case. |
| + ******************************************************************************/ |
| + |
| +/* Define for static allocation */ |
| +#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) |
| +/*===== Raw zstd block functions =====*/ |
| +size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx); |
| +size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize); |
| +size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, |
| + const void *src, size_t srcSize); |
| +size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, |
| + size_t blockSize); |
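| + |
| +/* |
| + * Illustrative handling of a non-compressible block, following the rules |
| + * above. This is a sketch only: `cctx` and `dctx` are assumed to be already |
| + * initialized with ZSTD_compressBegin() and ZSTD_decompressBegin(), and |
| + * `block_is_raw` stands for whatever application-defined framing the caller |
| + * uses to remember that a block was stored uncompressed. |
| + * |
| + * Compression side: |
| + *     size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, |
| + *                                             src, srcSize); |
| + *     if (ZSTD_isError(cSize)) |
| + *             return cSize; |
| + *     if (cSize == 0) |
| + *             memcpy(dst, src, srcSize);    <- stored raw; set block_is_raw |
| + * |
| + * Decompression side: |
| + *     if (block_is_raw) { |
| + *             memcpy(dst, src, blockSize); |
| + *             ZSTD_insertBlock(dctx, dst, blockSize); |
| + *     } else { |
| + *             ZSTD_decompressBlock(dctx, dst, dstCapacity, src, blockSize); |
| + *     } |
| + */ |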
| + |
| +#endif /* ZSTD_H */ |
| diff --git a/lib/Kconfig b/lib/Kconfig |
| index b6009d7..f00ddab 100644 |
| --- a/lib/Kconfig |
| +++ b/lib/Kconfig |
| @@ -241,6 +241,14 @@ config LZ4HC_COMPRESS |
| config LZ4_DECOMPRESS |
| tristate |
| |
| +config ZSTD_COMPRESS |
| + select XXHASH |
| + tristate |
| + |
| +config ZSTD_DECOMPRESS |
| + select XXHASH |
| + tristate |
| + |
| source "lib/xz/Kconfig" |
| |
| # |
| diff --git a/lib/Makefile b/lib/Makefile |
| index 1338226..4fcef16 100644 |
| --- a/lib/Makefile |
| +++ b/lib/Makefile |
| @@ -116,6 +116,8 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ |
| obj-$(CONFIG_LZ4_COMPRESS) += lz4/ |
| obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/ |
| obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/ |
| +obj-$(CONFIG_ZSTD_COMPRESS) += zstd/ |
| +obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/ |
| obj-$(CONFIG_XZ_DEC) += xz/ |
| obj-$(CONFIG_RAID6_PQ) += raid6/ |
| |
| diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile |
| new file mode 100644 |
| index 0000000..dd0a359 |
| --- /dev/null |
| +++ b/lib/zstd/Makefile |
| @@ -0,0 +1,18 @@ |
| +obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o |
| +obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o |
| + |
| +ccflags-y += -O3 |
| + |
| +# Object files unique to zstd_compress and zstd_decompress |
| +zstd_compress-y := fse_compress.o huf_compress.o compress.o |
| +zstd_decompress-y := huf_decompress.o decompress.o |
| + |
| +# These object files are shared between the modules. |
| +# Always add them to zstd_compress. |
| +# Unless both zstd_compress and zstd_decompress are built in, |
| +# also add them to zstd_decompress. |
| +zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o |
| + |
| +ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy) |
| + zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o |
| +endif |
| diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h |
| new file mode 100644 |
| index 0000000..a826b99 |
| --- /dev/null |
| +++ b/lib/zstd/bitstream.h |
| @@ -0,0 +1,374 @@ |
| +/* |
| + * bitstream |
| + * Part of FSE library |
| + * header file (to include) |
| + * Copyright (C) 2013-2016, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| +#ifndef BITSTREAM_H_MODULE |
| +#define BITSTREAM_H_MODULE |
| + |
| +/* |
| +* This API consists of small unitary functions, which must be inlined for best performance. |
| +* Since link-time optimization is not available for all compilers, |
| +* these functions are defined in a header file, to be included and inlined. |
| +*/ |
| + |
| +/*-**************************************** |
| +* Dependencies |
| +******************************************/ |
| +#include "error_private.h" /* error codes and messages */ |
| +#include "mem.h" /* unaligned access routines */ |
| + |
| +/*========================================= |
| +* Target specific |
| +=========================================*/ |
| +#define STREAM_ACCUMULATOR_MIN_32 25 |
| +#define STREAM_ACCUMULATOR_MIN_64 57 |
| +#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) |
| + |
| +/*-****************************************** |
| +* bitStream encoding API (write forward) |
| +********************************************/ |
| +/* bitStream can mix input from multiple sources. |
| +* A critical property of these streams is that they encode and decode in **reverse** direction. |
| +* So the first bit sequence you add will be the last to be read, like a LIFO stack. |
| +*/ |
| +typedef struct { |
| + size_t bitContainer; |
| + int bitPos; |
| + char *startPtr; |
| + char *ptr; |
| + char *endPtr; |
| +} BIT_CStream_t; |
| + |
| +ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity); |
| +ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits); |
| +ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC); |
| +ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC); |
| + |
| +/* Start with initCStream, providing the size of buffer to write into. |
| +* bitStream will never write outside of this buffer. |
| +* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. |
| +* |
| +* bits are first added to a local register. |
| +* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. |
| +* Writing data into memory is an explicit operation, performed by the flushBits function. |
| +* Hence keep track of how many bits are potentially stored in the local register to avoid register overflow. |
| +* After a flushBits, a maximum of 7 bits might still be stored into local register. |
| +* |
| +* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. |
| +* |
| +* Last operation is to close the bitStream. |
| +* The function returns the final size of CStream in bytes. |
| +* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) |
| +*/ |
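| + |
| +/* Minimal illustrative write sequence for the API above (a sketch only, not |
| +* code from the original sources). `buf` and `cap` are caller-provided; each |
| +* field is kept <= 24 bits so that 32-bits readers remain compatible; |
| +* BIT_flushBits() spills accumulated bits to memory; BIT_closeCStream() |
| +* returns the final size in bytes, or 0 if the data did not fit. |
| +* |
| +*     BIT_CStream_t bitC; |
| +*     if (ERR_isError(BIT_initCStream(&bitC, buf, cap))) |
| +*             return 0; |
| +*     BIT_addBits(&bitC, value1, 5); |
| +*     BIT_addBits(&bitC, value2, 11); |
| +*     BIT_flushBits(&bitC); |
| +*     return BIT_closeCStream(&bitC); |
| +*/ |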
| + |
| +/*-******************************************** |
| +* bitStream decoding API (read backward) |
| +**********************************************/ |
| +typedef struct { |
| + size_t bitContainer; |
| + unsigned bitsConsumed; |
| + const char *ptr; |
| + const char *start; |
| +} BIT_DStream_t; |
| + |
| +typedef enum { |
| + BIT_DStream_unfinished = 0, |
| + BIT_DStream_endOfBuffer = 1, |
| + BIT_DStream_completed = 2, |
| + BIT_DStream_overflow = 3 |
| +} BIT_DStream_status; /* result of BIT_reloadDStream() */ |
| +/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ |
| + |
| +ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize); |
| +ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits); |
| +ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD); |
| +ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD); |
| + |
| +/* Start by invoking BIT_initDStream(). |
| +* A chunk of the bitStream is then stored into a local register. |
| +* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). |
| +* You can then retrieve bitFields stored into the local register, **in reverse order**. |
| +* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. |
| +* A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. |
| +* Otherwise, it can be less than that, so proceed accordingly. |
| +* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). |
| +*/ |
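| + |
| +/* Minimal illustrative read-back of the two fields written in the sketch |
| +* above (again only a sketch). Fields come back in reverse order of writing, |
| +* and BIT_endOfDStream() returns 1 only once every bit has been consumed. |
| +* |
| +*     BIT_DStream_t bitD; |
| +*     if (ERR_isError(BIT_initDStream(&bitD, buf, streamSize))) |
| +*             return 0; |
| +*     value2 = BIT_readBits(&bitD, 11); |
| +*     value1 = BIT_readBits(&bitD, 5); |
| +*     BIT_reloadDStream(&bitD); |
| +*     return BIT_endOfDStream(&bitD); |
| +*/ |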
| + |
| +/*-**************************************** |
| +* unsafe API |
| +******************************************/ |
| +ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits); |
| +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ |
| + |
| +ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC); |
| +/* unsafe version; does not check buffer overflow */ |
| + |
| +ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits); |
| +/* faster, but works only if nbBits >= 1 */ |
| + |
| +/*-************************************************************** |
| +* Internal functions |
| +****************************************************************/ |
| +ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); } |
| + |
| +/*===== Local Constants =====*/ |
| +static const unsigned BIT_mask[] = {0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, |
| + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, |
| + 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */ |
| + |
| +/*-************************************************************** |
| +* bitStream encoding |
| +****************************************************************/ |
| +/*! BIT_initCStream() : |
| + * `dstCapacity` must be > sizeof(void*) |
| + * @return : 0 if success, |
| + otherwise an error code (can be tested using ERR_isError() ) */ |
| +ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity) |
| +{ |
| + bitC->bitContainer = 0; |
| + bitC->bitPos = 0; |
| + bitC->startPtr = (char *)startPtr; |
| + bitC->ptr = bitC->startPtr; |
| + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr); |
| + if (dstCapacity <= sizeof(bitC->ptr)) |
| + return ERROR(dstSize_tooSmall); |
| + return 0; |
| +} |
| + |
| +/*! BIT_addBits() : |
| + can add up to 26 bits into `bitC`. |
| + Does not check for register overflow ! */ |
| +ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits) |
| +{ |
| + bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; |
| + bitC->bitPos += nbBits; |
| +} |
| + |
| +/*! BIT_addBitsFast() : |
| + * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ |
| +ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits) |
| +{ |
| + bitC->bitContainer |= value << bitC->bitPos; |
| + bitC->bitPos += nbBits; |
| +} |
| + |
| +/*! BIT_flushBitsFast() : |
| + * unsafe version; does not check buffer overflow */ |
| +ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC) |
| +{ |
| + size_t const nbBytes = bitC->bitPos >> 3; |
| + ZSTD_writeLEST(bitC->ptr, bitC->bitContainer); |
| + bitC->ptr += nbBytes; |
| + bitC->bitPos &= 7; |
| + bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ |
| +} |
| + |
| +/*! BIT_flushBits() : |
| + * safe version; checks for buffer overflow, and prevents it. |
| + * note : does not signal buffer overflow; this will be revealed later, by BIT_closeCStream() */ |
| +ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC) |
| +{ |
| + size_t const nbBytes = bitC->bitPos >> 3; |
| + ZSTD_writeLEST(bitC->ptr, bitC->bitContainer); |
| + bitC->ptr += nbBytes; |
| + if (bitC->ptr > bitC->endPtr) |
| + bitC->ptr = bitC->endPtr; |
| + bitC->bitPos &= 7; |
| + bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ |
| +} |
| + |
| +/*! BIT_closeCStream() : |
| + * @return : size of CStream, in bytes, |
| + or 0 if it could not fit into dstBuffer */ |
| +ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC) |
| +{ |
| + BIT_addBitsFast(bitC, 1, 1); /* endMark */ |
| + BIT_flushBits(bitC); |
| + |
| + if (bitC->ptr >= bitC->endPtr) |
| + return 0; /* doesn't fit within authorized budget : cancel */ |
| + |
| + return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); |
| +} |
| + |
| +/*-******************************************************** |
| +* bitStream decoding |
| +**********************************************************/ |
| +/*! BIT_initDStream() : |
| +* Initialize a BIT_DStream_t. |
| +* `bitD` : a pointer to an already allocated BIT_DStream_t structure. |
| +* `srcSize` must be the *exact* size of the bitStream, in bytes. |
| +* @return : size of stream (== srcSize) or an errorCode if a problem is detected |
| +*/ |
| +ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize) |
| +{ |
| + if (srcSize < 1) { |
| + memset(bitD, 0, sizeof(*bitD)); |
| + return ERROR(srcSize_wrong); |
| + } |
| + |
| + if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ |
| + bitD->start = (const char *)srcBuffer; |
| + bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer); |
| + bitD->bitContainer = ZSTD_readLEST(bitD->ptr); |
| + { |
| + BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1]; |
| + bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ |
| + if (lastByte == 0) |
| + return ERROR(GENERIC); /* endMark not present */ |
| + } |
| + } else { |
| + bitD->start = (const char *)srcBuffer; |
| + bitD->ptr = bitD->start; |
| + bitD->bitContainer = *(const BYTE *)(bitD->start); |
| + switch (srcSize) { |
| + case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16); |
| + case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24); |
| + case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32); |
| + case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24; |
| + case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16; |
| + case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8; |
| + default:; |
| + } |
| + { |
| + BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1]; |
| + bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; |
| + if (lastByte == 0) |
| + return ERROR(GENERIC); /* endMark not present */ |
| + } |
| + bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8; |
| + } |
| + |
| + return srcSize; |
| +} |
| + |
| +ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; } |
| + |
| +ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; } |
| + |
| +ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; } |
| + |
| +/*! BIT_lookBits() : |
| + * Provides next n bits from local register. |
| + * local register is not modified. |
| + * On 32-bits, maxNbBits==24. |
| + * On 64-bits, maxNbBits==56. |
| + * @return : value extracted |
| + */ |
| +ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits) |
| +{ |
| + U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1; |
| + return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask); |
| +} |
| + |
| +/*! BIT_lookBitsFast() : |
| +* unsafe version; only works if nbBits >= 1 */ |
| +ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits) |
| +{ |
| + U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1; |
| + return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask); |
| +} |
| + |
| +ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } |
| + |
| +/*! BIT_readBits() : |
| + * Read (consume) next n bits from local register and update. |
| + * Take care not to read more bits than are contained in the local register. |
| + * @return : extracted value. |
| + */ |
| +ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits) |
| +{ |
| + size_t const value = BIT_lookBits(bitD, nbBits); |
| + BIT_skipBits(bitD, nbBits); |
| + return value; |
| +} |
| + |
| +/*! BIT_readBitsFast() : |
| +* unsafe version; only works if nbBits >= 1 */ |
| +ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits) |
| +{ |
| + size_t const value = BIT_lookBitsFast(bitD, nbBits); |
| + BIT_skipBits(bitD, nbBits); |
| + return value; |
| +} |
| + |
| +/*! BIT_reloadDStream() : |
| +* Refill `bitD` from the buffer previously set in BIT_initDStream(). |
| +* This function is safe: it guarantees it will not read beyond the src buffer. |
| +* @return : status of `BIT_DStream_t` internal register. |
| + if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */ |
| +ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD) |
| +{ |
| + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */ |
| + return BIT_DStream_overflow; |
| + |
| + if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { |
| + bitD->ptr -= bitD->bitsConsumed >> 3; |
| + bitD->bitsConsumed &= 7; |
| + bitD->bitContainer = ZSTD_readLEST(bitD->ptr); |
| + return BIT_DStream_unfinished; |
| + } |
| + if (bitD->ptr == bitD->start) { |
| + if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8) |
| + return BIT_DStream_endOfBuffer; |
| + return BIT_DStream_completed; |
| + } |
| + { |
| + U32 nbBytes = bitD->bitsConsumed >> 3; |
| + BIT_DStream_status result = BIT_DStream_unfinished; |
| + if (bitD->ptr - nbBytes < bitD->start) { |
| + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ |
| + result = BIT_DStream_endOfBuffer; |
| + } |
| + bitD->ptr -= nbBytes; |
| + bitD->bitsConsumed -= nbBytes * 8; |
| + bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ |
| + return result; |
| + } |
| +} |
| + |
| +/*! BIT_endOfDStream() : |
| +* @return Tells if DStream has exactly reached its end (all bits consumed). |
| +*/ |
| +ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream) |
| +{ |
| + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8)); |
| +} |
| + |
| +#endif /* BITSTREAM_H_MODULE */ |
| diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c |
| new file mode 100644 |
| index 0000000..d60ab7d |
| --- /dev/null |
| +++ b/lib/zstd/compress.c |
| @@ -0,0 +1,3479 @@ |
| +/** |
| + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +/*-************************************* |
| +* Dependencies |
| +***************************************/ |
| +#include "fse.h" |
| +#include "huf.h" |
| +#include "mem.h" |
| +#include "zstd_internal.h" /* includes zstd.h */ |
| +#include <linux/kernel.h> |
| +#include <linux/module.h> |
| +#include <linux/string.h> /* memset */ |
| + |
| +/*-************************************* |
| +* Constants |
| +***************************************/ |
| +static const U32 g_searchStrength = 8; /* control skip over incompressible data */ |
| +#define HASH_READ_SIZE 8 |
| +typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; |
| + |
| +/*-************************************* |
| +* Helper functions |
| +***************************************/ |
| +size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; } |
| + |
| +/*-************************************* |
| +* Sequence storage |
| +***************************************/ |
| +static void ZSTD_resetSeqStore(seqStore_t *ssPtr) |
| +{ |
| + ssPtr->lit = ssPtr->litStart; |
| + ssPtr->sequences = ssPtr->sequencesStart; |
| + ssPtr->longLengthID = 0; |
| +} |
| + |
| +/*-************************************* |
| +* Context memory management |
| +***************************************/ |
| +struct ZSTD_CCtx_s { |
| + const BYTE *nextSrc; /* next block here to continue on curr prefix */ |
| + const BYTE *base; /* All regular indexes relative to this position */ |
| + const BYTE *dictBase; /* extDict indexes relative to this position */ |
| + U32 dictLimit; /* below that point, need extDict */ |
| + U32 lowLimit; /* below that point, no more data */ |
| + U32 nextToUpdate; /* index from which to continue dictionary update */ |
| + U32 nextToUpdate3; /* index from which to continue dictionary update */ |
| + U32 hashLog3; /* dispatch table : larger == faster, more memory */ |
| + U32 loadedDictEnd; /* index of end of dictionary */ |
| + U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */ |
| + U32 forceRawDict; /* Force loading dictionary in "content-only" mode (no header analysis) */ |
| + ZSTD_compressionStage_e stage; |
| + U32 rep[ZSTD_REP_NUM]; |
| + U32 repToConfirm[ZSTD_REP_NUM]; |
| + U32 dictID; |
| + ZSTD_parameters params; |
| + void *workSpace; |
| + size_t workSpaceSize; |
| + size_t blockSize; |
| + U64 frameContentSize; |
| + struct xxh64_state xxhState; |
| + ZSTD_customMem customMem; |
| + |
| + seqStore_t seqStore; /* sequences storage ptrs */ |
| + U32 *hashTable; |
| + U32 *hashTable3; |
| + U32 *chainTable; |
| + HUF_CElt *hufTable; |
| + U32 flagStaticTables; |
| + HUF_repeat flagStaticHufTable; |
| + FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; |
| + FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)]; |
| + FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)]; |
| + unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32]; |
| +}; |
| + |
| +size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams) |
| +{ |
| + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog); |
| + U32 const divider = (cParams.searchLength == 3) ? 3 : 4; |
| + size_t const maxNbSeq = blockSize / divider; |
| + size_t const tokenSpace = blockSize + 11 * maxNbSeq; |
| + size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog); |
| + size_t const hSize = ((size_t)1) << cParams.hashLog; |
| + U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); |
| + size_t const h3Size = ((size_t)1) << hashLog3; |
| + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); |
| + size_t const optSpace = |
| + ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); |
| + size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace + |
| + (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0); |
| + |
| + return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize); |
| +} |
| + |
| +static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem) |
| +{ |
| + ZSTD_CCtx *cctx; |
| + if (!customMem.customAlloc || !customMem.customFree) |
| + return NULL; |
| + cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); |
| + if (!cctx) |
| + return NULL; |
| + memset(cctx, 0, sizeof(ZSTD_CCtx)); |
| + cctx->customMem = customMem; |
| + return cctx; |
| +} |
| + |
| +ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); |
| + ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem); |
| + if (cctx) { |
| + cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize); |
| + } |
| + return cctx; |
| +} |
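| + |
| +/* |
| + * Typical caller-side setup for the workspace model used above (an |
| + * illustrative sketch only: it assumes ZSTD_getParams() from linux/zstd.h to |
| + * pick parameters, and uses vmalloc() for the workspace, which callers may |
| + * replace with any allocator of their choice): |
| + * |
| + *     ZSTD_parameters const params = ZSTD_getParams(level, srcSize, 0); |
| + *     size_t const workspaceSize = ZSTD_CCtxWorkspaceBound(params.cParams); |
| + *     void *const workspace = vmalloc(workspaceSize); |
| + *     ZSTD_CCtx *const cctx = ZSTD_initCCtx(workspace, workspaceSize); |
| + * |
| + *     ... compress using cctx ... |
| + * |
| + *     vfree(workspace);    <- the cctx lives inside the workspace |
| + */ |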
| + |
| +size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx) |
| +{ |
| + if (cctx == NULL) |
| + return 0; /* support free on NULL */ |
| + ZSTD_free(cctx->workSpace, cctx->customMem); |
| + ZSTD_free(cctx, cctx->customMem); |
| + return 0; /* reserved as a potential error code in the future */ |
| +} |
| + |
| +const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); } |
| + |
| +static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; } |
| + |
| +/** ZSTD_checkCParams() : |
| + ensure param values remain within authorized range. |
| + @return : 0, or an error code if one value is beyond authorized range */ |
| +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) |
| +{ |
| +#define CLAMPCHECK(val, min, max) \ |
| + { \ |
| + if ((val < min) | (val > max)) \ |
| + return ERROR(compressionParameter_unsupported); \ |
| + } |
| + CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); |
| + CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); |
| + CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); |
| + CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); |
| + CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); |
| + CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); |
| + if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) |
| + return ERROR(compressionParameter_unsupported); |
| + return 0; |
| +} |
| + |
| +/** ZSTD_cycleLog() : |
| + * condition for correct operation : hashLog > 1 */ |
| +static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) |
| +{ |
| + U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); |
| + return hashLog - btScale; |
| +} |
| + |
| +/** ZSTD_adjustCParams() : |
| + optimize `cPar` for a given input (`srcSize` and `dictSize`). |
| + mostly downsizing to reduce memory consumption and initialization. |
| + Both `srcSize` and `dictSize` are optional (use 0 if unknown), |
| + but if both are 0, no optimization can be done. |
| + Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */ |
| +ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) |
| +{ |
| + if (srcSize + dictSize == 0) |
| + return cPar; /* no size information available : no adjustment */ |
| + |
| + /* resize params, to use less memory when necessary */ |
| + { |
| + U32 const minSrcSize = (srcSize == 0) ? 500 : 0; |
| + U64 const rSize = srcSize + dictSize + minSrcSize; |
| + if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) { |
| + U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1); |
| + if (cPar.windowLog > srcLog) |
| + cPar.windowLog = srcLog; |
| + } |
| + } |
| + if (cPar.hashLog > cPar.windowLog) |
| + cPar.hashLog = cPar.windowLog; |
| + { |
| + U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); |
| + if (cycleLog > cPar.windowLog) |
| + cPar.chainLog -= (cycleLog - cPar.windowLog); |
| + } |
| + |
| + if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) |
| + cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ |
| + |
| + return cPar; |
| +} |
| + |
| +static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2) |
| +{ |
| + return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) & |
| + (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3)); |
| +} |
| + |
| +/*! ZSTD_continueCCtx() : |
| + reuse CCtx without reset (note : requires no dictionary) */ |
| +static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize) |
| +{ |
| + U32 const end = (U32)(cctx->nextSrc - cctx->base); |
| + cctx->params = params; |
| + cctx->frameContentSize = frameContentSize; |
| + cctx->lowLimit = end; |
| + cctx->dictLimit = end; |
| + cctx->nextToUpdate = end + 1; |
| + cctx->stage = ZSTDcs_init; |
| + cctx->dictID = 0; |
| + cctx->loadedDictEnd = 0; |
| + { |
| + int i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + cctx->rep[i] = repStartValue[i]; |
| + } |
| + cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */ |
| + xxh64_reset(&cctx->xxhState, 0); |
| + return 0; |
| +} |
| + |
| +typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e; |
| + |
| +/*! ZSTD_resetCCtx_advanced() : |
| + note : `params` must be validated */ |
| +static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp) |
| +{ |
| + if (crp == ZSTDcrp_continue) |
| + if (ZSTD_equivalentParams(params, zc->params)) { |
| + zc->flagStaticTables = 0; |
| + zc->flagStaticHufTable = HUF_repeat_none; |
| + return ZSTD_continueCCtx(zc, params, frameContentSize); |
| + } |
| + |
| + { |
| + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); |
| + U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4; |
| + size_t const maxNbSeq = blockSize / divider; |
| + size_t const tokenSpace = blockSize + 11 * maxNbSeq; |
| + size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog); |
| + size_t const hSize = ((size_t)1) << params.cParams.hashLog; |
| + U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog); |
| + size_t const h3Size = ((size_t)1) << hashLog3; |
| + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); |
| + void *ptr; |
| + |
| + /* Check if workSpace is large enough, alloc a new one if needed */ |
| + { |
| + size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + |
| + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); |
| + size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace + |
| + (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0); |
| + if (zc->workSpaceSize < neededSpace) { |
| + ZSTD_free(zc->workSpace, zc->customMem); |
| + zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); |
| + if (zc->workSpace == NULL) |
| + return ERROR(memory_allocation); |
| + zc->workSpaceSize = neededSpace; |
| + } |
| + } |
| + |
| + if (crp != ZSTDcrp_noMemset) |
| + memset(zc->workSpace, 0, tableSpace); /* reset tables only */ |
| + xxh64_reset(&zc->xxhState, 0); |
| + zc->hashLog3 = hashLog3; |
| + zc->hashTable = (U32 *)(zc->workSpace); |
| + zc->chainTable = zc->hashTable + hSize; |
| + zc->hashTable3 = zc->chainTable + chainSize; |
| + ptr = zc->hashTable3 + h3Size; |
| + zc->hufTable = (HUF_CElt *)ptr; |
| + zc->flagStaticTables = 0; |
| + zc->flagStaticHufTable = HUF_repeat_none; |
| + ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */ |
| + |
| + zc->nextToUpdate = 1; |
| + zc->nextSrc = NULL; |
| + zc->base = NULL; |
| + zc->dictBase = NULL; |
| + zc->dictLimit = 0; |
| + zc->lowLimit = 0; |
| + zc->params = params; |
| + zc->blockSize = blockSize; |
| + zc->frameContentSize = frameContentSize; |
| + { |
| + int i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + zc->rep[i] = repStartValue[i]; |
| + } |
| + |
| + if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) { |
| + zc->seqStore.litFreq = (U32 *)ptr; |
| + zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits); |
| + zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1); |
| + zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1); |
| + ptr = zc->seqStore.offCodeFreq + (MaxOff + 1); |
| + zc->seqStore.matchTable = (ZSTD_match_t *)ptr; |
| + ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1; |
| + zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr; |
| + ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1; |
| + zc->seqStore.litLengthSum = 0; |
| + } |
| + zc->seqStore.sequencesStart = (seqDef *)ptr; |
| + ptr = zc->seqStore.sequencesStart + maxNbSeq; |
| + zc->seqStore.llCode = (BYTE *)ptr; |
| + zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; |
| + zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; |
| + zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; |
| + |
| + zc->stage = ZSTDcs_init; |
| + zc->dictID = 0; |
| + zc->loadedDictEnd = 0; |
| + |
| + return 0; |
| + } |
| +} |
| + |
| +/* ZSTD_invalidateRepCodes() : |
| + * ensures next compression will not use repcodes from previous block. |
| + * Note : only works with regular variant; |
| + * do not use with extDict variant ! */ |
| +void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx) |
| +{ |
| + int i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + cctx->rep[i] = 0; |
| +} |
| + |
| +/*! ZSTD_copyCCtx() : |
| +* Duplicate an existing context `srcCCtx` into another one `dstCCtx`. |
| +* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). |
| +* @return : 0, or an error code */ |
| +size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize) |
| +{ |
| + if (srcCCtx->stage != ZSTDcs_init) |
| + return ERROR(stage_wrong); |
| + |
| + memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); |
| + { |
| + ZSTD_parameters params = srcCCtx->params; |
| + params.fParams.contentSizeFlag = (pledgedSrcSize > 0); |
| + ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset); |
| + } |
| + |
| + /* copy tables */ |
| + { |
| + size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog); |
| + size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog; |
| + size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; |
| + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); |
| + memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace); |
| + } |
| + |
| + /* copy dictionary offsets */ |
| + dstCCtx->nextToUpdate = srcCCtx->nextToUpdate; |
| + dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3; |
| + dstCCtx->nextSrc = srcCCtx->nextSrc; |
| + dstCCtx->base = srcCCtx->base; |
| + dstCCtx->dictBase = srcCCtx->dictBase; |
| + dstCCtx->dictLimit = srcCCtx->dictLimit; |
| + dstCCtx->lowLimit = srcCCtx->lowLimit; |
| + dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd; |
| + dstCCtx->dictID = srcCCtx->dictID; |
| + |
| + /* copy entropy tables */ |
| + dstCCtx->flagStaticTables = srcCCtx->flagStaticTables; |
| + dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable; |
| + if (srcCCtx->flagStaticTables) { |
| + memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable)); |
| + memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable)); |
| + memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable)); |
| + } |
| + if (srcCCtx->flagStaticHufTable) { |
| + memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/*! ZSTD_reduceTable() : |
| +* reduce table indexes by `reducerValue` */ |
| +static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue) |
| +{ |
| + U32 u; |
| + for (u = 0; u < size; u++) { |
| + if (table[u] < reducerValue) |
| + table[u] = 0; |
| + else |
| + table[u] -= reducerValue; |
| + } |
| +} |
| + |
| +/*! ZSTD_reduceIndex() : |
| +* rescale all indexes to avoid future overflow (indexes are U32) */ |
| +static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue) |
| +{ |
| + { |
| + U32 const hSize = 1 << zc->params.cParams.hashLog; |
| + ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); |
| + } |
| + |
| + { |
| + U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog); |
| + ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); |
| + } |
| + |
| + { |
| + U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0; |
| + ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); |
| + } |
| +} |
| + |
| +/*-******************************************************* |
| +* Block entropic compression |
| +*********************************************************/ |
| + |
| +/* See doc/zstd_compression_format.md for detailed format description */ |
| + |
| +size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + if (srcSize + ZSTD_blockHeaderSize > dstCapacity) |
| + return ERROR(dstSize_tooSmall); |
| + memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize); |
| + ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); |
| + return ZSTD_blockHeaderSize + srcSize; |
| +} |
| + |
| +static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + BYTE *const ostart = (BYTE * const)dst; |
| + U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095); |
| + |
| + if (srcSize + flSize > dstCapacity) |
| + return ERROR(dstSize_tooSmall); |
| + |
| + switch (flSize) { |
| + case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break; |
| + case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break; |
| + default: /*note : should not be necessary : flSize is within {1,2,3} */ |
| + case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break; |
| + } |
| + |
| + memcpy(ostart + flSize, src, srcSize); |
| + return srcSize + flSize; |
| +} |
| + |
| +static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + BYTE *const ostart = (BYTE * const)dst; |
| + U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095); |
| + |
| + (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ |
| + |
| + switch (flSize) { |
| + case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break; |
| + case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break; |
| + default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */ |
| + case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break; |
| + } |
| + |
| + ostart[flSize] = *(const BYTE *)src; |
| + return flSize + 1; |
| +} |
| + |
| +static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } |
| + |
| +static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + size_t const minGain = ZSTD_minGain(srcSize); |
| + size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); |
| + BYTE *const ostart = (BYTE *)dst; |
| + U32 singleStream = srcSize < 256; |
| + symbolEncodingType_e hType = set_compressed; |
| + size_t cLitSize; |
| + |
| +/* small ? don't even attempt compression (speed opt) */ |
| +#define LITERAL_NOENTROPY 63 |
| + { |
| + size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY; |
| + if (srcSize <= minLitSize) |
| + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); |
| + } |
| + |
| + if (dstCapacity < lhSize + 1) |
| + return ERROR(dstSize_tooSmall); /* not enough space for compression */ |
| + { |
| + HUF_repeat repeat = zc->flagStaticHufTable; |
| + int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0; |
| + if (repeat == HUF_repeat_valid && lhSize == 3) |
| + singleStream = 1; |
| + cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters, |
| + sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat) |
| + : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters, |
| + sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat); |
| + if (repeat != HUF_repeat_none) { |
| + hType = set_repeat; |
| + } /* reused the existing table */ |
| + else { |
| + zc->flagStaticHufTable = HUF_repeat_check; |
| + } /* now have a table to reuse */ |
| + } |
| + |
| + if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) { |
| + zc->flagStaticHufTable = HUF_repeat_none; |
| + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); |
| + } |
| + if (cLitSize == 1) { |
| + zc->flagStaticHufTable = HUF_repeat_none; |
| + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); |
| + } |
| + |
| + /* Build header */ |
| + switch (lhSize) { |
| + case 3: /* 2 - 2 - 10 - 10 */ |
| + { |
| + U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14); |
| + ZSTD_writeLE24(ostart, lhc); |
| + break; |
| + } |
| + case 4: /* 2 - 2 - 14 - 14 */ |
| + { |
| + U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18); |
| + ZSTD_writeLE32(ostart, lhc); |
| + break; |
| + } |
| + default: /* should not be necessary, lhSize is only {3,4,5} */ |
| + case 5: /* 2 - 2 - 18 - 18 */ |
| + { |
| + U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22); |
| + ZSTD_writeLE32(ostart, lhc); |
| + ostart[4] = (BYTE)(cLitSize >> 10); |
| + break; |
| + } |
| + } |
| + return lhSize + cLitSize; |
| +} |
| + |
| +static const BYTE LL_Code[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, |
| + 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, |
| + 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24}; |
| + |
| +static const BYTE ML_Code[128] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, |
| + 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, |
| + 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, |
| + 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, |
| + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}; |
| + |
| +void ZSTD_seqToCodes(const seqStore_t *seqStorePtr) |
| +{ |
| + BYTE const LL_deltaCode = 19; |
| + BYTE const ML_deltaCode = 36; |
| + const seqDef *const sequences = seqStorePtr->sequencesStart; |
| + BYTE *const llCodeTable = seqStorePtr->llCode; |
| + BYTE *const ofCodeTable = seqStorePtr->ofCode; |
| + BYTE *const mlCodeTable = seqStorePtr->mlCode; |
| + U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); |
| + U32 u; |
| + for (u = 0; u < nbSeq; u++) { |
| + U32 const llv = sequences[u].litLength; |
| + U32 const mlv = sequences[u].matchLength; |
| + llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv]; |
| + ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); |
| + mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv]; |
| + } |
| + if (seqStorePtr->longLengthID == 1) |
| + llCodeTable[seqStorePtr->longLengthPos] = MaxLL; |
| + if (seqStorePtr->longLengthID == 2) |
| + mlCodeTable[seqStorePtr->longLengthPos] = MaxML; |
| +} |
| + |
| +ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize) |
| +{ |
| + const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN; |
| + const seqStore_t *seqStorePtr = &(zc->seqStore); |
| + FSE_CTable *CTable_LitLength = zc->litlengthCTable; |
| + FSE_CTable *CTable_OffsetBits = zc->offcodeCTable; |
| + FSE_CTable *CTable_MatchLength = zc->matchlengthCTable; |
| + U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ |
| + const seqDef *const sequences = seqStorePtr->sequencesStart; |
| + const BYTE *const ofCodeTable = seqStorePtr->ofCode; |
| + const BYTE *const llCodeTable = seqStorePtr->llCode; |
| + const BYTE *const mlCodeTable = seqStorePtr->mlCode; |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *const oend = ostart + dstCapacity; |
| + BYTE *op = ostart; |
| + size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; |
| + BYTE *seqHead; |
| + |
| + U32 *count; |
| + S16 *norm; |
| + U32 *workspace; |
| + size_t workspaceSize = sizeof(zc->tmpCounters); |
| + { |
| + size_t spaceUsed32 = 0; |
| + count = (U32 *)zc->tmpCounters + spaceUsed32; |
| + spaceUsed32 += MaxSeq + 1; |
| + norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32); |
| + spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2; |
| + |
| + workspace = (U32 *)zc->tmpCounters + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + } |
| + |
| + /* Compress literals */ |
| + { |
| + const BYTE *const literals = seqStorePtr->litStart; |
| + size_t const litSize = seqStorePtr->lit - literals; |
| + size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize); |
| + if (ZSTD_isError(cSize)) |
| + return cSize; |
| + op += cSize; |
| + } |
| + |
| + /* Sequences Header */ |
| + if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) |
| + return ERROR(dstSize_tooSmall); |
| + if (nbSeq < 0x7F) |
| + *op++ = (BYTE)nbSeq; |
| + else if (nbSeq < LONGNBSEQ) |
| + op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2; |
| + else |
| + op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3; |
| + if (nbSeq == 0) |
| + goto _check_compressibility; |
| + |
| + /* seqHead : flags for FSE encoding type */ |
| + seqHead = op++; |
| + |
| +#define MIN_SEQ_FOR_DYNAMIC_FSE 64 |
| +#define MAX_SEQ_FOR_STATIC_FSE 1000 |
| + |
| + /* convert length/distances into codes */ |
| + ZSTD_seqToCodes(seqStorePtr); |
| + |
| + /* CTable for Literal Lengths */ |
| + { |
| + U32 max = MaxLL; |
| + size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace); |
| + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { |
| + *op++ = llCodeTable[0]; |
| + FSE_buildCTable_rle(CTable_LitLength, (BYTE)max); |
| + LLtype = set_rle; |
| + } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { |
| + LLtype = set_repeat; |
| + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) { |
| + FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize); |
| + LLtype = set_basic; |
| + } else { |
| + size_t nbSeq_1 = nbSeq; |
| + const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max); |
| + if (count[llCodeTable[nbSeq - 1]] > 1) { |
| + count[llCodeTable[nbSeq - 1]]--; |
| + nbSeq_1--; |
| + } |
| + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); |
| + { |
| + size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ |
| + if (FSE_isError(NCountSize)) |
| + return NCountSize; |
| + op += NCountSize; |
| + } |
| + FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize); |
| + LLtype = set_compressed; |
| + } |
| + } |
| + |
| + /* CTable for Offsets */ |
| + { |
| + U32 max = MaxOff; |
| + size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace); |
| + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { |
| + *op++ = ofCodeTable[0]; |
| + FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max); |
| + Offtype = set_rle; |
| + } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { |
| + Offtype = set_repeat; |
| + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) { |
| + FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize); |
| + Offtype = set_basic; |
| + } else { |
| + size_t nbSeq_1 = nbSeq; |
| + const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max); |
| + if (count[ofCodeTable[nbSeq - 1]] > 1) { |
| + count[ofCodeTable[nbSeq - 1]]--; |
| + nbSeq_1--; |
| + } |
| + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); |
| + { |
| + size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ |
| + if (FSE_isError(NCountSize)) |
| + return NCountSize; |
| + op += NCountSize; |
| + } |
| + FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize); |
| + Offtype = set_compressed; |
| + } |
| + } |
| + |
| + /* CTable for MatchLengths */ |
| + { |
| + U32 max = MaxML; |
| + size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace); |
| + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { |
| + *op++ = *mlCodeTable; |
| + FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max); |
| + MLtype = set_rle; |
| + } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { |
| + MLtype = set_repeat; |
| + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) { |
| + FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize); |
| + MLtype = set_basic; |
| + } else { |
| + size_t nbSeq_1 = nbSeq; |
| + const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max); |
| + if (count[mlCodeTable[nbSeq - 1]] > 1) { |
| + count[mlCodeTable[nbSeq - 1]]--; |
| + nbSeq_1--; |
| + } |
| + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); |
| + { |
| + size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ |
| + if (FSE_isError(NCountSize)) |
| + return NCountSize; |
| + op += NCountSize; |
| + } |
| + FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize); |
| + MLtype = set_compressed; |
| + } |
| + } |
| + |
| + *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2)); |
| + zc->flagStaticTables = 0; |
| + |
| + /* Encoding Sequences */ |
| + { |
| + BIT_CStream_t blockStream; |
| + FSE_CState_t stateMatchLength; |
| + FSE_CState_t stateOffsetBits; |
| + FSE_CState_t stateLitLength; |
| + |
| + CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */ |
| + |
| + /* first symbols */ |
| + FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]); |
| + FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]); |
| + FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]); |
| + BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]); |
| + if (ZSTD_32bits()) |
| + BIT_flushBits(&blockStream); |
| + BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]); |
| + if (ZSTD_32bits()) |
| + BIT_flushBits(&blockStream); |
| + if (longOffsets) { |
| + U32 const ofBits = ofCodeTable[nbSeq - 1]; |
| + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1); |
| + if (extraBits) { |
| + BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits); |
| + BIT_flushBits(&blockStream); |
| + } |
| + BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits); |
| + } else { |
| + BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]); |
| + } |
| + BIT_flushBits(&blockStream); |
| + |
| + { |
| + size_t n; |
| + for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */ |
| + BYTE const llCode = llCodeTable[n]; |
| + BYTE const ofCode = ofCodeTable[n]; |
| + BYTE const mlCode = mlCodeTable[n]; |
| + U32 const llBits = LL_bits[llCode]; |
| + U32 const ofBits = ofCode; /* 32b*/ /* 64b*/ |
| + U32 const mlBits = ML_bits[mlCode]; |
| + /* (7)*/ /* (7)*/ |
| + FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ |
| + FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */ |
| + if (ZSTD_32bits()) |
| + BIT_flushBits(&blockStream); /* (7)*/ |
| + FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */ |
| + if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) |
| + BIT_flushBits(&blockStream); /* (7)*/ |
| + BIT_addBits(&blockStream, sequences[n].litLength, llBits); |
| + if (ZSTD_32bits() && ((llBits + mlBits) > 24)) |
| + BIT_flushBits(&blockStream); |
| + BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); |
| + if (ZSTD_32bits()) |
| + BIT_flushBits(&blockStream); /* (7)*/ |
| + if (longOffsets) { |
| + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1); |
| + if (extraBits) { |
| + BIT_addBits(&blockStream, sequences[n].offset, extraBits); |
| + BIT_flushBits(&blockStream); /* (7)*/ |
| + } |
| + BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ |
| + } else { |
| + BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ |
| + } |
| + BIT_flushBits(&blockStream); /* (7)*/ |
| + } |
| + } |
| + |
| + FSE_flushCState(&blockStream, &stateMatchLength); |
| + FSE_flushCState(&blockStream, &stateOffsetBits); |
| + FSE_flushCState(&blockStream, &stateLitLength); |
| + |
| + { |
| + size_t const streamSize = BIT_closeCStream(&blockStream); |
| + if (streamSize == 0) |
| + return ERROR(dstSize_tooSmall); /* not enough space */ |
| + op += streamSize; |
| + } |
| + } |
| + |
| +/* check compressibility */ |
| +_check_compressibility: |
| + { |
| + size_t const minGain = ZSTD_minGain(srcSize); |
| + size_t const maxCSize = srcSize - minGain; |
| + if ((size_t)(op - ostart) >= maxCSize) { |
| + zc->flagStaticHufTable = HUF_repeat_none; |
| + return 0; |
| + } |
| + } |
| + |
| + /* confirm repcodes */ |
| + { |
| + int i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + zc->rep[i] = zc->repToConfirm[i]; |
| + } |
| + |
| + return op - ostart; |
| +} |
| + |
| +/*! ZSTD_storeSeq() : |
| + Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. |
| + `offsetCode` : distance to match, or 0 == repCode. |
| + `matchCode` : matchLength - MINMATCH |
| +*/ |
| +ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode) |
| +{ |
| + /* copy Literals */ |
| + ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); |
| + seqStorePtr->lit += litLength; |
| + |
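| +	/* litLength and matchLength are stored as U16 ; values above 0xFFFF are
| +	 * flagged through longLengthID/longLengthPos so the sequence encoder can
| +	 * recover the full length later */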
| + /* literal Length */ |
| + if (litLength > 0xFFFF) { |
| + seqStorePtr->longLengthID = 1; |
| + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); |
| + } |
| + seqStorePtr->sequences[0].litLength = (U16)litLength; |
| + |
| + /* match offset */ |
| + seqStorePtr->sequences[0].offset = offsetCode + 1; |
| + |
| + /* match Length */ |
| + if (matchCode > 0xFFFF) { |
| + seqStorePtr->longLengthID = 2; |
| + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); |
| + } |
| + seqStorePtr->sequences[0].matchLength = (U16)matchCode; |
| + |
| + seqStorePtr->sequences++; |
| +} |
| + |
| +/*-************************************* |
| +* Match length counter |
| +***************************************/ |
| +static unsigned ZSTD_NbCommonBytes(register size_t val) |
| +{ |
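| +	/* val is the XOR of two words that differ ; the number of equal bytes is
| +	 * the count of trailing zero bits (little endian) or leading zero bits
| +	 * (big endian), divided by 8 */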
| + if (ZSTD_isLittleEndian()) { |
| + if (ZSTD_64bits()) { |
| + return (__builtin_ctzll((U64)val) >> 3); |
| + } else { /* 32 bits */ |
| + return (__builtin_ctz((U32)val) >> 3); |
| + } |
| + } else { /* Big Endian CPU */ |
| + if (ZSTD_64bits()) { |
| + return (__builtin_clzll(val) >> 3); |
| + } else { /* 32 bits */ |
| + return (__builtin_clz((U32)val) >> 3); |
| + } |
| + } |
| +} |
| + |
| +static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit) |
| +{ |
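| +	/* compare one size_t word at a time, then finish with smaller tail
| +	 * comparisons once fewer than a full word remains before pInLimit */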
| + const BYTE *const pStart = pIn; |
| + const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1); |
| + |
| + while (pIn < pInLoopLimit) { |
| + size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn); |
| + if (!diff) { |
| + pIn += sizeof(size_t); |
| + pMatch += sizeof(size_t); |
| + continue; |
| + } |
| + pIn += ZSTD_NbCommonBytes(diff); |
| + return (size_t)(pIn - pStart); |
| + } |
| + if (ZSTD_64bits()) |
| + if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) { |
| + pIn += 4; |
| + pMatch += 4; |
| + } |
| + if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) { |
| + pIn += 2; |
| + pMatch += 2; |
| + } |
| + if ((pIn < pInLimit) && (*pMatch == *pIn)) |
| + pIn++; |
| + return (size_t)(pIn - pStart); |
| +} |
| + |
| +/** ZSTD_count_2segments() : |
| +* counts the match length even when `ip` & `match` lie in 2 different segments.
| +* convention : on reaching mEnd, match counting continues, restarting from iStart
| +*/ |
| +static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart) |
| +{ |
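| +	/* count within the match's own segment first (vEnd caps at mEnd) ; if the
| +	 * match reaches mEnd, keep counting against the current segment, with the
| +	 * match side restarting from iStart */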
| + const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd); |
| + size_t const matchLength = ZSTD_count(ip, match, vEnd); |
| + if (match + matchLength != mEnd) |
| + return matchLength; |
| + return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd); |
| +} |
| + |
| +/*-************************************* |
| +* Hashes |
| +***************************************/ |
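| +/* Each hash multiplies its input by a large prime and keeps the top `h` bits
| + * of the product ; the pre-shift (e.g. `u << (64 - 48)` for hash6) restricts
| + * the hash to the lowest N input bytes. */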
| +static const U32 prime3bytes = 506832829U; |
| +static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); } |
| +ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */ |
| + |
| +static const U32 prime4bytes = 2654435761U; |
| +static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); } |
| +static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); } |
| + |
| +static const U64 prime5bytes = 889523592379ULL; |
| +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); } |
| +static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); } |
| + |
| +static const U64 prime6bytes = 227718039650203ULL; |
| +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); } |
| +static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); } |
| + |
| +static const U64 prime7bytes = 58295818150454627ULL; |
| +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); } |
| +static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); } |
| + |
| +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; |
| +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); } |
| +static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); } |
| + |
| +static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls) |
| +{ |
| + switch (mls) { |
| +	/* case 3: return ZSTD_hash3Ptr(p, hBits); */
| + default: |
| + case 4: return ZSTD_hash4Ptr(p, hBits); |
| + case 5: return ZSTD_hash5Ptr(p, hBits); |
| + case 6: return ZSTD_hash6Ptr(p, hBits); |
| + case 7: return ZSTD_hash7Ptr(p, hBits); |
| + case 8: return ZSTD_hash8Ptr(p, hBits); |
| + } |
| +} |
| + |
| +/*-************************************* |
| +* Fast Scan |
| +***************************************/ |
| +static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls) |
| +{ |
| + U32 *const hashTable = zc->hashTable; |
| + U32 const hBits = zc->params.cParams.hashLog; |
| + const BYTE *const base = zc->base; |
| + const BYTE *ip = base + zc->nextToUpdate; |
| + const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE; |
| + const size_t fastHashFillStep = 3; |
| + |
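| +	/* seed the hash table with every 3rd position ; later positions simply
| +	 * overwrite colliding entries */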
| + while (ip <= iend) { |
| + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); |
| + ip += fastHashFillStep; |
| + } |
| +} |
| + |
| +FORCE_INLINE |
| +void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls) |
| +{ |
| + U32 *const hashTable = cctx->hashTable; |
| + U32 const hBits = cctx->params.cParams.hashLog; |
| + seqStore_t *seqStorePtr = &(cctx->seqStore); |
| + const BYTE *const base = cctx->base; |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const U32 lowestIndex = cctx->dictLimit; |
| + const BYTE *const lowest = base + lowestIndex; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - HASH_READ_SIZE; |
| + U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1]; |
| + U32 offsetSaved = 0; |
| + |
| + /* init */ |
| + ip += (ip == lowest); |
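| +	/* repcodes carried over from the previous block may reach before the
| +	 * start of the window on the first bytes of this block ; such repcodes
| +	 * are disabled (set to 0) and one is kept in offsetSaved so it can be
| +	 * restored into repToConfirm at the end of the block */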
| + { |
| + U32 const maxRep = (U32)(ip - lowest); |
| + if (offset_2 > maxRep) |
| + offsetSaved = offset_2, offset_2 = 0; |
| + if (offset_1 > maxRep) |
| + offsetSaved = offset_1, offset_1 = 0; |
| + } |
| + |
| + /* Main Search Loop */ |
| + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ |
| + size_t mLength; |
| + size_t const h = ZSTD_hashPtr(ip, hBits, mls); |
| + U32 const curr = (U32)(ip - base); |
| + U32 const matchIndex = hashTable[h]; |
| + const BYTE *match = base + matchIndex; |
| + hashTable[h] = curr; /* update hash table */ |
| + |
| + if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { |
| + mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; |
| + ip++; |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); |
| + } else { |
| + U32 offset; |
| + if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) { |
| + ip += ((ip - anchor) >> g_searchStrength) + 1; |
| + continue; |
| + } |
| + mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; |
| + offset = (U32)(ip - match); |
| + while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) { |
| + ip--; |
| + match--; |
| + mLength++; |
| + } /* catch up */ |
| + offset_2 = offset_1; |
| + offset_1 = offset; |
| + |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); |
| + } |
| + |
| + /* match found */ |
| + ip += mLength; |
| + anchor = ip; |
| + |
| + if (ip <= ilimit) { |
| + /* Fill Table */ |
| + hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */ |
| + hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base); |
| + /* check immediate repcode */ |
| + while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { |
| + /* store sequence */ |
| + size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; |
| + { |
| + U32 const tmpOff = offset_2; |
| + offset_2 = offset_1; |
| + offset_1 = tmpOff; |
| + } /* swap offset_2 <=> offset_1 */ |
| + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); |
| + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH); |
| + ip += rLength; |
| + anchor = ip; |
| + continue; /* faster when present ... (?) */ |
| + } |
| + } |
| + } |
| + |
| + /* save reps for next block */ |
| + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; |
| + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; |
| + |
| + /* Last Literals */ |
| + { |
| + size_t const lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
| +static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| + const U32 mls = ctx->params.cParams.searchLength; |
| + switch (mls) { |
| + default: /* includes case 3 */ |
| + case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return; |
| + case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return; |
| + case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return; |
| + case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return; |
| + } |
| +} |
| + |
| +static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls) |
| +{ |
| + U32 *hashTable = ctx->hashTable; |
| + const U32 hBits = ctx->params.cParams.hashLog; |
| + seqStore_t *seqStorePtr = &(ctx->seqStore); |
| + const BYTE *const base = ctx->base; |
| + const BYTE *const dictBase = ctx->dictBase; |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const U32 lowestIndex = ctx->lowLimit; |
| + const BYTE *const dictStart = dictBase + lowestIndex; |
| + const U32 dictLimit = ctx->dictLimit; |
| + const BYTE *const lowPrefixPtr = base + dictLimit; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - 8; |
| + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; |
| + |
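| +	/* indices below dictLimit refer to the external dictionary segment
| +	 * (dictBase), indices at or above dictLimit to the current prefix (base) */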
| + /* Search Loop */ |
| +	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
| + const size_t h = ZSTD_hashPtr(ip, hBits, mls); |
| + const U32 matchIndex = hashTable[h]; |
| + const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base; |
| + const BYTE *match = matchBase + matchIndex; |
| + const U32 curr = (U32)(ip - base); |
| + const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ |
| + const BYTE *repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *repMatch = repBase + repIndex; |
| + size_t mLength; |
| + hashTable[h] = curr; /* update hash table */ |
| + |
| + if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) && |
| + (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) { |
| + const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend; |
| + mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32; |
| + ip++; |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); |
| + } else { |
| + if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) { |
| + ip += ((ip - anchor) >> g_searchStrength) + 1; |
| + continue; |
| + } |
| + { |
| + const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend; |
| + const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; |
| + U32 offset; |
| + mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32; |
| + while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) { |
| + ip--; |
| + match--; |
| + mLength++; |
| + } /* catch up */ |
| + offset = curr - matchIndex; |
| + offset_2 = offset_1; |
| + offset_1 = offset; |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); |
| + } |
| + } |
| + |
| + /* found a match : store it */ |
| + ip += mLength; |
| + anchor = ip; |
| + |
| + if (ip <= ilimit) { |
| + /* Fill Table */ |
| + hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; |
| + hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base); |
| + /* check immediate repcode */ |
| + while (ip <= ilimit) { |
| + U32 const curr2 = (U32)(ip - base); |
| + U32 const repIndex2 = curr2 - offset_2; |
| + const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; |
| + if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ |
| + && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) { |
| + const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; |
| + size_t repLength2 = |
| + ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; |
| + U32 tmpOffset = offset_2; |
| + offset_2 = offset_1; |
| + offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ |
| + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH); |
| + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2; |
| + ip += repLength2; |
| + anchor = ip; |
| + continue; |
| + } |
| + break; |
| + } |
| + } |
| + } |
| + |
| + /* save reps for next block */ |
| + ctx->repToConfirm[0] = offset_1; |
| + ctx->repToConfirm[1] = offset_2; |
| + |
| + /* Last Literals */ |
| + { |
| + size_t const lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
| +static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| + U32 const mls = ctx->params.cParams.searchLength; |
| + switch (mls) { |
| + default: /* includes case 3 */ |
| + case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return; |
| + case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return; |
| + case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return; |
| + case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return; |
| + } |
| +} |
| + |
| +/*-************************************* |
| +* Double Fast |
| +***************************************/ |
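| +/* "double fast" keeps two hash tables : a long one hashing 8 bytes (hashLog
| + * bits) and a small one hashing `mls` bytes (chainLog bits) ; long matches
| + * are tried first, and a small-table hit may be upgraded by re-probing the
| + * long table at ip+1. */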
| +static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls) |
| +{ |
| + U32 *const hashLarge = cctx->hashTable; |
| + U32 const hBitsL = cctx->params.cParams.hashLog; |
| + U32 *const hashSmall = cctx->chainTable; |
| + U32 const hBitsS = cctx->params.cParams.chainLog; |
| + const BYTE *const base = cctx->base; |
| + const BYTE *ip = base + cctx->nextToUpdate; |
| + const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE; |
| + const size_t fastHashFillStep = 3; |
| + |
| + while (ip <= iend) { |
| + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); |
| + hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); |
| + ip += fastHashFillStep; |
| + } |
| +} |
| + |
| +FORCE_INLINE |
| +void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls) |
| +{ |
| + U32 *const hashLong = cctx->hashTable; |
| + const U32 hBitsL = cctx->params.cParams.hashLog; |
| + U32 *const hashSmall = cctx->chainTable; |
| + const U32 hBitsS = cctx->params.cParams.chainLog; |
| + seqStore_t *seqStorePtr = &(cctx->seqStore); |
| + const BYTE *const base = cctx->base; |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const U32 lowestIndex = cctx->dictLimit; |
| + const BYTE *const lowest = base + lowestIndex; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - HASH_READ_SIZE; |
| + U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1]; |
| + U32 offsetSaved = 0; |
| + |
| + /* init */ |
| + ip += (ip == lowest); |
| + { |
| + U32 const maxRep = (U32)(ip - lowest); |
| + if (offset_2 > maxRep) |
| + offsetSaved = offset_2, offset_2 = 0; |
| + if (offset_1 > maxRep) |
| + offsetSaved = offset_1, offset_1 = 0; |
| + } |
| + |
| + /* Main Search Loop */ |
| + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ |
| + size_t mLength; |
| + size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); |
| + size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); |
| + U32 const curr = (U32)(ip - base); |
| + U32 const matchIndexL = hashLong[h2]; |
| + U32 const matchIndexS = hashSmall[h]; |
| + const BYTE *matchLong = base + matchIndexL; |
| + const BYTE *match = base + matchIndexS; |
| + hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ |
| + |
| + if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */ |
| + mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; |
| + ip++; |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); |
| + } else { |
| + U32 offset; |
| + if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) { |
| + mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8; |
| + offset = (U32)(ip - matchLong); |
| + while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) { |
| + ip--; |
| + matchLong--; |
| + mLength++; |
| + } /* catch up */ |
| + } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) { |
| + size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); |
| + U32 const matchIndex3 = hashLong[h3]; |
| + const BYTE *match3 = base + matchIndex3; |
| + hashLong[h3] = curr + 1; |
| + if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) { |
| + mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8; |
| + ip++; |
| + offset = (U32)(ip - match3); |
| + while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) { |
| + ip--; |
| + match3--; |
| + mLength++; |
| + } /* catch up */ |
| + } else { |
| + mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; |
| + offset = (U32)(ip - match); |
| + while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) { |
| + ip--; |
| + match--; |
| + mLength++; |
| + } /* catch up */ |
| + } |
| + } else { |
| + ip += ((ip - anchor) >> g_searchStrength) + 1; |
| + continue; |
| + } |
| + |
| + offset_2 = offset_1; |
| + offset_1 = offset; |
| + |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); |
| + } |
| + |
| + /* match found */ |
| + ip += mLength; |
| + anchor = ip; |
| + |
| + if (ip <= ilimit) { |
| + /* Fill Table */ |
| + hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = |
| + curr + 2; /* here because curr+2 could be > iend-8 */ |
| + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base); |
| + |
| + /* check immediate repcode */ |
| + while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { |
| + /* store sequence */ |
| + size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; |
| + { |
| + U32 const tmpOff = offset_2; |
| + offset_2 = offset_1; |
| + offset_1 = tmpOff; |
| + } /* swap offset_2 <=> offset_1 */ |
| + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); |
| + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); |
| + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH); |
| + ip += rLength; |
| + anchor = ip; |
| + continue; /* faster when present ... (?) */ |
| + } |
| + } |
| + } |
| + |
| + /* save reps for next block */ |
| + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; |
| + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; |
| + |
| + /* Last Literals */ |
| + { |
| + size_t const lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
| +static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| + const U32 mls = ctx->params.cParams.searchLength; |
| + switch (mls) { |
| + default: /* includes case 3 */ |
| + case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return; |
| + case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return; |
| + case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return; |
| + case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return; |
| + } |
| +} |
| + |
| +static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls) |
| +{ |
| + U32 *const hashLong = ctx->hashTable; |
| + U32 const hBitsL = ctx->params.cParams.hashLog; |
| + U32 *const hashSmall = ctx->chainTable; |
| + U32 const hBitsS = ctx->params.cParams.chainLog; |
| + seqStore_t *seqStorePtr = &(ctx->seqStore); |
| + const BYTE *const base = ctx->base; |
| + const BYTE *const dictBase = ctx->dictBase; |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const U32 lowestIndex = ctx->lowLimit; |
| + const BYTE *const dictStart = dictBase + lowestIndex; |
| + const U32 dictLimit = ctx->dictLimit; |
| + const BYTE *const lowPrefixPtr = base + dictLimit; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - 8; |
| + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; |
| + |
| + /* Search Loop */ |
| +	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
| + const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); |
| + const U32 matchIndex = hashSmall[hSmall]; |
| + const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base; |
| + const BYTE *match = matchBase + matchIndex; |
| + |
| + const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); |
| + const U32 matchLongIndex = hashLong[hLong]; |
| + const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base; |
| + const BYTE *matchLong = matchLongBase + matchLongIndex; |
| + |
| + const U32 curr = (U32)(ip - base); |
| + const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ |
| + const BYTE *repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *repMatch = repBase + repIndex; |
| + size_t mLength; |
| + hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ |
| + |
| + if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) && |
| + (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) { |
| + const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend; |
| + mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4; |
| + ip++; |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH); |
| + } else { |
| + if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) { |
| + const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; |
| + const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr; |
| + U32 offset; |
| + mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8; |
| + offset = curr - matchLongIndex; |
| + while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) { |
| + ip--; |
| + matchLong--; |
| + mLength++; |
| + } /* catch up */ |
| + offset_2 = offset_1; |
| + offset_1 = offset; |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); |
| + |
| + } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) { |
| + size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); |
| + U32 const matchIndex3 = hashLong[h3]; |
| + const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base; |
| + const BYTE *match3 = match3Base + matchIndex3; |
| + U32 offset; |
| + hashLong[h3] = curr + 1; |
| + if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) { |
| + const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; |
| + const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; |
| + mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8; |
| + ip++; |
| + offset = curr + 1 - matchIndex3; |
| + while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) { |
| + ip--; |
| + match3--; |
| + mLength++; |
| + } /* catch up */ |
| + } else { |
| + const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend; |
| + const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; |
| + mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4; |
| + offset = curr - matchIndex; |
| + while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) { |
| + ip--; |
| + match--; |
| + mLength++; |
| + } /* catch up */ |
| + } |
| + offset_2 = offset_1; |
| + offset_1 = offset; |
| + ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH); |
| + |
| + } else { |
| + ip += ((ip - anchor) >> g_searchStrength) + 1; |
| + continue; |
| + } |
| + } |
| + |
| + /* found a match : store it */ |
| + ip += mLength; |
| + anchor = ip; |
| + |
| + if (ip <= ilimit) { |
| + /* Fill Table */ |
| + hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2; |
| + hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2; |
| + hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base); |
| + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base); |
| + /* check immediate repcode */ |
| + while (ip <= ilimit) { |
| + U32 const curr2 = (U32)(ip - base); |
| + U32 const repIndex2 = curr2 - offset_2; |
| + const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; |
| + if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ |
| + && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) { |
| + const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; |
| + size_t const repLength2 = |
| + ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; |
| + U32 tmpOffset = offset_2; |
| + offset_2 = offset_1; |
| + offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ |
| + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH); |
| + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2; |
| + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2; |
| + ip += repLength2; |
| + anchor = ip; |
| + continue; |
| + } |
| + break; |
| + } |
| + } |
| + } |
| + |
| + /* save reps for next block */ |
| + ctx->repToConfirm[0] = offset_1; |
| + ctx->repToConfirm[1] = offset_2; |
| + |
| + /* Last Literals */ |
| + { |
| + size_t const lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
| +static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| + U32 const mls = ctx->params.cParams.searchLength; |
| + switch (mls) { |
| + default: /* includes case 3 */ |
| + case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return; |
| + case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return; |
| + case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return; |
| + case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return; |
| + } |
| +} |
| + |
| +/*-************************************* |
| +* Binary Tree search |
| +***************************************/ |
| +/** ZSTD_insertBt1() : add one or more positions to the tree.
| +* ip : assumed <= iend-8.
| +* @return : nb of positions added */ |
| +static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict) |
| +{ |
| + U32 *const hashTable = zc->hashTable; |
| + U32 const hashLog = zc->params.cParams.hashLog; |
| + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); |
| + U32 *const bt = zc->chainTable; |
| + U32 const btLog = zc->params.cParams.chainLog - 1; |
| + U32 const btMask = (1 << btLog) - 1; |
| + U32 matchIndex = hashTable[h]; |
| + size_t commonLengthSmaller = 0, commonLengthLarger = 0; |
| + const BYTE *const base = zc->base; |
| + const BYTE *const dictBase = zc->dictBase; |
| + const U32 dictLimit = zc->dictLimit; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + const BYTE *const prefixStart = base + dictLimit; |
| + const BYTE *match; |
| + const U32 curr = (U32)(ip - base); |
| + const U32 btLow = btMask >= curr ? 0 : curr - btMask; |
| + U32 *smallerPtr = bt + 2 * (curr & btMask); |
| + U32 *largerPtr = smallerPtr + 1; |
| + U32 dummy32; /* to be nullified at the end */ |
| + U32 const windowLow = zc->lowLimit; |
| + U32 matchEndIdx = curr + 8; |
| + size_t bestLength = 8; |
| + |
| + hashTable[h] = curr; /* Update Hash Table */ |
| + |
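| +	/* the chainTable is used as a binary tree : each position owns two
| +	 * consecutive U32 cells at bt + 2*(index & btMask), holding the roots of
| +	 * the subtrees of positions whose suffix sorts smaller / larger */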
| + while (nbCompares-- && (matchIndex > windowLow)) { |
| + U32 *const nextPtr = bt + 2 * (matchIndex & btMask); |
| + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ |
| + |
| + if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { |
| + match = base + matchIndex; |
| + if (match[matchLength] == ip[matchLength]) |
| + matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1; |
| + } else { |
| + match = dictBase + matchIndex; |
| + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); |
| + if (matchIndex + matchLength >= dictLimit) |
| + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ |
| + } |
| + |
| + if (matchLength > bestLength) { |
| + bestLength = matchLength; |
| + if (matchLength > matchEndIdx - matchIndex) |
| + matchEndIdx = matchIndex + (U32)matchLength; |
| + } |
| + |
| + if (ip + matchLength == iend) /* equal : no way to know if inf or sup */ |
| +			break; /* drop, to guarantee consistency ; misses a bit of compression, but other solutions can corrupt the tree */
| + |
| + if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ |
| + /* match is smaller than curr */ |
| + *smallerPtr = matchIndex; /* update smaller idx */ |
| + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ |
| + if (matchIndex <= btLow) { |
| + smallerPtr = &dummy32; |
| + break; |
| + } /* beyond tree size, stop the search */ |
| + smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ |
| + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ |
| + } else { |
| + /* match is larger than curr */ |
| + *largerPtr = matchIndex; |
| + commonLengthLarger = matchLength; |
| + if (matchIndex <= btLow) { |
| + largerPtr = &dummy32; |
| + break; |
| + } /* beyond tree size, stop the search */ |
| + largerPtr = nextPtr; |
| + matchIndex = nextPtr[0]; |
| + } |
| + } |
| + |
| + *smallerPtr = *largerPtr = 0; |
| + if (bestLength > 384) |
| + return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ |
| + if (matchEndIdx > curr + 8) |
| + return matchEndIdx - curr - 8; |
| + return 1; |
| +} |
| + |
| +static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls, |
| + U32 extDict) |
| +{ |
| + U32 *const hashTable = zc->hashTable; |
| + U32 const hashLog = zc->params.cParams.hashLog; |
| + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); |
| + U32 *const bt = zc->chainTable; |
| + U32 const btLog = zc->params.cParams.chainLog - 1; |
| + U32 const btMask = (1 << btLog) - 1; |
| + U32 matchIndex = hashTable[h]; |
| + size_t commonLengthSmaller = 0, commonLengthLarger = 0; |
| + const BYTE *const base = zc->base; |
| + const BYTE *const dictBase = zc->dictBase; |
| + const U32 dictLimit = zc->dictLimit; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + const BYTE *const prefixStart = base + dictLimit; |
| + const U32 curr = (U32)(ip - base); |
| + const U32 btLow = btMask >= curr ? 0 : curr - btMask; |
| + const U32 windowLow = zc->lowLimit; |
| + U32 *smallerPtr = bt + 2 * (curr & btMask); |
| + U32 *largerPtr = bt + 2 * (curr & btMask) + 1; |
| + U32 matchEndIdx = curr + 8; |
| + U32 dummy32; /* to be nullified at the end */ |
| + size_t bestLength = 0; |
| + |
| + hashTable[h] = curr; /* Update Hash Table */ |
| + |
| + while (nbCompares-- && (matchIndex > windowLow)) { |
| + U32 *const nextPtr = bt + 2 * (matchIndex & btMask); |
| + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ |
| + const BYTE *match; |
| + |
| + if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { |
| + match = base + matchIndex; |
| + if (match[matchLength] == ip[matchLength]) |
| + matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1; |
| + } else { |
| + match = dictBase + matchIndex; |
| + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); |
| + if (matchIndex + matchLength >= dictLimit) |
| + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ |
| + } |
| + |
| + if (matchLength > bestLength) { |
| + if (matchLength > matchEndIdx - matchIndex) |
| + matchEndIdx = matchIndex + (U32)matchLength; |
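| +			/* keep the longer match only if the extra length outweighs the
| +			 * extra offset cost, compared in log2(offset) units */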
| + if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1))) |
| + bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; |
| + if (ip + matchLength == iend) /* equal : no way to know if inf or sup */ |
| + break; /* drop, to guarantee consistency (miss a little bit of compression) */ |
| + } |
| + |
| + if (match[matchLength] < ip[matchLength]) { |
| + /* match is smaller than curr */ |
| + *smallerPtr = matchIndex; /* update smaller idx */ |
| + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ |
| + if (matchIndex <= btLow) { |
| + smallerPtr = &dummy32; |
| + break; |
| + } /* beyond tree size, stop the search */ |
| + smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ |
| + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ |
| + } else { |
| + /* match is larger than curr */ |
| + *largerPtr = matchIndex; |
| + commonLengthLarger = matchLength; |
| + if (matchIndex <= btLow) { |
| + largerPtr = &dummy32; |
| + break; |
| + } /* beyond tree size, stop the search */ |
| + largerPtr = nextPtr; |
| + matchIndex = nextPtr[0]; |
| + } |
| + } |
| + |
| + *smallerPtr = *largerPtr = 0; |
| + |
| + zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1; |
| + return bestLength; |
| +} |
| + |
| +static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls) |
| +{ |
| + const BYTE *const base = zc->base; |
| + const U32 target = (U32)(ip - base); |
| + U32 idx = zc->nextToUpdate; |
| + |
| + while (idx < target) |
| + idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0); |
| +} |
| + |
| +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ |
| +static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls) |
| +{ |
| + if (ip < zc->base + zc->nextToUpdate) |
| + return 0; /* skipped area */ |
| + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); |
| + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); |
| +} |
| + |
| +static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */ |
| + const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch) |
| +{ |
| + switch (matchLengthSearch) { |
| + default: /* includes case 3 */ |
| + case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); |
| + case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); |
| + case 7: |
| + case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); |
| + } |
| +} |
| + |
| +static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls) |
| +{ |
| + const BYTE *const base = zc->base; |
| + const U32 target = (U32)(ip - base); |
| + U32 idx = zc->nextToUpdate; |
| + |
| + while (idx < target) |
| + idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1); |
| +} |
| + |
| +/** Tree updater, providing best match */ |
| +static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, |
| + const U32 mls) |
| +{ |
| + if (ip < zc->base + zc->nextToUpdate) |
| + return 0; /* skipped area */ |
| + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); |
| + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); |
| +} |
| + |
| +static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */ |
| + const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, |
| + const U32 matchLengthSearch) |
| +{ |
| + switch (matchLengthSearch) { |
| + default: /* includes case 3 */ |
| + case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); |
| + case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); |
| + case 7: |
| + case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); |
| + } |
| +} |
| + |
| +/* ********************************* |
| +* Hash Chain |
| +***********************************/ |
| +#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask] |
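| +/* hashTable[h] holds the most recent position whose hash is h ;
| + * chainTable[pos & mask] links that position to the previous one with the
| + * same hash, forming the per-hash chains walked by the match finder. */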
| + |
| +/* Update chains up to ip (excluded) |
| + Assumption : always within prefix (i.e. not within extDict) */ |
| +FORCE_INLINE |
| +U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls) |
| +{ |
| + U32 *const hashTable = zc->hashTable; |
| + const U32 hashLog = zc->params.cParams.hashLog; |
| + U32 *const chainTable = zc->chainTable; |
| + const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1; |
| + const BYTE *const base = zc->base; |
| + const U32 target = (U32)(ip - base); |
| + U32 idx = zc->nextToUpdate; |
| + |
| + while (idx < target) { /* catch up */ |
| + size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); |
| + NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; |
| + hashTable[h] = idx; |
| + idx++; |
| + } |
| + |
| + zc->nextToUpdate = target; |
| + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; |
| +} |
| + |
| +/* inlining is important to hardwire a hot branch (template emulation) */ |
| +FORCE_INLINE |
| +size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */ |
| + const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls, |
| + const U32 extDict) |
| +{ |
| + U32 *const chainTable = zc->chainTable; |
| + const U32 chainSize = (1 << zc->params.cParams.chainLog); |
| + const U32 chainMask = chainSize - 1; |
| + const BYTE *const base = zc->base; |
| + const BYTE *const dictBase = zc->dictBase; |
| + const U32 dictLimit = zc->dictLimit; |
| + const BYTE *const prefixStart = base + dictLimit; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + const U32 lowLimit = zc->lowLimit; |
| + const U32 curr = (U32)(ip - base); |
| + const U32 minChain = curr > chainSize ? curr - chainSize : 0; |
| + int nbAttempts = maxNbAttempts; |
| + size_t ml = EQUAL_READ32 - 1; |
| + |
| + /* HC4 match finder */ |
| + U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls); |
| + |
| + for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) { |
| + const BYTE *match; |
| + size_t currMl = 0; |
| + if ((!extDict) || matchIndex >= dictLimit) { |
| + match = base + matchIndex; |
| + if (match[ml] == ip[ml]) /* potentially better */ |
| + currMl = ZSTD_count(ip, match, iLimit); |
| + } else { |
| + match = dictBase + matchIndex; |
| + if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ |
| + currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32; |
| + } |
| + |
| + /* save best solution */ |
| + if (currMl > ml) { |
| + ml = currMl; |
| + *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; |
| + if (ip + currMl == iLimit) |
| + break; /* best possible, and avoid read overflow*/ |
| + } |
| + |
| + if (matchIndex <= minChain) |
| + break; |
| + matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); |
| + } |
| + |
| + return ml; |
| +} |
| + |
| +FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, |
| + const U32 matchLengthSearch) |
| +{ |
| + switch (matchLengthSearch) { |
| + default: /* includes case 3 */ |
| + case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); |
| + case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); |
| + case 7: |
| + case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); |
| + } |
| +} |
| + |
| +FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, |
| + const U32 matchLengthSearch) |
| +{ |
| + switch (matchLengthSearch) { |
| + default: /* includes case 3 */ |
| + case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1); |
| + case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1); |
| + case 7: |
| + case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1); |
| + } |
| +} |
| + |
| +/* ******************************* |
| +* Common parser - lazy strategy |
| +*********************************/ |
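| +/* The lazy parser evaluates, at each position, the repcode at ip+1 and the
| + * best match found by the selected search structure ; with depth >= 1 it
| + * re-runs the search at ip+1 (and ip+2 when depth == 2) and switches to the
| + * later match only when its length/offset gain estimate is higher. */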
| +FORCE_INLINE |
| +void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth) |
| +{ |
| + seqStore_t *seqStorePtr = &(ctx->seqStore); |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - 8; |
| + const BYTE *const base = ctx->base + ctx->dictLimit; |
| + |
| + U32 const maxSearches = 1 << ctx->params.cParams.searchLog; |
| + U32 const mls = ctx->params.cParams.searchLength; |
| + |
| + typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch); |
| + searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; |
| + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0; |
| + |
| + /* init */ |
| + ip += (ip == base); |
| + ctx->nextToUpdate3 = ctx->nextToUpdate; |
| + { |
| + U32 const maxRep = (U32)(ip - base); |
| + if (offset_2 > maxRep) |
| + savedOffset = offset_2, offset_2 = 0; |
| + if (offset_1 > maxRep) |
| + savedOffset = offset_1, offset_1 = 0; |
| + } |
| + |
| + /* Match Loop */ |
| + while (ip < ilimit) { |
| + size_t matchLength = 0; |
| + size_t offset = 0; |
| + const BYTE *start = ip + 1; |
| + |
| + /* check repCode */ |
| + if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) { |
| + /* repcode : we take it */ |
| + matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; |
| + if (depth == 0) |
| + goto _storeSequence; |
| + } |
| + |
| + /* first search (depth 0) */ |
| + { |
| + size_t offsetFound = 99999999; |
| + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); |
| + if (ml2 > matchLength) |
| + matchLength = ml2, start = ip, offset = offsetFound; |
| + } |
| + |
| + if (matchLength < EQUAL_READ32) { |
| + ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ |
| + continue; |
| + } |
| + |
| + /* let's try to find a better solution */ |
| + if (depth >= 1) |
| + while (ip < ilimit) { |
| + ip++; |
| + if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) { |
| + size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; |
| + int const gain2 = (int)(mlRep * 3); |
| + int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1); |
| + if ((mlRep >= EQUAL_READ32) && (gain2 > gain1)) |
| + matchLength = mlRep, offset = 0, start = ip; |
| + } |
| + { |
| + size_t offset2 = 99999999; |
| + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); |
| + int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ |
| + int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4); |
| + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { |
| + matchLength = ml2, offset = offset2, start = ip; |
| + continue; /* search a better one */ |
| + } |
| + } |
| + |
| + /* let's find an even better one */ |
| + if ((depth == 2) && (ip < ilimit)) { |
| + ip++; |
| + if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) { |
| + size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32; |
| + int const gain2 = (int)(ml2 * 4); |
| + int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1); |
| + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) |
| + matchLength = ml2, offset = 0, start = ip; |
| + } |
| + { |
| + size_t offset2 = 99999999; |
| + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); |
| + int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ |
| + int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7); |
| + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { |
| + matchLength = ml2, offset = offset2, start = ip; |
| + continue; |
| + } |
| + } |
| + } |
| + break; /* nothing found : store previous solution */ |
| + } |
| + |
| +		/* NOTE:
| +		 * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
| +		 * (-offset+ZSTD_REP_MOVE-1) is unsigned; adding it to start overflows
| +		 * the pointer, which is undefined behavior.
| +		 */
| + /* catch up */ |
| + if (offset) { |
| + while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) && |
| + (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */ |
| + { |
| + start--; |
| + matchLength++; |
| + } |
| + offset_2 = offset_1; |
| + offset_1 = (U32)(offset - ZSTD_REP_MOVE); |
| + } |
| + |
| + /* store sequence */ |
| +_storeSequence: |
| + { |
| + size_t const litLength = start - anchor; |
| + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH); |
| + anchor = ip = start + matchLength; |
| + } |
| + |
| + /* check immediate repcode */ |
| + while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) { |
| + /* store sequence */ |
| + matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32; |
| + offset = offset_2; |
| + offset_2 = offset_1; |
| + offset_1 = (U32)offset; /* swap repcodes */ |
| + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH); |
| + ip += matchLength; |
| + anchor = ip; |
| + continue; /* faster when present ... (?) */ |
| + } |
| + } |
| + |
| + /* Save reps for next block */ |
| + ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; |
| + ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset; |
| + |
| + /* Last Literals */ |
| + { |
| + size_t const lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
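| +/* strategy wrappers, as (searchMethod, depth) pairs :
| + * btlazy2 = binary tree, depth 2 ; lazy2 = hash chain, depth 2 ;
| + * lazy = hash chain, depth 1 ; greedy = hash chain, depth 0 */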
| +static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); } |
| + |
| +static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); } |
| + |
| +static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); } |
| + |
| +static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); } |
| + |
| +FORCE_INLINE |
| +void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth) |
| +{ |
| + seqStore_t *seqStorePtr = &(ctx->seqStore); |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - 8; |
| + const BYTE *const base = ctx->base; |
| + const U32 dictLimit = ctx->dictLimit; |
| + const U32 lowestIndex = ctx->lowLimit; |
| + const BYTE *const prefixStart = base + dictLimit; |
| + const BYTE *const dictBase = ctx->dictBase; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + const BYTE *const dictStart = dictBase + ctx->lowLimit; |
| + |
| + const U32 maxSearches = 1 << ctx->params.cParams.searchLog; |
| + const U32 mls = ctx->params.cParams.searchLength; |
| + |
| + typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch); |
| + searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS; |
| + |
| + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; |
| + |
| + /* init */ |
| + ctx->nextToUpdate3 = ctx->nextToUpdate; |
| + ip += (ip == prefixStart); |
| + |
| + /* Match Loop */ |
| + while (ip < ilimit) { |
| + size_t matchLength = 0; |
| + size_t offset = 0; |
| + const BYTE *start = ip + 1; |
| + U32 curr = (U32)(ip - base); |
| + |
| + /* check repCode */ |
| + { |
| + const U32 repIndex = (U32)(curr + 1 - offset_1); |
| + const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *const repMatch = repBase + repIndex; |
| + if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ |
| + if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) { |
| + /* repcode detected we should take it */ |
| + const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; |
| + matchLength = |
| + ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; |
| + if (depth == 0) |
| + goto _storeSequence; |
| + } |
| + } |
| + |
| + /* first search (depth 0) */ |
| + { |
| + size_t offsetFound = 99999999; |
| + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); |
| + if (ml2 > matchLength) |
| + matchLength = ml2, start = ip, offset = offsetFound; |
| + } |
| + |
| + if (matchLength < EQUAL_READ32) { |
| + ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ |
| + continue; |
| + } |
| + |
| + /* let's try to find a better solution */ |
| + if (depth >= 1) |
| + while (ip < ilimit) { |
| + ip++; |
| + curr++; |
| + /* check repCode */ |
| + if (offset) { |
| + const U32 repIndex = (U32)(curr - offset_1); |
| + const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *const repMatch = repBase + repIndex; |
| + if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ |
| + if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { |
| + /* repcode detected */ |
| + const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; |
| + size_t const repLength = |
| + ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + |
| + EQUAL_READ32; |
| + int const gain2 = (int)(repLength * 3); |
| + int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1); |
| + if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) |
| + matchLength = repLength, offset = 0, start = ip; |
| + } |
| + } |
| + |
| + /* search match, depth 1 */ |
| + { |
| + size_t offset2 = 99999999; |
| + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); |
| + int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ |
| + int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4); |
| + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { |
| + matchLength = ml2, offset = offset2, start = ip; |
| + continue; /* search a better one */ |
| + } |
| + } |
| + |
| + /* let's find an even better one */ |
| + if ((depth == 2) && (ip < ilimit)) { |
| + ip++; |
| + curr++; |
| + /* check repCode */ |
| + if (offset) { |
| + const U32 repIndex = (U32)(curr - offset_1); |
| + const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *const repMatch = repBase + repIndex; |
| + if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ |
| + if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { |
| + /* repcode detected */ |
| + const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; |
| + size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, |
| + repEnd, prefixStart) + |
| + EQUAL_READ32; |
| + int gain2 = (int)(repLength * 4); |
| + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1); |
| + if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) |
| + matchLength = repLength, offset = 0, start = ip; |
| + } |
| + } |
| + |
| + /* search match, depth 2 */ |
| + { |
| + size_t offset2 = 99999999; |
| + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); |
| + int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */ |
| + int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7); |
| + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { |
| + matchLength = ml2, offset = offset2, start = ip; |
| + continue; |
| + } |
| + } |
| + } |
| + break; /* nothing found : store previous solution */ |
| + } |
| + |
| + /* catch up */ |
| + if (offset) { |
| + U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE)); |
| + const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; |
| + const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; |
| + while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) { |
| + start--; |
| + match--; |
| + matchLength++; |
| + } /* catch up */ |
| + offset_2 = offset_1; |
| + offset_1 = (U32)(offset - ZSTD_REP_MOVE); |
| + } |
| + |
| + /* store sequence */ |
| + _storeSequence : { |
| + size_t const litLength = start - anchor; |
| + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH); |
| + anchor = ip = start + matchLength; |
| + } |
| + |
| + /* check immediate repcode */ |
| + while (ip <= ilimit) { |
| + const U32 repIndex = (U32)((ip - base) - offset_2); |
| + const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *const repMatch = repBase + repIndex; |
| + if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ |
| + if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) { |
| +				/* repcode detected : we should take it */ |
| + const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; |
| + matchLength = |
| + ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; |
| + offset = offset_2; |
| + offset_2 = offset_1; |
| + offset_1 = (U32)offset; /* swap offset history */ |
| + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH); |
| + ip += matchLength; |
| + anchor = ip; |
| + continue; /* faster when present ... (?) */ |
| + } |
| + break; |
| + } |
| + } |
| + |
| + /* Save reps for next block */ |
| + ctx->repToConfirm[0] = offset_1; |
| + ctx->repToConfirm[1] = offset_2; |
| + |
| + /* Last Literals */ |
| + { |
| + size_t const lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
| +void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); } |
| + |
| +static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1); |
| +} |
| + |
| +static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2); |
| +} |
| + |
| +static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2); |
| +} |
| + |
| +/* The optimal parser */ |
| +#include "zstd_opt.h" |
| + |
| +static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| +#ifdef ZSTD_OPT_H_91842398743 |
| + ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0); |
| +#else |
| + (void)ctx; |
| + (void)src; |
| + (void)srcSize; |
| + return; |
| +#endif |
| +} |
| + |
| +static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| +#ifdef ZSTD_OPT_H_91842398743 |
| + ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1); |
| +#else |
| + (void)ctx; |
| + (void)src; |
| + (void)srcSize; |
| + return; |
| +#endif |
| +} |
| + |
| +static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| +#ifdef ZSTD_OPT_H_91842398743 |
| + ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0); |
| +#else |
| + (void)ctx; |
| + (void)src; |
| + (void)srcSize; |
| + return; |
| +#endif |
| +} |
| + |
| +static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) |
| +{ |
| +#ifdef ZSTD_OPT_H_91842398743 |
| + ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1); |
| +#else |
| + (void)ctx; |
| + (void)src; |
| + (void)srcSize; |
| + return; |
| +#endif |
| +} |
| + |
| +typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize); |
| + |
| +static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict) |
| +{ |
| + static const ZSTD_blockCompressor blockCompressor[2][8] = { |
| + {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, |
| + ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2}, |
| + {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, |
| + ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}}; |
| + |
| + return blockCompressor[extDict][(U32)strat]; |
| +} |
| + |
| +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit); |
| + const BYTE *const base = zc->base; |
| + const BYTE *const istart = (const BYTE *)src; |
| + const U32 curr = (U32)(istart - base); |
| + if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1) |
| + return 0; /* don't even attempt compression below a certain srcSize */ |
| + ZSTD_resetSeqStore(&(zc->seqStore)); |
| + if (curr > zc->nextToUpdate + 384) |
| +		zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* the match tree is not updated while skipping very long rep matches; bound the catch-up work */ |
| + blockCompressor(zc, src, srcSize); |
| + return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize); |
| +} |
| + |
| +/*! ZSTD_compress_generic() : |
| +* Compress a chunk of data into one or multiple blocks. |
| +* All blocks will be terminated, all input will be consumed. |
| +* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. |
| +* The frame is assumed to be already started (header already produced). |
| +* @return : compressed size, or an error code |
| +*/ |
| +static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk) |
| +{ |
| + size_t blockSize = cctx->blockSize; |
| + size_t remaining = srcSize; |
| + const BYTE *ip = (const BYTE *)src; |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *op = ostart; |
| + U32 const maxDist = 1 << cctx->params.cParams.windowLog; |
| + |
| + if (cctx->params.fParams.checksumFlag && srcSize) |
| + xxh64_update(&cctx->xxhState, src, srcSize); |
| + |
| + while (remaining) { |
| + U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); |
| + size_t cSize; |
| + |
| + if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) |
| + return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ |
| + if (remaining < blockSize) |
| + blockSize = remaining; |
| + |
| + /* preemptive overflow correction */ |
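| +		/* |
| +		 * Match indexes are 32-bit offsets from `base`. Once lowLimit exceeds |
| +		 * 3 * 2^29 the indexes are approaching the U32 limit, so base, dictBase |
| +		 * and the limits are shifted down by `correction` and the match tables |
| +		 * are adjusted accordingly via ZSTD_reduceIndex(). |
| +		 */ |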
| + if (cctx->lowLimit > (3U << 29)) { |
| + U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1; |
| + U32 const curr = (U32)(ip - cctx->base); |
| + U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog); |
| + U32 const correction = curr - newCurr; |
| + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30); |
| + ZSTD_reduceIndex(cctx, correction); |
| + cctx->base += correction; |
| + cctx->dictBase += correction; |
| + cctx->lowLimit -= correction; |
| + cctx->dictLimit -= correction; |
| + if (cctx->nextToUpdate < correction) |
| + cctx->nextToUpdate = 0; |
| + else |
| + cctx->nextToUpdate -= correction; |
| + } |
| + |
| + if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) { |
| + /* enforce maxDist */ |
| + U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist; |
| + if (cctx->lowLimit < newLowLimit) |
| + cctx->lowLimit = newLowLimit; |
| + if (cctx->dictLimit < cctx->lowLimit) |
| + cctx->dictLimit = cctx->lowLimit; |
| + } |
| + |
| + cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize); |
| + if (ZSTD_isError(cSize)) |
| + return cSize; |
| + |
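| +		/* |
| +		 * Block header is 3 bytes, written little-endian : bit 0 = lastBlock |
| +		 * flag, bits 1-2 = block type (bt_raw or bt_compressed here), |
| +		 * bits 3-23 = block size. |
| +		 */ |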
| + if (cSize == 0) { /* block is not compressible */ |
| + U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3); |
| + if (blockSize + ZSTD_blockHeaderSize > dstCapacity) |
| + return ERROR(dstSize_tooSmall); |
| +			ZSTD_writeLE32(op, cBlockHeader24); /* no problem : the 4th byte will be overwritten by the block content */ |
| + memcpy(op + ZSTD_blockHeaderSize, ip, blockSize); |
| + cSize = ZSTD_blockHeaderSize + blockSize; |
| + } else { |
| + U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3); |
| + ZSTD_writeLE24(op, cBlockHeader24); |
| + cSize += ZSTD_blockHeaderSize; |
| + } |
| + |
| + remaining -= blockSize; |
| + dstCapacity -= cSize; |
| + ip += blockSize; |
| + op += cSize; |
| + } |
| + |
| + if (lastFrameChunk && (op > ostart)) |
| + cctx->stage = ZSTDcs_ending; |
| + return op - ostart; |
| +} |
| + |
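| +/* |
| + * The frame header starts with the 4-byte magic number, then a descriptor |
| + * byte (dictID size code in bits 0-1, checksum flag in bit 2, single-segment |
| + * flag in bit 5, frame content size code in bits 6-7), followed by the |
| + * optional window descriptor, dictionary ID and frame content size fields. |
| + */ |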
| +static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID) |
| +{ |
| + BYTE *const op = (BYTE *)dst; |
| + U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */ |
| + U32 const checksumFlag = params.fParams.checksumFlag > 0; |
| + U32 const windowSize = 1U << params.cParams.windowLog; |
| + U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); |
| + BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); |
| + U32 const fcsCode = |
| + params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */ |
| +	BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6)); |
| + size_t pos; |
| + |
| + if (dstCapacity < ZSTD_frameHeaderSize_max) |
| + return ERROR(dstSize_tooSmall); |
| + |
| + ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER); |
| +	op[4] = frameHeaderDescriptionByte; |
| + pos = 5; |
| + if (!singleSegment) |
| + op[pos++] = windowLogByte; |
| + switch (dictIDSizeCode) { |
| + default: /* impossible */ |
| + case 0: break; |
| + case 1: |
| + op[pos] = (BYTE)(dictID); |
| + pos++; |
| + break; |
| + case 2: |
| + ZSTD_writeLE16(op + pos, (U16)dictID); |
| + pos += 2; |
| + break; |
| + case 3: |
| + ZSTD_writeLE32(op + pos, dictID); |
| + pos += 4; |
| + break; |
| + } |
| + switch (fcsCode) { |
| + default: /* impossible */ |
| + case 0: |
| + if (singleSegment) |
| + op[pos++] = (BYTE)(pledgedSrcSize); |
| + break; |
| + case 1: |
| + ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256)); |
| + pos += 2; |
| + break; |
| + case 2: |
| + ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize)); |
| + pos += 4; |
| + break; |
| + case 3: |
| + ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize)); |
| + pos += 8; |
| + break; |
| + } |
| + return pos; |
| +} |
| + |
| +static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk) |
| +{ |
| + const BYTE *const ip = (const BYTE *)src; |
| + size_t fhSize = 0; |
| + |
| + if (cctx->stage == ZSTDcs_created) |
| + return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */ |
| + |
| + if (frame && (cctx->stage == ZSTDcs_init)) { |
| + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID); |
| + if (ZSTD_isError(fhSize)) |
| + return fhSize; |
| + dstCapacity -= fhSize; |
| + dst = (char *)dst + fhSize; |
| + cctx->stage = ZSTDcs_ongoing; |
| + } |
| + |
| + /* Check if blocks follow each other */ |
| + if (src != cctx->nextSrc) { |
| + /* not contiguous */ |
| + ptrdiff_t const delta = cctx->nextSrc - ip; |
| + cctx->lowLimit = cctx->dictLimit; |
| + cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base); |
| + cctx->dictBase = cctx->base; |
| + cctx->base -= delta; |
| + cctx->nextToUpdate = cctx->dictLimit; |
| + if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) |
| + cctx->lowLimit = cctx->dictLimit; /* too small extDict */ |
| + } |
| + |
| + /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ |
| + if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) { |
| + ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase; |
| + U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx; |
| + cctx->lowLimit = lowLimitMax; |
| + } |
| + |
| + cctx->nextSrc = ip + srcSize; |
| + |
| + if (srcSize) { |
| + size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) |
| + : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize); |
| + if (ZSTD_isError(cSize)) |
| + return cSize; |
| + return cSize + fhSize; |
| + } else |
| + return fhSize; |
| +} |
| + |
| +size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); |
| +} |
| + |
| +size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); } |
| + |
| +size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx); |
| + if (srcSize > blockSizeMax) |
| + return ERROR(srcSize_wrong); |
| + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); |
| +} |
| + |
| +/*! ZSTD_loadDictionaryContent() : |
| + * @return : 0, or an error code |
| + */ |
| +static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize) |
| +{ |
| + const BYTE *const ip = (const BYTE *)src; |
| + const BYTE *const iend = ip + srcSize; |
| + |
| + /* input becomes curr prefix */ |
| + zc->lowLimit = zc->dictLimit; |
| + zc->dictLimit = (U32)(zc->nextSrc - zc->base); |
| + zc->dictBase = zc->base; |
| + zc->base += ip - zc->nextSrc; |
| + zc->nextToUpdate = zc->dictLimit; |
| + zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base); |
| + |
| + zc->nextSrc = iend; |
| + if (srcSize <= HASH_READ_SIZE) |
| + return 0; |
| + |
| + switch (zc->params.cParams.strategy) { |
| + case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break; |
| + |
| + case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break; |
| + |
| + case ZSTD_greedy: |
| + case ZSTD_lazy: |
| + case ZSTD_lazy2: |
| + if (srcSize >= HASH_READ_SIZE) |
| + ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength); |
| + break; |
| + |
| + case ZSTD_btlazy2: |
| + case ZSTD_btopt: |
| + case ZSTD_btopt2: |
| + if (srcSize >= HASH_READ_SIZE) |
| + ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength); |
| + break; |
| + |
| + default: |
| + return ERROR(GENERIC); /* strategy doesn't exist; impossible */ |
| + } |
| + |
| + zc->nextToUpdate = (U32)(iend - zc->base); |
| + return 0; |
| +} |
| + |
| +/* Dictionaries that assign zero probability to symbols that do show up cause problems |
| +   during FSE encoding. Refuse dictionaries that assign zero probability to symbols |
| +   that we may encounter during compression. |
| +   NOTE: This behavior is not standard and could be improved in the future. */ |
| +static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) |
| +{ |
| + U32 s; |
| + if (dictMaxSymbolValue < maxSymbolValue) |
| + return ERROR(dictionary_corrupted); |
| + for (s = 0; s <= maxSymbolValue; ++s) { |
| + if (normalizedCounter[s] == 0) |
| + return ERROR(dictionary_corrupted); |
| + } |
| + return 0; |
| +} |
| + |
| +/* Dictionary format : |
| + * See : |
| + * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format |
| + */ |
| +/*! ZSTD_loadZstdDictionary() : |
| + * @return : 0, or an error code |
| + * assumptions : the magic number has already been checked, |
| + *               and dictSize > 8 |
| + */ |
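| +/* |
| + * Layout parsed below : 4-byte magic number, 4-byte dictID, Huffman table |
| + * for literals, FSE tables for offsets, match lengths and literal lengths, |
| + * three 4-byte starting repcodes, then the raw dictionary content. |
| + */ |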
| +static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize) |
| +{ |
| + const BYTE *dictPtr = (const BYTE *)dict; |
| + const BYTE *const dictEnd = dictPtr + dictSize; |
| + short offcodeNCount[MaxOff + 1]; |
| + unsigned offcodeMaxValue = MaxOff; |
| + |
| + dictPtr += 4; /* skip magic number */ |
| + cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr); |
| + dictPtr += 4; |
| + |
| + { |
| + size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters)); |
| + if (HUF_isError(hufHeaderSize)) |
| + return ERROR(dictionary_corrupted); |
| + dictPtr += hufHeaderSize; |
| + } |
| + |
| + { |
| + unsigned offcodeLog; |
| + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr); |
| + if (FSE_isError(offcodeHeaderSize)) |
| + return ERROR(dictionary_corrupted); |
| + if (offcodeLog > OffFSELog) |
| + return ERROR(dictionary_corrupted); |
| + /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ |
| + CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), |
| + dictionary_corrupted); |
| + dictPtr += offcodeHeaderSize; |
| + } |
| + |
| + { |
| + short matchlengthNCount[MaxML + 1]; |
| + unsigned matchlengthMaxValue = MaxML, matchlengthLog; |
| + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr); |
| + if (FSE_isError(matchlengthHeaderSize)) |
| + return ERROR(dictionary_corrupted); |
| + if (matchlengthLog > MLFSELog) |
| + return ERROR(dictionary_corrupted); |
| + /* Every match length code must have non-zero probability */ |
| + CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); |
| + CHECK_E( |
| + FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), |
| + dictionary_corrupted); |
| + dictPtr += matchlengthHeaderSize; |
| + } |
| + |
| + { |
| + short litlengthNCount[MaxLL + 1]; |
| + unsigned litlengthMaxValue = MaxLL, litlengthLog; |
| + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr); |
| + if (FSE_isError(litlengthHeaderSize)) |
| + return ERROR(dictionary_corrupted); |
| + if (litlengthLog > LLFSELog) |
| + return ERROR(dictionary_corrupted); |
| + /* Every literal length code must have non-zero probability */ |
| + CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); |
| + CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)), |
| + dictionary_corrupted); |
| + dictPtr += litlengthHeaderSize; |
| + } |
| + |
| + if (dictPtr + 12 > dictEnd) |
| + return ERROR(dictionary_corrupted); |
| + cctx->rep[0] = ZSTD_readLE32(dictPtr + 0); |
| + cctx->rep[1] = ZSTD_readLE32(dictPtr + 4); |
| + cctx->rep[2] = ZSTD_readLE32(dictPtr + 8); |
| + dictPtr += 12; |
| + |
| + { |
| + size_t const dictContentSize = (size_t)(dictEnd - dictPtr); |
| + U32 offcodeMax = MaxOff; |
| + if (dictContentSize <= ((U32)-1) - 128 KB) { |
| + U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ |
| + offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ |
| + } |
| + /* All offset values <= dictContentSize + 128 KB must be representable */ |
| + CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); |
| +		/* All repCodes must be <= dictContentSize and != 0 */ |
| + { |
| + U32 u; |
| + for (u = 0; u < 3; u++) { |
| + if (cctx->rep[u] == 0) |
| + return ERROR(dictionary_corrupted); |
| + if (cctx->rep[u] > dictContentSize) |
| + return ERROR(dictionary_corrupted); |
| + } |
| + } |
| + |
| + cctx->flagStaticTables = 1; |
| + cctx->flagStaticHufTable = HUF_repeat_valid; |
| + return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize); |
| + } |
| +} |
| + |
| +/** ZSTD_compress_insertDictionary() : |
| +* @return : 0, or an error code */ |
| +static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize) |
| +{ |
| + if ((dict == NULL) || (dictSize <= 8)) |
| + return 0; |
| + |
| + /* dict as pure content */ |
| + if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict)) |
| + return ZSTD_loadDictionaryContent(cctx, dict, dictSize); |
| + |
| + /* dict as zstd dictionary */ |
| + return ZSTD_loadZstdDictionary(cctx, dict, dictSize); |
| +} |
| + |
| +/*! ZSTD_compressBegin_internal() : |
| +* @return : 0, or an error code */ |
| +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize) |
| +{ |
| + ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue; |
| + CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp)); |
| + return ZSTD_compress_insertDictionary(cctx, dict, dictSize); |
| +} |
| + |
| +/*! ZSTD_compressBegin_advanced() : |
| +* @return : 0, or an error code */ |
| +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) |
| +{ |
| + /* compression parameters verification and optimization */ |
| + CHECK_F(ZSTD_checkCParams(params.cParams)); |
| + return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize); |
| +} |
| + |
| +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel) |
| +{ |
| + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); |
| + return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0); |
| +} |
| + |
| +size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); } |
| + |
| +/*! ZSTD_writeEpilogue() : |
| +* Ends a frame. |
| +* @return : nb of bytes written into dst (or an error code) */ |
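| +/* |
| + * The epilogue is a final empty raw block (unless the last block has already |
| + * been written) plus, when checksumFlag is set, the low 32 bits of the |
| + * xxhash64 of all the input processed so far. |
| + */ |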
| +static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity) |
| +{ |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *op = ostart; |
| + size_t fhSize = 0; |
| + |
| + if (cctx->stage == ZSTDcs_created) |
| + return ERROR(stage_wrong); /* init missing */ |
| + |
| + /* special case : empty frame */ |
| + if (cctx->stage == ZSTDcs_init) { |
| + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0); |
| + if (ZSTD_isError(fhSize)) |
| + return fhSize; |
| + dstCapacity -= fhSize; |
| + op += fhSize; |
| + cctx->stage = ZSTDcs_ongoing; |
| + } |
| + |
| + if (cctx->stage != ZSTDcs_ending) { |
| + /* write one last empty block, make it the "last" block */ |
| + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0; |
| + if (dstCapacity < 4) |
| + return ERROR(dstSize_tooSmall); |
| + ZSTD_writeLE32(op, cBlockHeader24); |
| + op += ZSTD_blockHeaderSize; |
| + dstCapacity -= ZSTD_blockHeaderSize; |
| + } |
| + |
| + if (cctx->params.fParams.checksumFlag) { |
| + U32 const checksum = (U32)xxh64_digest(&cctx->xxhState); |
| + if (dstCapacity < 4) |
| + return ERROR(dstSize_tooSmall); |
| + ZSTD_writeLE32(op, checksum); |
| + op += 4; |
| + } |
| + |
| + cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ |
| + return op - ostart; |
| +} |
| + |
| +size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + size_t endResult; |
| + size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1); |
| + if (ZSTD_isError(cSize)) |
| + return cSize; |
| + endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize); |
| + if (ZSTD_isError(endResult)) |
| + return endResult; |
| + return cSize + endResult; |
| +} |
| + |
| +static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, |
| + ZSTD_parameters params) |
| +{ |
| + CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize)); |
| + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); |
| +} |
| + |
| +size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, |
| + ZSTD_parameters params) |
| +{ |
| + return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); |
| +} |
| + |
| +size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params) |
| +{ |
| + return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params); |
| +} |
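| + |
| +/* |
| + * Illustrative one-shot usage sketch (not part of the upstream sources; the |
| + * ZSTD_initCCtx() signature is assumed from zstd.h, by analogy with |
| + * ZSTD_initDCtx() further below; error handling omitted) : |
| + * |
| + *	ZSTD_parameters params = ZSTD_getParams(3, srcSize, 0); |
| + *	size_t const wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams); |
| + *	void *wksp = vmalloc(wkspSize); |
| + *	ZSTD_CCtx *cctx = ZSTD_initCCtx(wksp, wkspSize); |
| + *	size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params); |
| + *	/* check ZSTD_isError(cSize) before using the result *\/ |
| + *	vfree(wksp); |
| + */ |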
| + |
| +/* ===== Dictionary API ===== */ |
| + |
| +struct ZSTD_CDict_s { |
| + void *dictBuffer; |
| + const void *dictContent; |
| + size_t dictContentSize; |
| + ZSTD_CCtx *refContext; |
| +}; /* typedef'd to ZSTD_CDict within "zstd.h" */ |
| + |
| +size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); } |
| + |
| +static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem) |
| +{ |
| + if (!customMem.customAlloc || !customMem.customFree) |
| + return NULL; |
| + |
| + { |
| + ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem); |
| + ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem); |
| + |
| + if (!cdict || !cctx) { |
| + ZSTD_free(cdict, customMem); |
| + ZSTD_freeCCtx(cctx); |
| + return NULL; |
| + } |
| + |
| + if ((byReference) || (!dictBuffer) || (!dictSize)) { |
| + cdict->dictBuffer = NULL; |
| + cdict->dictContent = dictBuffer; |
| + } else { |
| + void *const internalBuffer = ZSTD_malloc(dictSize, customMem); |
| + if (!internalBuffer) { |
| + ZSTD_free(cctx, customMem); |
| + ZSTD_free(cdict, customMem); |
| + return NULL; |
| + } |
| + memcpy(internalBuffer, dictBuffer, dictSize); |
| + cdict->dictBuffer = internalBuffer; |
| + cdict->dictContent = internalBuffer; |
| + } |
| + |
| + { |
| + size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); |
| + if (ZSTD_isError(errorCode)) { |
| + ZSTD_free(cdict->dictBuffer, customMem); |
| + ZSTD_free(cdict, customMem); |
| + ZSTD_freeCCtx(cctx); |
| + return NULL; |
| + } |
| + } |
| + |
| + cdict->refContext = cctx; |
| + cdict->dictContentSize = dictSize; |
| + return cdict; |
| + } |
| +} |
| + |
| +ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); |
| + return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem); |
| +} |
| + |
| +size_t ZSTD_freeCDict(ZSTD_CDict *cdict) |
| +{ |
| + if (cdict == NULL) |
| + return 0; /* support free on NULL */ |
| + { |
| + ZSTD_customMem const cMem = cdict->refContext->customMem; |
| + ZSTD_freeCCtx(cdict->refContext); |
| + ZSTD_free(cdict->dictBuffer, cMem); |
| + ZSTD_free(cdict, cMem); |
| + return 0; |
| + } |
| +} |
| + |
| +static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); } |
| + |
| +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize) |
| +{ |
| + if (cdict->dictContentSize) |
| + CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize)) |
| + else { |
| + ZSTD_parameters params = cdict->refContext->params; |
| + params.fParams.contentSizeFlag = (pledgedSrcSize > 0); |
| + CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize)); |
| + } |
| + return 0; |
| +} |
| + |
| +/*! ZSTD_compress_usingCDict() : |
| +* Compression using a digested Dictionary. |
| +* Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times. |
| +* Note that compression level is decided during dictionary creation */ |
| +size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict) |
| +{ |
| + CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize)); |
| + |
| + if (cdict->refContext->params.fParams.contentSizeFlag == 1) { |
| + cctx->params.fParams.contentSizeFlag = 1; |
| + cctx->frameContentSize = srcSize; |
| + } else { |
| + cctx->params.fParams.contentSizeFlag = 0; |
| + } |
| + |
| + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); |
| +} |
| + |
| +/* ****************************************************************** |
| +* Streaming |
| +********************************************************************/ |
| + |
| +typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage; |
| + |
| +struct ZSTD_CStream_s { |
| + ZSTD_CCtx *cctx; |
| + ZSTD_CDict *cdictLocal; |
| + const ZSTD_CDict *cdict; |
| + char *inBuff; |
| + size_t inBuffSize; |
| + size_t inToCompress; |
| + size_t inBuffPos; |
| + size_t inBuffTarget; |
| + size_t blockSize; |
| + char *outBuff; |
| + size_t outBuffSize; |
| + size_t outBuffContentSize; |
| + size_t outBuffFlushedSize; |
| + ZSTD_cStreamStage stage; |
| + U32 checksum; |
| + U32 frameEnded; |
| + U64 pledgedSrcSize; |
| + U64 inputProcessed; |
| + ZSTD_parameters params; |
| + ZSTD_customMem customMem; |
| +}; /* typedef'd to ZSTD_CStream within "zstd.h" */ |
| + |
| +size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams) |
| +{ |
| + size_t const inBuffSize = (size_t)1 << cParams.windowLog; |
| + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize); |
| + size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; |
| + |
| + return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); |
| +} |
| + |
| +ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem) |
| +{ |
| + ZSTD_CStream *zcs; |
| + |
| + if (!customMem.customAlloc || !customMem.customFree) |
| + return NULL; |
| + |
| + zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem); |
| + if (zcs == NULL) |
| + return NULL; |
| + memset(zcs, 0, sizeof(ZSTD_CStream)); |
| + memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem)); |
| + zcs->cctx = ZSTD_createCCtx_advanced(customMem); |
| + if (zcs->cctx == NULL) { |
| + ZSTD_freeCStream(zcs); |
| + return NULL; |
| + } |
| + return zcs; |
| +} |
| + |
| +size_t ZSTD_freeCStream(ZSTD_CStream *zcs) |
| +{ |
| + if (zcs == NULL) |
| + return 0; /* support free on NULL */ |
| + { |
| + ZSTD_customMem const cMem = zcs->customMem; |
| + ZSTD_freeCCtx(zcs->cctx); |
| + zcs->cctx = NULL; |
| + ZSTD_freeCDict(zcs->cdictLocal); |
| + zcs->cdictLocal = NULL; |
| + ZSTD_free(zcs->inBuff, cMem); |
| + zcs->inBuff = NULL; |
| + ZSTD_free(zcs->outBuff, cMem); |
| + zcs->outBuff = NULL; |
| + ZSTD_free(zcs, cMem); |
| + return 0; |
| + } |
| +} |
| + |
| +/*====== Initialization ======*/ |
| + |
| +size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } |
| +size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; } |
| + |
| +static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize) |
| +{ |
| + if (zcs->inBuffSize == 0) |
| +		return ERROR(stage_wrong); /* zcs has not been initialized at least once => cannot reset */ |
| + |
| + if (zcs->cdict) |
| + CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize)) |
| + else |
| + CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize)); |
| + |
| + zcs->inToCompress = 0; |
| + zcs->inBuffPos = 0; |
| + zcs->inBuffTarget = zcs->blockSize; |
| + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; |
| + zcs->stage = zcss_load; |
| + zcs->frameEnded = 0; |
| + zcs->pledgedSrcSize = pledgedSrcSize; |
| + zcs->inputProcessed = 0; |
| + return 0; /* ready to go */ |
| +} |
| + |
| +size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize) |
| +{ |
| + |
| + zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0); |
| + |
| + return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); |
| +} |
| + |
| +static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) |
| +{ |
| + /* allocate buffers */ |
| + { |
| + size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; |
| + if (zcs->inBuffSize < neededInBuffSize) { |
| + zcs->inBuffSize = neededInBuffSize; |
| + ZSTD_free(zcs->inBuff, zcs->customMem); |
| + zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem); |
| + if (zcs->inBuff == NULL) |
| + return ERROR(memory_allocation); |
| + } |
| + zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize); |
| + } |
| + if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) { |
| + zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1; |
| + ZSTD_free(zcs->outBuff, zcs->customMem); |
| + zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem); |
| + if (zcs->outBuff == NULL) |
| + return ERROR(memory_allocation); |
| + } |
| + |
| + if (dict && dictSize >= 8) { |
| + ZSTD_freeCDict(zcs->cdictLocal); |
| + zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem); |
| + if (zcs->cdictLocal == NULL) |
| + return ERROR(memory_allocation); |
| + zcs->cdict = zcs->cdictLocal; |
| + } else |
| + zcs->cdict = NULL; |
| + |
| + zcs->checksum = params.fParams.checksumFlag > 0; |
| + zcs->params = params; |
| + |
| + return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); |
| +} |
| + |
| +ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); |
| + ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem); |
| + if (zcs) { |
| + size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize); |
| + if (ZSTD_isError(code)) { |
| + return NULL; |
| + } |
| + } |
| + return zcs; |
| +} |
| + |
| +ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict); |
| + ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize); |
| + if (zcs) { |
| + zcs->cdict = cdict; |
| + if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) { |
| + return NULL; |
| + } |
| + } |
| + return zcs; |
| +} |
| + |
| +/*====== Compression ======*/ |
| + |
| +typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e; |
| + |
| +ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + size_t const length = MIN(dstCapacity, srcSize); |
| + memcpy(dst, src, length); |
| + return length; |
| +} |
| + |
| +static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush) |
| +{ |
| + U32 someMoreWork = 1; |
| + const char *const istart = (const char *)src; |
| + const char *const iend = istart + *srcSizePtr; |
| + const char *ip = istart; |
| + char *const ostart = (char *)dst; |
| + char *const oend = ostart + *dstCapacityPtr; |
| + char *op = ostart; |
| + |
| + while (someMoreWork) { |
| + switch (zcs->stage) { |
| + case zcss_init: |
| +			return ERROR(init_missing); /* call ZSTD_initCStream() first ! */ |
| + |
| + case zcss_load: |
| + /* complete inBuffer */ |
| + { |
| + size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; |
| + size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip); |
| + zcs->inBuffPos += loaded; |
| + ip += loaded; |
| + if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) { |
| + someMoreWork = 0; |
| + break; /* not enough input to get a full block : stop there, wait for more */ |
| + } |
| + } |
| + /* compress curr block (note : this stage cannot be stopped in the middle) */ |
| + { |
| + void *cDst; |
| + size_t cSize; |
| + size_t const iSize = zcs->inBuffPos - zcs->inToCompress; |
| + size_t oSize = oend - op; |
| + if (oSize >= ZSTD_compressBound(iSize)) |
| + cDst = op; /* compress directly into output buffer (avoid flush stage) */ |
| + else |
| + cDst = zcs->outBuff, oSize = zcs->outBuffSize; |
| + cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) |
| + : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); |
| + if (ZSTD_isError(cSize)) |
| + return cSize; |
| + if (flush == zsf_end) |
| + zcs->frameEnded = 1; |
| + /* prepare next block */ |
| + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; |
| + if (zcs->inBuffTarget > zcs->inBuffSize) |
| + zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */ |
| + zcs->inToCompress = zcs->inBuffPos; |
| + if (cDst == op) { |
| + op += cSize; |
| + break; |
| + } /* no need to flush */ |
| + zcs->outBuffContentSize = cSize; |
| + zcs->outBuffFlushedSize = 0; |
| + zcs->stage = zcss_flush; /* pass-through to flush stage */ |
| + } |
| + |
| + case zcss_flush: { |
| + size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; |
| + size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); |
| + op += flushed; |
| + zcs->outBuffFlushedSize += flushed; |
| + if (toFlush != flushed) { |
| + someMoreWork = 0; |
| + break; |
| + } /* dst too small to store flushed data : stop there */ |
| + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; |
| + zcs->stage = zcss_load; |
| + break; |
| + } |
| + |
| + case zcss_final: |
| + someMoreWork = 0; /* do nothing */ |
| + break; |
| + |
| + default: |
| + return ERROR(GENERIC); /* impossible */ |
| + } |
| + } |
| + |
| + *srcSizePtr = ip - istart; |
| + *dstCapacityPtr = op - ostart; |
| + zcs->inputProcessed += *srcSizePtr; |
| + if (zcs->frameEnded) |
| + return 0; |
| + { |
| + size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; |
| + if (hintInSize == 0) |
| + hintInSize = zcs->blockSize; |
| + return hintInSize; |
| + } |
| +} |
| + |
| +size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input) |
| +{ |
| + size_t sizeRead = input->size - input->pos; |
| + size_t sizeWritten = output->size - output->pos; |
| + size_t const result = |
| + ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather); |
| + input->pos += sizeRead; |
| + output->pos += sizeWritten; |
| + return result; |
| +} |
| + |
| +/*====== Finalize ======*/ |
| + |
| +/*! ZSTD_flushStream() : |
| +* @return : amount of data remaining to flush */ |
| +size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output) |
| +{ |
| + size_t srcSize = 0; |
| + size_t sizeWritten = output->size - output->pos; |
| + size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize, |
| + &srcSize, /* use a valid src address instead of NULL */ |
| + zsf_flush); |
| + output->pos += sizeWritten; |
| + if (ZSTD_isError(result)) |
| + return result; |
| + return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ |
| +} |
| + |
| +size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output) |
| +{ |
| + BYTE *const ostart = (BYTE *)(output->dst) + output->pos; |
| + BYTE *const oend = (BYTE *)(output->dst) + output->size; |
| + BYTE *op = ostart; |
| + |
| + if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize)) |
| + return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */ |
| + |
| + if (zcs->stage != zcss_final) { |
| + /* flush whatever remains */ |
| + size_t srcSize = 0; |
| + size_t sizeWritten = output->size - output->pos; |
| + size_t const notEnded = |
| + ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */ |
| + size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; |
| + op += sizeWritten; |
| + if (remainingToFlush) { |
| + output->pos += sizeWritten; |
| + return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4); |
| + } |
| + /* create epilogue */ |
| + zcs->stage = zcss_final; |
| + zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, |
| + 0); /* write epilogue, including final empty block, into outBuff */ |
| + } |
| + |
| + /* flush epilogue */ |
| + { |
| + size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; |
| + size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); |
| + op += flushed; |
| + zcs->outBuffFlushedSize += flushed; |
| + output->pos += op - ostart; |
| + if (toFlush == flushed) |
| + zcs->stage = zcss_init; /* end reached */ |
| + return toFlush - flushed; |
| + } |
| +} |
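| + |
| +/* |
| + * Illustrative streaming sketch (not part of the upstream sources; error |
| + * handling and output-buffer management omitted) : |
| + * |
| + *	ZSTD_parameters params = ZSTD_getParams(3, 0, 0); |
| + *	size_t const wkspSize = ZSTD_CStreamWorkspaceBound(params.cParams); |
| + *	ZSTD_CStream *zcs = ZSTD_initCStream(params, 0, vmalloc(wkspSize), wkspSize); |
| + *	ZSTD_inBuffer in = { .src = src, .size = srcSize, .pos = 0 }; |
| + *	ZSTD_outBuffer out = { .dst = dst, .size = dstCapacity, .pos = 0 }; |
| + *	while (in.pos < in.size) |
| + *		ZSTD_compressStream(zcs, &out, &in); |
| + *	while (ZSTD_endStream(zcs, &out) != 0) |
| + *		; /* flush the epilogue, providing more output space if needed *\/ |
| + */ |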
| + |
| +/*-===== Pre-defined compression levels =====-*/ |
| + |
| +#define ZSTD_DEFAULT_CLEVEL 1 |
| +#define ZSTD_MAX_CLEVEL 22 |
| +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } |
| + |
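| +/* Column legend for the tables below : |
| + * W = windowLog, C = chainLog, H = hashLog, S = searchLog, |
| + * L = searchLength, TL/T = targetLength, strat = strategy. |
| + */ |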
| +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = { |
| + { |
| + /* "default" */ |
| + /* W, C, H, S, L, TL, strat */ |
| + {18, 12, 12, 1, 7, 16, ZSTD_fast}, /* level 0 - never used */ |
| + {19, 13, 14, 1, 7, 16, ZSTD_fast}, /* level 1 */ |
| + {19, 15, 16, 1, 6, 16, ZSTD_fast}, /* level 2 */ |
| + {20, 16, 17, 1, 5, 16, ZSTD_dfast}, /* level 3.*/ |
| + {20, 18, 18, 1, 5, 16, ZSTD_dfast}, /* level 4.*/ |
| + {20, 15, 18, 3, 5, 16, ZSTD_greedy}, /* level 5 */ |
| + {21, 16, 19, 2, 5, 16, ZSTD_lazy}, /* level 6 */ |
| + {21, 17, 20, 3, 5, 16, ZSTD_lazy}, /* level 7 */ |
| + {21, 18, 20, 3, 5, 16, ZSTD_lazy2}, /* level 8 */ |
| + {21, 20, 20, 3, 5, 16, ZSTD_lazy2}, /* level 9 */ |
| + {21, 19, 21, 4, 5, 16, ZSTD_lazy2}, /* level 10 */ |
| + {22, 20, 22, 4, 5, 16, ZSTD_lazy2}, /* level 11 */ |
| + {22, 20, 22, 5, 5, 16, ZSTD_lazy2}, /* level 12 */ |
| + {22, 21, 22, 5, 5, 16, ZSTD_lazy2}, /* level 13 */ |
| + {22, 21, 22, 6, 5, 16, ZSTD_lazy2}, /* level 14 */ |
| + {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */ |
| + {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */ |
| + {23, 21, 22, 4, 5, 24, ZSTD_btopt}, /* level 17 */ |
| + {23, 23, 22, 6, 5, 32, ZSTD_btopt}, /* level 18 */ |
| + {23, 23, 22, 6, 3, 48, ZSTD_btopt}, /* level 19 */ |
| + {25, 25, 23, 7, 3, 64, ZSTD_btopt2}, /* level 20 */ |
| + {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */ |
| + {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */ |
| + }, |
| + { |
| + /* for srcSize <= 256 KB */ |
| + /* W, C, H, S, L, T, strat */ |
| + {0, 0, 0, 0, 0, 0, ZSTD_fast}, /* level 0 - not used */ |
| + {18, 13, 14, 1, 6, 8, ZSTD_fast}, /* level 1 */ |
| + {18, 14, 13, 1, 5, 8, ZSTD_dfast}, /* level 2 */ |
| + {18, 16, 15, 1, 5, 8, ZSTD_dfast}, /* level 3 */ |
| + {18, 15, 17, 1, 5, 8, ZSTD_greedy}, /* level 4.*/ |
| + {18, 16, 17, 4, 5, 8, ZSTD_greedy}, /* level 5.*/ |
| + {18, 16, 17, 3, 5, 8, ZSTD_lazy}, /* level 6.*/ |
| + {18, 17, 17, 4, 4, 8, ZSTD_lazy}, /* level 7 */ |
| + {18, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */ |
| + {18, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */ |
| + {18, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */ |
| + {18, 18, 17, 6, 4, 8, ZSTD_lazy2}, /* level 11.*/ |
| + {18, 18, 17, 7, 4, 8, ZSTD_lazy2}, /* level 12.*/ |
| + {18, 19, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13 */ |
| + {18, 18, 18, 4, 4, 16, ZSTD_btopt}, /* level 14.*/ |
| + {18, 18, 18, 4, 3, 16, ZSTD_btopt}, /* level 15.*/ |
| + {18, 19, 18, 6, 3, 32, ZSTD_btopt}, /* level 16.*/ |
| + {18, 19, 18, 8, 3, 64, ZSTD_btopt}, /* level 17.*/ |
| + {18, 19, 18, 9, 3, 128, ZSTD_btopt}, /* level 18.*/ |
| + {18, 19, 18, 10, 3, 256, ZSTD_btopt}, /* level 19.*/ |
| + {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/ |
| + {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/ |
| + {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/ |
| + }, |
| + { |
| + /* for srcSize <= 128 KB */ |
| + /* W, C, H, S, L, T, strat */ |
| + {17, 12, 12, 1, 7, 8, ZSTD_fast}, /* level 0 - not used */ |
| + {17, 12, 13, 1, 6, 8, ZSTD_fast}, /* level 1 */ |
| + {17, 13, 16, 1, 5, 8, ZSTD_fast}, /* level 2 */ |
| + {17, 16, 16, 2, 5, 8, ZSTD_dfast}, /* level 3 */ |
| + {17, 13, 15, 3, 4, 8, ZSTD_greedy}, /* level 4 */ |
| + {17, 15, 17, 4, 4, 8, ZSTD_greedy}, /* level 5 */ |
| + {17, 16, 17, 3, 4, 8, ZSTD_lazy}, /* level 6 */ |
| + {17, 15, 17, 4, 4, 8, ZSTD_lazy2}, /* level 7 */ |
| + {17, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */ |
| + {17, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */ |
| + {17, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */ |
| + {17, 17, 17, 7, 4, 8, ZSTD_lazy2}, /* level 11 */ |
| + {17, 17, 17, 8, 4, 8, ZSTD_lazy2}, /* level 12 */ |
| + {17, 18, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13.*/ |
| + {17, 17, 17, 7, 3, 8, ZSTD_btopt}, /* level 14.*/ |
| + {17, 17, 17, 7, 3, 16, ZSTD_btopt}, /* level 15.*/ |
| + {17, 18, 17, 7, 3, 32, ZSTD_btopt}, /* level 16.*/ |
| + {17, 18, 17, 7, 3, 64, ZSTD_btopt}, /* level 17.*/ |
| + {17, 18, 17, 7, 3, 256, ZSTD_btopt}, /* level 18.*/ |
| + {17, 18, 17, 8, 3, 256, ZSTD_btopt}, /* level 19.*/ |
| + {17, 18, 17, 9, 3, 256, ZSTD_btopt2}, /* level 20.*/ |
| + {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/ |
| + {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/ |
| + }, |
| + { |
| + /* for srcSize <= 16 KB */ |
| + /* W, C, H, S, L, T, strat */ |
| + {14, 12, 12, 1, 7, 6, ZSTD_fast}, /* level 0 - not used */ |
| + {14, 14, 14, 1, 6, 6, ZSTD_fast}, /* level 1 */ |
| + {14, 14, 14, 1, 4, 6, ZSTD_fast}, /* level 2 */ |
| + {14, 14, 14, 1, 4, 6, ZSTD_dfast}, /* level 3.*/ |
| + {14, 14, 14, 4, 4, 6, ZSTD_greedy}, /* level 4.*/ |
| + {14, 14, 14, 3, 4, 6, ZSTD_lazy}, /* level 5.*/ |
| + {14, 14, 14, 4, 4, 6, ZSTD_lazy2}, /* level 6 */ |
| + {14, 14, 14, 5, 4, 6, ZSTD_lazy2}, /* level 7 */ |
| + {14, 14, 14, 6, 4, 6, ZSTD_lazy2}, /* level 8.*/ |
| + {14, 15, 14, 6, 4, 6, ZSTD_btlazy2}, /* level 9.*/ |
| + {14, 15, 14, 3, 3, 6, ZSTD_btopt}, /* level 10.*/ |
| + {14, 15, 14, 6, 3, 8, ZSTD_btopt}, /* level 11.*/ |
| + {14, 15, 14, 6, 3, 16, ZSTD_btopt}, /* level 12.*/ |
| + {14, 15, 14, 6, 3, 24, ZSTD_btopt}, /* level 13.*/ |
| + {14, 15, 15, 6, 3, 48, ZSTD_btopt}, /* level 14.*/ |
| + {14, 15, 15, 6, 3, 64, ZSTD_btopt}, /* level 15.*/ |
| + {14, 15, 15, 6, 3, 96, ZSTD_btopt}, /* level 16.*/ |
| + {14, 15, 15, 6, 3, 128, ZSTD_btopt}, /* level 17.*/ |
| + {14, 15, 15, 6, 3, 256, ZSTD_btopt}, /* level 18.*/ |
| + {14, 15, 15, 7, 3, 256, ZSTD_btopt}, /* level 19.*/ |
| + {14, 15, 15, 8, 3, 256, ZSTD_btopt2}, /* level 20.*/ |
| + {14, 15, 15, 9, 3, 256, ZSTD_btopt2}, /* level 21.*/ |
| + {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/ |
| + }, |
| +}; |
| + |
| +/*! ZSTD_getCParams() : |
| +* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`. |
| +* Size values are optional, provide 0 if not known or unused */ |
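| +/* |
| + * For example, srcSize = 100 KB with no dictionary gives rSize of about |
| + * 100 KB, so tableID = 2 and the "srcSize <= 128 KB" table is used; |
| + * srcSize = 0 (unknown) gives rSize = (U64)-1 and selects the default table. |
| + */ |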
| +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) |
| +{ |
| + ZSTD_compressionParameters cp; |
| + size_t const addedSize = srcSize ? 0 : 500; |
| + U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1; |
| + U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ |
| + if (compressionLevel <= 0) |
| + compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */ |
| + if (compressionLevel > ZSTD_MAX_CLEVEL) |
| + compressionLevel = ZSTD_MAX_CLEVEL; |
| + cp = ZSTD_defaultCParameters[tableID][compressionLevel]; |
| + if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */ |
| + if (cp.windowLog > ZSTD_WINDOWLOG_MAX) |
| + cp.windowLog = ZSTD_WINDOWLOG_MAX; |
| + if (cp.chainLog > ZSTD_CHAINLOG_MAX) |
| + cp.chainLog = ZSTD_CHAINLOG_MAX; |
| + if (cp.hashLog > ZSTD_HASHLOG_MAX) |
| + cp.hashLog = ZSTD_HASHLOG_MAX; |
| + } |
| + cp = ZSTD_adjustCParams(cp, srcSize, dictSize); |
| + return cp; |
| +} |
| + |
| +/*! ZSTD_getParams() : |
| +* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). |
| +* All fields of `ZSTD_frameParameters` are set to default (0) */ |
| +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) |
| +{ |
| + ZSTD_parameters params; |
| + ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize); |
| + memset(¶ms, 0, sizeof(params)); |
| + params.cParams = cParams; |
| + return params; |
| +} |
| + |
| +EXPORT_SYMBOL(ZSTD_maxCLevel); |
| +EXPORT_SYMBOL(ZSTD_compressBound); |
| + |
| +EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound); |
| +EXPORT_SYMBOL(ZSTD_initCCtx); |
| +EXPORT_SYMBOL(ZSTD_compressCCtx); |
| +EXPORT_SYMBOL(ZSTD_compress_usingDict); |
| + |
| +EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound); |
| +EXPORT_SYMBOL(ZSTD_initCDict); |
| +EXPORT_SYMBOL(ZSTD_compress_usingCDict); |
| + |
| +EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound); |
| +EXPORT_SYMBOL(ZSTD_initCStream); |
| +EXPORT_SYMBOL(ZSTD_initCStream_usingCDict); |
| +EXPORT_SYMBOL(ZSTD_resetCStream); |
| +EXPORT_SYMBOL(ZSTD_compressStream); |
| +EXPORT_SYMBOL(ZSTD_flushStream); |
| +EXPORT_SYMBOL(ZSTD_endStream); |
| +EXPORT_SYMBOL(ZSTD_CStreamInSize); |
| +EXPORT_SYMBOL(ZSTD_CStreamOutSize); |
| + |
| +EXPORT_SYMBOL(ZSTD_getCParams); |
| +EXPORT_SYMBOL(ZSTD_getParams); |
| +EXPORT_SYMBOL(ZSTD_checkCParams); |
| +EXPORT_SYMBOL(ZSTD_adjustCParams); |
| + |
| +EXPORT_SYMBOL(ZSTD_compressBegin); |
| +EXPORT_SYMBOL(ZSTD_compressBegin_usingDict); |
| +EXPORT_SYMBOL(ZSTD_compressBegin_advanced); |
| +EXPORT_SYMBOL(ZSTD_copyCCtx); |
| +EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict); |
| +EXPORT_SYMBOL(ZSTD_compressContinue); |
| +EXPORT_SYMBOL(ZSTD_compressEnd); |
| + |
| +EXPORT_SYMBOL(ZSTD_getBlockSizeMax); |
| +EXPORT_SYMBOL(ZSTD_compressBlock); |
| + |
| +MODULE_LICENSE("Dual BSD/GPL"); |
| +MODULE_DESCRIPTION("Zstd Compressor"); |
| diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c |
| new file mode 100644 |
| index 0000000..62449ae |
| --- /dev/null |
| +++ b/lib/zstd/decompress.c |
| @@ -0,0 +1,2526 @@ |
| +/** |
| + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +/* *************************************************************** |
| +* Tuning parameters |
| +*****************************************************************/ |
| +/*! |
| +* MAXWINDOWSIZE_DEFAULT : |
| +* maximum window size accepted by DStream, by default. |
| +* Frames requiring more memory will be rejected. |
| +*/ |
| +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT |
| +#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */ |
| +#endif |
| + |
| +/*-******************************************************* |
| +* Dependencies |
| +*********************************************************/ |
| +#include "fse.h" |
| +#include "huf.h" |
| +#include "mem.h" /* low level memory routines */ |
| +#include "zstd_internal.h" |
| +#include <linux/kernel.h> |
| +#include <linux/module.h> |
| +#include <linux/string.h> /* memcpy, memmove, memset */ |
| + |
| +#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) |
| + |
| +/*-************************************* |
| +* Macros |
| +***************************************/ |
| +#define ZSTD_isError ERR_isError /* for inlining */ |
| +#define FSE_isError ERR_isError |
| +#define HUF_isError ERR_isError |
| + |
| +/*_******************************************************* |
| +* Memory operations |
| +**********************************************************/ |
| +static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); } |
| + |
| +/*-************************************************************* |
| +* Context management |
| +***************************************************************/ |
| +typedef enum { |
| + ZSTDds_getFrameHeaderSize, |
| + ZSTDds_decodeFrameHeader, |
| + ZSTDds_decodeBlockHeader, |
| + ZSTDds_decompressBlock, |
| + ZSTDds_decompressLastBlock, |
| + ZSTDds_checkChecksum, |
| + ZSTDds_decodeSkippableHeader, |
| + ZSTDds_skipFrame |
| +} ZSTD_dStage; |
| + |
| +typedef struct { |
| + FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; |
| + FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; |
| + FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; |
| + HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ |
| + U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2]; |
| + U32 rep[ZSTD_REP_NUM]; |
| +} ZSTD_entropyTables_t; |
| + |
| +struct ZSTD_DCtx_s { |
| + const FSE_DTable *LLTptr; |
| + const FSE_DTable *MLTptr; |
| + const FSE_DTable *OFTptr; |
| + const HUF_DTable *HUFptr; |
| + ZSTD_entropyTables_t entropy; |
| + const void *previousDstEnd; /* detect continuity */ |
| + const void *base; /* start of curr segment */ |
| + const void *vBase; /* virtual start of previous segment if it was just before curr one */ |
| + const void *dictEnd; /* end of previous segment */ |
| + size_t expected; |
| + ZSTD_frameParams fParams; |
| + blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ |
| + ZSTD_dStage stage; |
| + U32 litEntropy; |
| + U32 fseEntropy; |
| + struct xxh64_state xxhState; |
| + size_t headerSize; |
| + U32 dictID; |
| + const BYTE *litPtr; |
| + ZSTD_customMem customMem; |
| + size_t litSize; |
| + size_t rleSize; |
| + BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH]; |
| + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; |
| +}; /* typedef'd to ZSTD_DCtx within "zstd.h" */ |
| + |
| +size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); } |
| + |
| +size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx) |
| +{ |
| + dctx->expected = ZSTD_frameHeaderSize_prefix; |
| + dctx->stage = ZSTDds_getFrameHeaderSize; |
| + dctx->previousDstEnd = NULL; |
| + dctx->base = NULL; |
| + dctx->vBase = NULL; |
| + dctx->dictEnd = NULL; |
| + dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ |
| + dctx->litEntropy = dctx->fseEntropy = 0; |
| + dctx->dictID = 0; |
| + ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); |
| + memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ |
| + dctx->LLTptr = dctx->entropy.LLTable; |
| + dctx->MLTptr = dctx->entropy.MLTable; |
| + dctx->OFTptr = dctx->entropy.OFTable; |
| + dctx->HUFptr = dctx->entropy.hufTable; |
| + return 0; |
| +} |
| + |
| +ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem) |
| +{ |
| + ZSTD_DCtx *dctx; |
| + |
| + if (!customMem.customAlloc || !customMem.customFree) |
| + return NULL; |
| + |
| + dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem); |
| + if (!dctx) |
| + return NULL; |
| + memcpy(&dctx->customMem, &customMem, sizeof(customMem)); |
| + ZSTD_decompressBegin(dctx); |
| + return dctx; |
| +} |
| + |
| +ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); |
| + return ZSTD_createDCtx_advanced(stackMem); |
| +} |
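| + |
| +/* |
| + * Illustrative usage sketch (annotation, not part of the upstream sources). |
| + * The caller owns the workspace and keeps it alive for the DCtx lifetime; |
| + * error checking via ZSTD_isError() is omitted for brevity: |
| + * |
| + *   size_t const wkspSize = ZSTD_DCtxWorkspaceBound(); |
| + *   void *const wksp = vmalloc(wkspSize); |
| + *   ZSTD_DCtx *const dctx = wksp ? ZSTD_initDCtx(wksp, wkspSize) : NULL; |
| + *   if (dctx) |
| + *           ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); |
| + *   vfree(wksp); |
| + * |
| + * ZSTD_decompressDCtx() is the one-shot entry point defined later in this |
| + * file; dst, dstCapacity, src and srcSize are caller-provided placeholders. |
| + */ |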
| + |
| +size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx) |
| +{ |
| + if (dctx == NULL) |
| + return 0; /* support free on NULL */ |
| + ZSTD_free(dctx, dctx->customMem); |
| + return 0; /* reserved as a potential error code in the future */ |
| +} |
| + |
| +void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx) |
| +{ |
| + size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max; |
| + memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */ |
| +} |
| + |
| +static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict); |
| + |
| +/*-************************************************************* |
| +* Decompression section |
| +***************************************************************/ |
| + |
| +/*! ZSTD_isFrame() : |
| + * Tells if the content of `buffer` starts with a valid Frame Identifier. |
| + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. |
| + * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. |
| + * Note 3 : Skippable Frame Identifiers are considered valid. */ |
| +unsigned ZSTD_isFrame(const void *buffer, size_t size) |
| +{ |
| + if (size < 4) |
| + return 0; |
| + { |
| + U32 const magic = ZSTD_readLE32(buffer); |
| + if (magic == ZSTD_MAGICNUMBER) |
| + return 1; |
| + if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) |
| + return 1; |
| + } |
| + return 0; |
| +} |
| + |
| +/** ZSTD_frameHeaderSize() : |
| +* srcSize must be >= ZSTD_frameHeaderSize_prefix. |
| +* @return : size of the Frame Header */ |
| +static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize) |
| +{ |
| + if (srcSize < ZSTD_frameHeaderSize_prefix) |
| + return ERROR(srcSize_wrong); |
| + { |
| + BYTE const fhd = ((const BYTE *)src)[4]; |
| + U32 const dictID = fhd & 3; |
| + U32 const singleSegment = (fhd >> 5) & 1; |
| + U32 const fcsId = fhd >> 6; |
| + return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId); |
| + } |
| +} |
| + |
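| +/* Frame_Header_Descriptor bit layout (annotation, as decoded by the code below): |
| + *   bits 0-1 : Dictionary_ID field size code (0, 1, 2 or 4 bytes) |
| + *   bit  2   : Content_Checksum_flag |
| + *   bit  3   : reserved, must be zero |
| + *   bit  4   : unused |
| + *   bit  5   : Single_Segment_flag (no Window_Descriptor byte when set) |
| + *   bits 6-7 : Frame_Content_Size field size code (0 or 1, 2, 4 or 8 bytes) |
| + * ZSTD_frameHeaderSize() above and ZSTD_getFrameParams() below both parse this byte. */ |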
| +/** ZSTD_getFrameParams() : |
| +* decode Frame Header, or require larger `srcSize`. |
| +* @return : 0, `fparamsPtr` is correctly filled, |
| +* >0, `srcSize` is too small, result is expected `srcSize`, |
| +* or an error code, which can be tested using ZSTD_isError() */ |
| +size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)src; |
| + |
| + if (srcSize < ZSTD_frameHeaderSize_prefix) |
| + return ZSTD_frameHeaderSize_prefix; |
| + if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) { |
| + if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { |
| + if (srcSize < ZSTD_skippableHeaderSize) |
| + return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */ |
| + memset(fparamsPtr, 0, sizeof(*fparamsPtr)); |
| + fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4); |
| + fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */ |
| + return 0; |
| + } |
| + return ERROR(prefix_unknown); |
| + } |
| + |
| + /* ensure there is enough `srcSize` to fully read/decode frame header */ |
| + { |
| + size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); |
| + if (srcSize < fhsize) |
| + return fhsize; |
| + } |
| + |
| + { |
| + BYTE const fhdByte = ip[4]; |
| + size_t pos = 5; |
| + U32 const dictIDSizeCode = fhdByte & 3; |
| + U32 const checksumFlag = (fhdByte >> 2) & 1; |
| + U32 const singleSegment = (fhdByte >> 5) & 1; |
| + U32 const fcsID = fhdByte >> 6; |
| + U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; |
| + U32 windowSize = 0; |
| + U32 dictID = 0; |
| + U64 frameContentSize = 0; |
| + if ((fhdByte & 0x08) != 0) |
| + return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */ |
| + if (!singleSegment) { |
| + BYTE const wlByte = ip[pos++]; |
| + U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; |
| + if (windowLog > ZSTD_WINDOWLOG_MAX) |
| + return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */ |
| + windowSize = (1U << windowLog); |
| + windowSize += (windowSize >> 3) * (wlByte & 7); |
| + } |
| + |
| + switch (dictIDSizeCode) { |
| + default: /* impossible */ |
| + case 0: break; |
| + case 1: |
| + dictID = ip[pos]; |
| + pos++; |
| + break; |
| + case 2: |
| + dictID = ZSTD_readLE16(ip + pos); |
| + pos += 2; |
| + break; |
| + case 3: |
| + dictID = ZSTD_readLE32(ip + pos); |
| + pos += 4; |
| + break; |
| + } |
| + switch (fcsID) { |
| + default: /* impossible */ |
| + case 0: |
| + if (singleSegment) |
| + frameContentSize = ip[pos]; |
| + break; |
| + case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break; |
| + case 2: frameContentSize = ZSTD_readLE32(ip + pos); break; |
| + case 3: frameContentSize = ZSTD_readLE64(ip + pos); break; |
| + } |
| + if (!windowSize) |
| + windowSize = (U32)frameContentSize; |
| + if (windowSize > windowSizeMax) |
| + return ERROR(frameParameter_windowTooLarge); |
| + fparamsPtr->frameContentSize = frameContentSize; |
| + fparamsPtr->windowSize = windowSize; |
| + fparamsPtr->dictID = dictID; |
| + fparamsPtr->checksumFlag = checksumFlag; |
| + } |
| + return 0; |
| +} |
| + |
| +/** ZSTD_getFrameContentSize() : |
| +* compatible with legacy mode |
| +* @return : decompressed size of the single frame pointed to by `src` if known, otherwise |
| +* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined |
| +* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ |
| +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) |
| +{ |
| + { |
| + ZSTD_frameParams fParams; |
| + if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) |
| + return ZSTD_CONTENTSIZE_ERROR; |
| + if (fParams.windowSize == 0) { |
| + /* Either skippable or empty frame, size == 0 either way */ |
| + return 0; |
| + } else if (fParams.frameContentSize != 0) { |
| + return fParams.frameContentSize; |
| + } else { |
| + return ZSTD_CONTENTSIZE_UNKNOWN; |
| + } |
| + } |
| +} |
| + |
| +/** ZSTD_findDecompressedSize() : |
| + * compatible with legacy mode |
| + * `srcSize` must be the exact length of some number of ZSTD compressed and/or |
| + * skippable frames |
| + * @return : decompressed size of the frames contained */ |
| +unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize) |
| +{ |
| + { |
| + unsigned long long totalDstSize = 0; |
| + while (srcSize >= ZSTD_frameHeaderSize_prefix) { |
| + const U32 magicNumber = ZSTD_readLE32(src); |
| + |
| + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { |
| + size_t skippableSize; |
| + if (srcSize < ZSTD_skippableHeaderSize) |
| + return ERROR(srcSize_wrong); |
| + skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize; |
| + if (srcSize < skippableSize) { |
| + return ZSTD_CONTENTSIZE_ERROR; |
| + } |
| + |
| + src = (const BYTE *)src + skippableSize; |
| + srcSize -= skippableSize; |
| + continue; |
| + } |
| + |
| + { |
| + unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); |
| + if (ret >= ZSTD_CONTENTSIZE_ERROR) |
| + return ret; |
| + |
| + /* check for overflow */ |
| + if (totalDstSize + ret < totalDstSize) |
| + return ZSTD_CONTENTSIZE_ERROR; |
| + totalDstSize += ret; |
| + } |
| + { |
| + size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); |
| + if (ZSTD_isError(frameSrcSize)) { |
| + return ZSTD_CONTENTSIZE_ERROR; |
| + } |
| + |
| + src = (const BYTE *)src + frameSrcSize; |
| + srcSize -= frameSrcSize; |
| + } |
| + } |
| + |
| + if (srcSize) { |
| + return ZSTD_CONTENTSIZE_ERROR; |
| + } |
| + |
| + return totalDstSize; |
| + } |
| +} |
| + |
| +/** ZSTD_decodeFrameHeader() : |
| +* `headerSize` must be the size provided by ZSTD_frameHeaderSize(). |
| +* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ |
| +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize) |
| +{ |
| + size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize); |
| + if (ZSTD_isError(result)) |
| + return result; /* invalid header */ |
| + if (result > 0) |
| + return ERROR(srcSize_wrong); /* headerSize too small */ |
| + if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) |
| + return ERROR(dictionary_wrong); |
| + if (dctx->fParams.checksumFlag) |
| + xxh64_reset(&dctx->xxhState, 0); |
| + return 0; |
| +} |
| + |
| +typedef struct { |
| + blockType_e blockType; |
| + U32 lastBlock; |
| + U32 origSize; |
| +} blockProperties_t; |
| + |
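| +/* Block_Header layout (annotation): 3 bytes, read little-endian below. |
| + *   bit  0    : Last_Block flag |
| + *   bits 1-2  : Block_Type (raw, RLE, compressed, reserved) |
| + *   bits 3-23 : Block_Size; for RLE blocks this is the regenerated size |
| + *               (stored in origSize), while the compressed size of an RLE block is 1. */ |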
| +/*! ZSTD_getcBlockSize() : |
| +* Provides the size of the compressed block, read from the block header at `src` */ |
| +size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr) |
| +{ |
| + if (srcSize < ZSTD_blockHeaderSize) |
| + return ERROR(srcSize_wrong); |
| + { |
| + U32 const cBlockHeader = ZSTD_readLE24(src); |
| + U32 const cSize = cBlockHeader >> 3; |
| + bpPtr->lastBlock = cBlockHeader & 1; |
| + bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); |
| + bpPtr->origSize = cSize; /* only useful for RLE */ |
| + if (bpPtr->blockType == bt_rle) |
| + return 1; |
| + if (bpPtr->blockType == bt_reserved) |
| + return ERROR(corruption_detected); |
| + return cSize; |
| + } |
| +} |
| + |
| +static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + if (srcSize > dstCapacity) |
| + return ERROR(dstSize_tooSmall); |
| + memcpy(dst, src, srcSize); |
| + return srcSize; |
| +} |
| + |
| +static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize) |
| +{ |
| + if (srcSize != 1) |
| + return ERROR(srcSize_wrong); |
| + if (regenSize > dstCapacity) |
| + return ERROR(dstSize_tooSmall); |
| + memset(dst, *(const BYTE *)src, regenSize); |
| + return regenSize; |
| +} |
| + |
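| +/* Literals_Section_Header (annotation): the low 2 bits of the first byte give |
| + * the literals encoding type (raw/basic, RLE, Huffman-compressed, or repeat of |
| + * the previous Huffman table); the next 2 bits select the size format, which |
| + * determines the header length (lhSize) and the width of the size fields |
| + * decoded below. */ |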
| +/*! ZSTD_decodeLiteralsBlock() : |
| + @return : nb of bytes read from src (< srcSize ) */ |
| +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ |
| +{ |
| + if (srcSize < MIN_CBLOCK_SIZE) |
| + return ERROR(corruption_detected); |
| + |
| + { |
| + const BYTE *const istart = (const BYTE *)src; |
| + symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); |
| + |
| + switch (litEncType) { |
| + case set_repeat: |
| + if (dctx->litEntropy == 0) |
| + return ERROR(dictionary_corrupted); |
| + /* fall-through */ |
| + case set_compressed: |
| + if (srcSize < 5) |
| + return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ |
| + { |
| + size_t lhSize, litSize, litCSize; |
| + U32 singleStream = 0; |
| + U32 const lhlCode = (istart[0] >> 2) & 3; |
| + U32 const lhc = ZSTD_readLE32(istart); |
| + switch (lhlCode) { |
| + case 0: |
| + case 1: |
| + default: /* note : default is impossible, since lhlCode into [0..3] */ |
| + /* 2 - 2 - 10 - 10 */ |
| + singleStream = !lhlCode; |
| + lhSize = 3; |
| + litSize = (lhc >> 4) & 0x3FF; |
| + litCSize = (lhc >> 14) & 0x3FF; |
| + break; |
| + case 2: |
| + /* 2 - 2 - 14 - 14 */ |
| + lhSize = 4; |
| + litSize = (lhc >> 4) & 0x3FFF; |
| + litCSize = lhc >> 18; |
| + break; |
| + case 3: |
| + /* 2 - 2 - 18 - 18 */ |
| + lhSize = 5; |
| + litSize = (lhc >> 4) & 0x3FFFF; |
| + litCSize = (lhc >> 22) + (istart[4] << 10); |
| + break; |
| + } |
| + if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) |
| + return ERROR(corruption_detected); |
| + if (litCSize + lhSize > srcSize) |
| + return ERROR(corruption_detected); |
| + |
| + if (HUF_isError( |
| + (litEncType == set_repeat) |
| + ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr) |
| + : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)) |
| + : (singleStream |
| + ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, |
| + dctx->entropy.workspace, sizeof(dctx->entropy.workspace)) |
| + : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, |
| + dctx->entropy.workspace, sizeof(dctx->entropy.workspace))))) |
| + return ERROR(corruption_detected); |
| + |
| + dctx->litPtr = dctx->litBuffer; |
| + dctx->litSize = litSize; |
| + dctx->litEntropy = 1; |
| + if (litEncType == set_compressed) |
| + dctx->HUFptr = dctx->entropy.hufTable; |
| + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); |
| + return litCSize + lhSize; |
| + } |
| + |
| + case set_basic: { |
| + size_t litSize, lhSize; |
| + U32 const lhlCode = ((istart[0]) >> 2) & 3; |
| + switch (lhlCode) { |
| + case 0: |
| + case 2: |
| + default: /* note : default is impossible, since lhlCode into [0..3] */ |
| + lhSize = 1; |
| + litSize = istart[0] >> 3; |
| + break; |
| + case 1: |
| + lhSize = 2; |
| + litSize = ZSTD_readLE16(istart) >> 4; |
| + break; |
| + case 3: |
| + lhSize = 3; |
| + litSize = ZSTD_readLE24(istart) >> 4; |
| + break; |
| + } |
| + |
| + if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ |
| + if (litSize + lhSize > srcSize) |
| + return ERROR(corruption_detected); |
| + memcpy(dctx->litBuffer, istart + lhSize, litSize); |
| + dctx->litPtr = dctx->litBuffer; |
| + dctx->litSize = litSize; |
| + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); |
| + return lhSize + litSize; |
| + } |
| + /* direct reference into compressed stream */ |
| + dctx->litPtr = istart + lhSize; |
| + dctx->litSize = litSize; |
| + return lhSize + litSize; |
| + } |
| + |
| + case set_rle: { |
| + U32 const lhlCode = ((istart[0]) >> 2) & 3; |
| + size_t litSize, lhSize; |
| + switch (lhlCode) { |
| + case 0: |
| + case 2: |
| + default: /* note : default is impossible, since lhlCode into [0..3] */ |
| + lhSize = 1; |
| + litSize = istart[0] >> 3; |
| + break; |
| + case 1: |
| + lhSize = 2; |
| + litSize = ZSTD_readLE16(istart) >> 4; |
| + break; |
| + case 3: |
| + lhSize = 3; |
| + litSize = ZSTD_readLE24(istart) >> 4; |
| + if (srcSize < 4) |
| + return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ |
| + break; |
| + } |
| + if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) |
| + return ERROR(corruption_detected); |
| + memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); |
| + dctx->litPtr = dctx->litBuffer; |
| + dctx->litSize = litSize; |
| + return lhSize + 1; |
| + } |
| + default: |
| + return ERROR(corruption_detected); /* impossible */ |
| + } |
| + } |
| +} |
| + |
| +typedef union { |
| + FSE_decode_t realData; |
| + U32 alignedBy4; |
| +} FSE_decode_t4; |
| + |
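| +/* The three tables below (annotation): pre-built FSE decoding tables for the |
| + * "predefined mode" distributions of the literal-length, match-length and |
| + * offset codes. The first entry of each table is the header (tableLog, |
| + * fastMode); every following entry is one decoding step: the base of the next |
| + * state (to which the freshly read bits are added), the decoded symbol, and |
| + * the number of bits to read. */ |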
| +static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = { |
| + {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ |
| + {{0, 0, 4}}, /* 0 : base, symbol, bits */ |
| + {{16, 0, 4}}, |
| + {{32, 1, 5}}, |
| + {{0, 3, 5}}, |
| + {{0, 4, 5}}, |
| + {{0, 6, 5}}, |
| + {{0, 7, 5}}, |
| + {{0, 9, 5}}, |
| + {{0, 10, 5}}, |
| + {{0, 12, 5}}, |
| + {{0, 14, 6}}, |
| + {{0, 16, 5}}, |
| + {{0, 18, 5}}, |
| + {{0, 19, 5}}, |
| + {{0, 21, 5}}, |
| + {{0, 22, 5}}, |
| + {{0, 24, 5}}, |
| + {{32, 25, 5}}, |
| + {{0, 26, 5}}, |
| + {{0, 27, 6}}, |
| + {{0, 29, 6}}, |
| + {{0, 31, 6}}, |
| + {{32, 0, 4}}, |
| + {{0, 1, 4}}, |
| + {{0, 2, 5}}, |
| + {{32, 4, 5}}, |
| + {{0, 5, 5}}, |
| + {{32, 7, 5}}, |
| + {{0, 8, 5}}, |
| + {{32, 10, 5}}, |
| + {{0, 11, 5}}, |
| + {{0, 13, 6}}, |
| + {{32, 16, 5}}, |
| + {{0, 17, 5}}, |
| + {{32, 19, 5}}, |
| + {{0, 20, 5}}, |
| + {{32, 22, 5}}, |
| + {{0, 23, 5}}, |
| + {{0, 25, 4}}, |
| + {{16, 25, 4}}, |
| + {{32, 26, 5}}, |
| + {{0, 28, 6}}, |
| + {{0, 30, 6}}, |
| + {{48, 0, 4}}, |
| + {{16, 1, 4}}, |
| + {{32, 2, 5}}, |
| + {{32, 3, 5}}, |
| + {{32, 5, 5}}, |
| + {{32, 6, 5}}, |
| + {{32, 8, 5}}, |
| + {{32, 9, 5}}, |
| + {{32, 11, 5}}, |
| + {{32, 12, 5}}, |
| + {{0, 15, 6}}, |
| + {{32, 17, 5}}, |
| + {{32, 18, 5}}, |
| + {{32, 20, 5}}, |
| + {{32, 21, 5}}, |
| + {{32, 23, 5}}, |
| + {{32, 24, 5}}, |
| + {{0, 35, 6}}, |
| + {{0, 34, 6}}, |
| + {{0, 33, 6}}, |
| + {{0, 32, 6}}, |
| +}; /* LL_defaultDTable */ |
| + |
| +static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = { |
| + {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ |
| + {{0, 0, 6}}, /* 0 : base, symbol, bits */ |
| + {{0, 1, 4}}, |
| + {{32, 2, 5}}, |
| + {{0, 3, 5}}, |
| + {{0, 5, 5}}, |
| + {{0, 6, 5}}, |
| + {{0, 8, 5}}, |
| + {{0, 10, 6}}, |
| + {{0, 13, 6}}, |
| + {{0, 16, 6}}, |
| + {{0, 19, 6}}, |
| + {{0, 22, 6}}, |
| + {{0, 25, 6}}, |
| + {{0, 28, 6}}, |
| + {{0, 31, 6}}, |
| + {{0, 33, 6}}, |
| + {{0, 35, 6}}, |
| + {{0, 37, 6}}, |
| + {{0, 39, 6}}, |
| + {{0, 41, 6}}, |
| + {{0, 43, 6}}, |
| + {{0, 45, 6}}, |
| + {{16, 1, 4}}, |
| + {{0, 2, 4}}, |
| + {{32, 3, 5}}, |
| + {{0, 4, 5}}, |
| + {{32, 6, 5}}, |
| + {{0, 7, 5}}, |
| + {{0, 9, 6}}, |
| + {{0, 12, 6}}, |
| + {{0, 15, 6}}, |
| + {{0, 18, 6}}, |
| + {{0, 21, 6}}, |
| + {{0, 24, 6}}, |
| + {{0, 27, 6}}, |
| + {{0, 30, 6}}, |
| + {{0, 32, 6}}, |
| + {{0, 34, 6}}, |
| + {{0, 36, 6}}, |
| + {{0, 38, 6}}, |
| + {{0, 40, 6}}, |
| + {{0, 42, 6}}, |
| + {{0, 44, 6}}, |
| + {{32, 1, 4}}, |
| + {{48, 1, 4}}, |
| + {{16, 2, 4}}, |
| + {{32, 4, 5}}, |
| + {{32, 5, 5}}, |
| + {{32, 7, 5}}, |
| + {{32, 8, 5}}, |
| + {{0, 11, 6}}, |
| + {{0, 14, 6}}, |
| + {{0, 17, 6}}, |
| + {{0, 20, 6}}, |
| + {{0, 23, 6}}, |
| + {{0, 26, 6}}, |
| + {{0, 29, 6}}, |
| + {{0, 52, 6}}, |
| + {{0, 51, 6}}, |
| + {{0, 50, 6}}, |
| + {{0, 49, 6}}, |
| + {{0, 48, 6}}, |
| + {{0, 47, 6}}, |
| + {{0, 46, 6}}, |
| +}; /* ML_defaultDTable */ |
| + |
| +static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = { |
| + {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */ |
| + {{0, 0, 5}}, /* 0 : base, symbol, bits */ |
| + {{0, 6, 4}}, |
| + {{0, 9, 5}}, |
| + {{0, 15, 5}}, |
| + {{0, 21, 5}}, |
| + {{0, 3, 5}}, |
| + {{0, 7, 4}}, |
| + {{0, 12, 5}}, |
| + {{0, 18, 5}}, |
| + {{0, 23, 5}}, |
| + {{0, 5, 5}}, |
| + {{0, 8, 4}}, |
| + {{0, 14, 5}}, |
| + {{0, 20, 5}}, |
| + {{0, 2, 5}}, |
| + {{16, 7, 4}}, |
| + {{0, 11, 5}}, |
| + {{0, 17, 5}}, |
| + {{0, 22, 5}}, |
| + {{0, 4, 5}}, |
| + {{16, 8, 4}}, |
| + {{0, 13, 5}}, |
| + {{0, 19, 5}}, |
| + {{0, 1, 5}}, |
| + {{16, 6, 4}}, |
| + {{0, 10, 5}}, |
| + {{0, 16, 5}}, |
| + {{0, 28, 5}}, |
| + {{0, 27, 5}}, |
| + {{0, 26, 5}}, |
| + {{0, 25, 5}}, |
| + {{0, 24, 5}}, |
| +}; /* OF_defaultDTable */ |
| + |
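| +/* ZSTD_buildSeqTable() (annotation) handles the four sequence table modes: |
| + *   set_rle        : build a single-symbol table from the one byte in src |
| + *   set_basic      : point at the predefined default table above (reads 0 bytes) |
| + *   set_repeat     : keep the table left by the previous block (valid only if |
| + *                    flagRepeatTable / fseEntropy is set) |
| + *   set_compressed : read an FSE-compressed normalized count and build the table |
| + */ |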
| +/*! ZSTD_buildSeqTable() : |
| + @return : nb bytes read from src, |
| + or an error code if it fails, testable with ZSTD_isError() |
| +*/ |
| +static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src, |
| + size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize) |
| +{ |
| + const void *const tmpPtr = defaultTable; /* bypass strict aliasing */ |
| + switch (type) { |
| + case set_rle: |
| + if (!srcSize) |
| + return ERROR(srcSize_wrong); |
| + if ((*(const BYTE *)src) > max) |
| + return ERROR(corruption_detected); |
| + FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src); |
| + *DTablePtr = DTableSpace; |
| + return 1; |
| + case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0; |
| + case set_repeat: |
| + if (!flagRepeatTable) |
| + return ERROR(corruption_detected); |
| + return 0; |
| + default: /* impossible */ |
| + case set_compressed: { |
| + U32 tableLog; |
| + S16 *norm = (S16 *)workspace; |
| + size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(GENERIC); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + { |
| + size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); |
| + if (FSE_isError(headerSize)) |
| + return ERROR(corruption_detected); |
| + if (tableLog > maxLog) |
| + return ERROR(corruption_detected); |
| + FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize); |
| + *DTablePtr = DTableSpace; |
| + return headerSize; |
| + } |
| + } |
| + } |
| +} |
| + |
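| +/* Sequences_Section header (annotation): the first 1-3 bytes encode the number |
| + * of sequences (1 byte if < 128, 2 bytes if the first byte is 0x80-0xFE, |
| + * 3 bytes if it is 0xFF), followed by one byte holding the 2-bit encoding |
| + * types of the literal-length, offset and match-length tables. */ |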
| +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize) |
| +{ |
| + const BYTE *const istart = (const BYTE *const)src; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *ip = istart; |
| + |
| + /* check */ |
| + if (srcSize < MIN_SEQUENCES_SIZE) |
| + return ERROR(srcSize_wrong); |
| + |
| + /* SeqHead */ |
| + { |
| + int nbSeq = *ip++; |
| + if (!nbSeq) { |
| + *nbSeqPtr = 0; |
| + return 1; |
| + } |
| + if (nbSeq > 0x7F) { |
| + if (nbSeq == 0xFF) { |
| + if (ip + 2 > iend) |
| + return ERROR(srcSize_wrong); |
| + nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2; |
| + } else { |
| + if (ip >= iend) |
| + return ERROR(srcSize_wrong); |
| + nbSeq = ((nbSeq - 0x80) << 8) + *ip++; |
| + } |
| + } |
| + *nbSeqPtr = nbSeq; |
| + } |
| + |
| + /* FSE table descriptors */ |
| + if (ip + 4 > iend) |
| + return ERROR(srcSize_wrong); /* minimum possible size */ |
| + { |
| + symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); |
| + symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); |
| + symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); |
| + ip++; |
| + |
| + /* Build DTables */ |
| + { |
| + size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip, |
| + LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); |
| + if (ZSTD_isError(llhSize)) |
| + return ERROR(corruption_detected); |
| + ip += llhSize; |
| + } |
| + { |
| + size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip, |
| + OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); |
| + if (ZSTD_isError(ofhSize)) |
| + return ERROR(corruption_detected); |
| + ip += ofhSize; |
| + } |
| + { |
| + size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip, |
| + ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)); |
| + if (ZSTD_isError(mlhSize)) |
| + return ERROR(corruption_detected); |
| + ip += mlhSize; |
| + } |
| + } |
| + |
| + return ip - istart; |
| +} |
| + |
| +typedef struct { |
| + size_t litLength; |
| + size_t matchLength; |
| + size_t offset; |
| + const BYTE *match; |
| +} seq_t; |
| + |
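| +/* seqState_t (annotation): the bitstream plus the three FSE decoder states, and |
| + * the three previous offsets (repcodes) carried across sequences. base, pos and |
| + * gotoDict are used only by the "Long" variants further below to precompute the |
| + * match pointer: gotoDict is the delta that relocates a match falling before |
| + * base into the dictionary/extDict segment. */ |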
| +typedef struct { |
| + BIT_DStream_t DStream; |
| + FSE_DState_t stateLL; |
| + FSE_DState_t stateOffb; |
| + FSE_DState_t stateML; |
| + size_t prevOffset[ZSTD_REP_NUM]; |
| + const BYTE *base; |
| + size_t pos; |
| + uPtrDiff gotoDict; |
| +} seqState_t; |
| + |
| +FORCE_NOINLINE |
| +size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, |
| + const BYTE *const vBase, const BYTE *const dictEnd) |
| +{ |
| + BYTE *const oLitEnd = op + sequence.litLength; |
| + size_t const sequenceLength = sequence.litLength + sequence.matchLength; |
| + BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ |
| + BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; |
| + const BYTE *const iLitEnd = *litPtr + sequence.litLength; |
| + const BYTE *match = oLitEnd - sequence.offset; |
| + |
| + /* check */ |
| + if (oMatchEnd > oend) |
| + return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ |
| + if (iLitEnd > litLimit) |
| + return ERROR(corruption_detected); /* over-read beyond lit buffer */ |
| + if (oLitEnd <= oend_w) |
| + return ERROR(GENERIC); /* Precondition */ |
| + |
| + /* copy literals */ |
| + if (op < oend_w) { |
| + ZSTD_wildcopy(op, *litPtr, oend_w - op); |
| + *litPtr += oend_w - op; |
| + op = oend_w; |
| + } |
| + while (op < oLitEnd) |
| + *op++ = *(*litPtr)++; |
| + |
| + /* copy Match */ |
| + if (sequence.offset > (size_t)(oLitEnd - base)) { |
| + /* offset beyond prefix */ |
| + if (sequence.offset > (size_t)(oLitEnd - vBase)) |
| + return ERROR(corruption_detected); |
| + match = dictEnd - (base - match); |
| + if (match + sequence.matchLength <= dictEnd) { |
| + memmove(oLitEnd, match, sequence.matchLength); |
| + return sequenceLength; |
| + } |
| + /* span extDict & currPrefixSegment */ |
| + { |
| + size_t const length1 = dictEnd - match; |
| + memmove(oLitEnd, match, length1); |
| + op = oLitEnd + length1; |
| + sequence.matchLength -= length1; |
| + match = base; |
| + } |
| + } |
| + while (op < oMatchEnd) |
| + *op++ = *match++; |
| + return sequenceLength; |
| +} |
| + |
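| +/* Repcode handling (annotation): when ofCode is 0 or 1 the decoded offset is a |
| + * small value in 0..3 (incremented by one when the literal length is 0). |
| + * 0 repeats prevOffset[0] unchanged; 1 and 2 select prevOffset[1]/prevOffset[2] |
| + * and rotate the history; 3 means prevOffset[0] - 1. Larger ofCodes produce a |
| + * regular offset which is pushed onto the repcode history. */ |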
| +static seq_t ZSTD_decodeSequence(seqState_t *seqState) |
| +{ |
| + seq_t seq; |
| + |
| + U32 const llCode = FSE_peekSymbol(&seqState->stateLL); |
| + U32 const mlCode = FSE_peekSymbol(&seqState->stateML); |
| + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ |
| + |
| + U32 const llBits = LL_bits[llCode]; |
| + U32 const mlBits = ML_bits[mlCode]; |
| + U32 const ofBits = ofCode; |
| + U32 const totalBits = llBits + mlBits + ofBits; |
| + |
| + static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, |
| + 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000}; |
| + |
| + static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, |
| + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, |
| + 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003}; |
| + |
| + static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, |
| + 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, |
| + 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD}; |
| + |
| + /* sequence */ |
| + { |
| + size_t offset; |
| + if (!ofCode) |
| + offset = 0; |
| + else { |
| + offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ |
| + if (ZSTD_32bits()) |
| + BIT_reloadDStream(&seqState->DStream); |
| + } |
| + |
| + if (ofCode <= 1) { |
| + offset += (llCode == 0); |
| + if (offset) { |
| + size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; |
| + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ |
| + if (offset != 1) |
| + seqState->prevOffset[2] = seqState->prevOffset[1]; |
| + seqState->prevOffset[1] = seqState->prevOffset[0]; |
| + seqState->prevOffset[0] = offset = temp; |
| + } else { |
| + offset = seqState->prevOffset[0]; |
| + } |
| + } else { |
| + seqState->prevOffset[2] = seqState->prevOffset[1]; |
| + seqState->prevOffset[1] = seqState->prevOffset[0]; |
| + seqState->prevOffset[0] = offset; |
| + } |
| + seq.offset = offset; |
| + } |
| + |
| + seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ |
| + if (ZSTD_32bits() && (mlBits + llBits > 24)) |
| + BIT_reloadDStream(&seqState->DStream); |
| + |
| + seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ |
| + if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) |
| + BIT_reloadDStream(&seqState->DStream); |
| + |
| + /* ANS state update */ |
| + FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ |
| + FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ |
| + if (ZSTD_32bits()) |
| + BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ |
| + FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ |
| + |
| + return seq; |
| +} |
| + |
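| +/* Match copy strategy (annotation): ZSTD_execSequence() uses 8-byte wildcopies |
| + * whenever it is safe to over-write up to WILDCOPY_OVERLENGTH bytes past the |
| + * copy length. Offsets smaller than 8 overlap the destination, so the first |
| + * 8 bytes are copied with the dec32table/dec64table adjustment before falling |
| + * back to wildcopy; sequences too close to the end of the output buffer take |
| + * the byte-by-byte ZSTD_execSequenceLast7() path above. */ |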
| +FORCE_INLINE |
| +size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, |
| + const BYTE *const vBase, const BYTE *const dictEnd) |
| +{ |
| + BYTE *const oLitEnd = op + sequence.litLength; |
| + size_t const sequenceLength = sequence.litLength + sequence.matchLength; |
| + BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ |
| + BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; |
| + const BYTE *const iLitEnd = *litPtr + sequence.litLength; |
| + const BYTE *match = oLitEnd - sequence.offset; |
| + |
| + /* check */ |
| + if (oMatchEnd > oend) |
| + return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ |
| + if (iLitEnd > litLimit) |
| + return ERROR(corruption_detected); /* over-read beyond lit buffer */ |
| + if (oLitEnd > oend_w) |
| + return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); |
| + |
| + /* copy Literals */ |
| + ZSTD_copy8(op, *litPtr); |
| + if (sequence.litLength > 8) |
| + ZSTD_wildcopy(op + 8, (*litPtr) + 8, |
| + sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ |
| + op = oLitEnd; |
| + *litPtr = iLitEnd; /* update for next sequence */ |
| + |
| + /* copy Match */ |
| + if (sequence.offset > (size_t)(oLitEnd - base)) { |
| + /* offset beyond prefix */ |
| + if (sequence.offset > (size_t)(oLitEnd - vBase)) |
| + return ERROR(corruption_detected); |
| + match = dictEnd + (match - base); |
| + if (match + sequence.matchLength <= dictEnd) { |
| + memmove(oLitEnd, match, sequence.matchLength); |
| + return sequenceLength; |
| + } |
| + /* span extDict & currPrefixSegment */ |
| + { |
| + size_t const length1 = dictEnd - match; |
| + memmove(oLitEnd, match, length1); |
| + op = oLitEnd + length1; |
| + sequence.matchLength -= length1; |
| + match = base; |
| + if (op > oend_w || sequence.matchLength < MINMATCH) { |
| + U32 i; |
| + for (i = 0; i < sequence.matchLength; ++i) |
| + op[i] = match[i]; |
| + return sequenceLength; |
| + } |
| + } |
| + } |
| + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ |
| + |
| + /* match within prefix */ |
| + if (sequence.offset < 8) { |
| + /* close range match, overlap */ |
| + static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ |
| + static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */ |
| + int const sub2 = dec64table[sequence.offset]; |
| + op[0] = match[0]; |
| + op[1] = match[1]; |
| + op[2] = match[2]; |
| + op[3] = match[3]; |
| + match += dec32table[sequence.offset]; |
| + ZSTD_copy4(op + 4, match); |
| + match -= sub2; |
| + } else { |
| + ZSTD_copy8(op, match); |
| + } |
| + op += 8; |
| + match += 8; |
| + |
| + if (oMatchEnd > oend - (16 - MINMATCH)) { |
| + if (op < oend_w) { |
| + ZSTD_wildcopy(op, match, oend_w - op); |
| + match += oend_w - op; |
| + op = oend_w; |
| + } |
| + while (op < oMatchEnd) |
| + *op++ = *match++; |
| + } else { |
| + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */ |
| + } |
| + return sequenceLength; |
| +} |
| + |
| +static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)seqStart; |
| + const BYTE *const iend = ip + seqSize; |
| + BYTE *const ostart = (BYTE * const)dst; |
| + BYTE *const oend = ostart + maxDstSize; |
| + BYTE *op = ostart; |
| + const BYTE *litPtr = dctx->litPtr; |
| + const BYTE *const litEnd = litPtr + dctx->litSize; |
| + const BYTE *const base = (const BYTE *)(dctx->base); |
| + const BYTE *const vBase = (const BYTE *)(dctx->vBase); |
| + const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd); |
| + int nbSeq; |
| + |
| + /* Build Decoding Tables */ |
| + { |
| + size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); |
| + if (ZSTD_isError(seqHSize)) |
| + return seqHSize; |
| + ip += seqHSize; |
| + } |
| + |
| + /* Regen sequences */ |
| + if (nbSeq) { |
| + seqState_t seqState; |
| + dctx->fseEntropy = 1; |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + seqState.prevOffset[i] = dctx->entropy.rep[i]; |
| + } |
| + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected); |
| + FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); |
| + FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); |
| + FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); |
| + |
| + for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) { |
| + nbSeq--; |
| + { |
| + seq_t const sequence = ZSTD_decodeSequence(&seqState); |
| + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); |
| + if (ZSTD_isError(oneSeqSize)) |
| + return oneSeqSize; |
| + op += oneSeqSize; |
| + } |
| + } |
| + |
| + /* check if reached exact end */ |
| + if (nbSeq) |
| + return ERROR(corruption_detected); |
| + /* save reps for next block */ |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); |
| + } |
| + } |
| + |
| + /* last literal segment */ |
| + { |
| + size_t const lastLLSize = litEnd - litPtr; |
| + if (lastLLSize > (size_t)(oend - op)) |
| + return ERROR(dstSize_tooSmall); |
| + memcpy(op, litPtr, lastLLSize); |
| + op += lastLLSize; |
| + } |
| + |
| + return op - ostart; |
| +} |
| + |
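| +/* "Long" decoding variants (annotation): with a large window the match is |
| + * likely to miss the cache, so ZSTD_decompressSequencesLong() decodes a few |
| + * sequences ahead (a STORED_SEQS ring buffer), prefetches each precomputed |
| + * match pointer, and only then executes the sequence. The longOffsets path |
| + * splits the offset extra-bits read in two when ofBits can exceed what one |
| + * bitstream reload guarantees (STREAM_ACCUMULATOR_MIN). */ |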
| +FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets) |
| +{ |
| + seq_t seq; |
| + |
| + U32 const llCode = FSE_peekSymbol(&seqState->stateLL); |
| + U32 const mlCode = FSE_peekSymbol(&seqState->stateML); |
| + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ |
| + |
| + U32 const llBits = LL_bits[llCode]; |
| + U32 const mlBits = ML_bits[mlCode]; |
| + U32 const ofBits = ofCode; |
| + U32 const totalBits = llBits + mlBits + ofBits; |
| + |
| + static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, |
| + 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000}; |
| + |
| + static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, |
| + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, |
| + 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003}; |
| + |
| + static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, |
| + 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, |
| + 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD}; |
| + |
| + /* sequence */ |
| + { |
| + size_t offset; |
| + if (!ofCode) |
| + offset = 0; |
| + else { |
| + if (longOffsets) { |
| + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN); |
| + offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); |
| + if (ZSTD_32bits() || extraBits) |
| + BIT_reloadDStream(&seqState->DStream); |
| + if (extraBits) |
| + offset += BIT_readBitsFast(&seqState->DStream, extraBits); |
| + } else { |
| + offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ |
| + if (ZSTD_32bits()) |
| + BIT_reloadDStream(&seqState->DStream); |
| + } |
| + } |
| + |
| + if (ofCode <= 1) { |
| + offset += (llCode == 0); |
| + if (offset) { |
| + size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; |
| + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ |
| + if (offset != 1) |
| + seqState->prevOffset[2] = seqState->prevOffset[1]; |
| + seqState->prevOffset[1] = seqState->prevOffset[0]; |
| + seqState->prevOffset[0] = offset = temp; |
| + } else { |
| + offset = seqState->prevOffset[0]; |
| + } |
| + } else { |
| + seqState->prevOffset[2] = seqState->prevOffset[1]; |
| + seqState->prevOffset[1] = seqState->prevOffset[0]; |
| + seqState->prevOffset[0] = offset; |
| + } |
| + seq.offset = offset; |
| + } |
| + |
| + seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ |
| + if (ZSTD_32bits() && (mlBits + llBits > 24)) |
| + BIT_reloadDStream(&seqState->DStream); |
| + |
| + seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ |
| + if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog))) |
| + BIT_reloadDStream(&seqState->DStream); |
| + |
| + { |
| + size_t const pos = seqState->pos + seq.litLength; |
| + seq.match = seqState->base + pos - seq.offset; /* single memory segment */ |
| + if (seq.offset > pos) |
| + seq.match += seqState->gotoDict; /* separate memory segment */ |
| + seqState->pos = pos + seq.matchLength; |
| + } |
| + |
| + /* ANS state update */ |
| + FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ |
| + FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ |
| + if (ZSTD_32bits()) |
| + BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ |
| + FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ |
| + |
| + return seq; |
| +} |
| + |
| +static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize) |
| +{ |
| + if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) { |
| + return ZSTD_decodeSequenceLong_generic(seqState, 1); |
| + } else { |
| + return ZSTD_decodeSequenceLong_generic(seqState, 0); |
| + } |
| +} |
| + |
| +FORCE_INLINE |
| +size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base, |
| + const BYTE *const vBase, const BYTE *const dictEnd) |
| +{ |
| + BYTE *const oLitEnd = op + sequence.litLength; |
| + size_t const sequenceLength = sequence.litLength + sequence.matchLength; |
| + BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ |
| + BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH; |
| + const BYTE *const iLitEnd = *litPtr + sequence.litLength; |
| + const BYTE *match = sequence.match; |
| + |
| + /* check */ |
| + if (oMatchEnd > oend) |
| + return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ |
| + if (iLitEnd > litLimit) |
| + return ERROR(corruption_detected); /* over-read beyond lit buffer */ |
| + if (oLitEnd > oend_w) |
| + return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); |
| + |
| + /* copy Literals */ |
| + ZSTD_copy8(op, *litPtr); |
| + if (sequence.litLength > 8) |
| + ZSTD_wildcopy(op + 8, (*litPtr) + 8, |
| + sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ |
| + op = oLitEnd; |
| + *litPtr = iLitEnd; /* update for next sequence */ |
| + |
| + /* copy Match */ |
| + if (sequence.offset > (size_t)(oLitEnd - base)) { |
| + /* offset beyond prefix */ |
| + if (sequence.offset > (size_t)(oLitEnd - vBase)) |
| + return ERROR(corruption_detected); |
| + if (match + sequence.matchLength <= dictEnd) { |
| + memmove(oLitEnd, match, sequence.matchLength); |
| + return sequenceLength; |
| + } |
| + /* span extDict & currPrefixSegment */ |
| + { |
| + size_t const length1 = dictEnd - match; |
| + memmove(oLitEnd, match, length1); |
| + op = oLitEnd + length1; |
| + sequence.matchLength -= length1; |
| + match = base; |
| + if (op > oend_w || sequence.matchLength < MINMATCH) { |
| + U32 i; |
| + for (i = 0; i < sequence.matchLength; ++i) |
| + op[i] = match[i]; |
| + return sequenceLength; |
| + } |
| + } |
| + } |
| + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ |
| + |
| + /* match within prefix */ |
| + if (sequence.offset < 8) { |
| + /* close range match, overlap */ |
| + static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ |
| + static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */ |
| + int const sub2 = dec64table[sequence.offset]; |
| + op[0] = match[0]; |
| + op[1] = match[1]; |
| + op[2] = match[2]; |
| + op[3] = match[3]; |
| + match += dec32table[sequence.offset]; |
| + ZSTD_copy4(op + 4, match); |
| + match -= sub2; |
| + } else { |
| + ZSTD_copy8(op, match); |
| + } |
| + op += 8; |
| + match += 8; |
| + |
| + if (oMatchEnd > oend - (16 - MINMATCH)) { |
| + if (op < oend_w) { |
| + ZSTD_wildcopy(op, match, oend_w - op); |
| + match += oend_w - op; |
| + op = oend_w; |
| + } |
| + while (op < oMatchEnd) |
| + *op++ = *match++; |
| + } else { |
| + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */ |
| + } |
| + return sequenceLength; |
| +} |
| + |
| +static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)seqStart; |
| + const BYTE *const iend = ip + seqSize; |
| + BYTE *const ostart = (BYTE * const)dst; |
| + BYTE *const oend = ostart + maxDstSize; |
| + BYTE *op = ostart; |
| + const BYTE *litPtr = dctx->litPtr; |
| + const BYTE *const litEnd = litPtr + dctx->litSize; |
| + const BYTE *const base = (const BYTE *)(dctx->base); |
| + const BYTE *const vBase = (const BYTE *)(dctx->vBase); |
| + const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd); |
| + unsigned const windowSize = dctx->fParams.windowSize; |
| + int nbSeq; |
| + |
| + /* Build Decoding Tables */ |
| + { |
| + size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); |
| + if (ZSTD_isError(seqHSize)) |
| + return seqHSize; |
| + ip += seqHSize; |
| + } |
| + |
| + /* Regen sequences */ |
| + if (nbSeq) { |
| +#define STORED_SEQS 4 |
| +#define STOSEQ_MASK (STORED_SEQS - 1) |
| +#define ADVANCED_SEQS 4 |
| + seq_t *sequences = (seq_t *)dctx->entropy.workspace; |
| + int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); |
| + seqState_t seqState; |
| + int seqNb; |
| + ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS); |
| + dctx->fseEntropy = 1; |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + seqState.prevOffset[i] = dctx->entropy.rep[i]; |
| + } |
| + seqState.base = base; |
| + seqState.pos = (size_t)(op - base); |
| + seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */ |
| + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected); |
| + FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); |
| + FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); |
| + FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); |
| + |
| + /* prepare in advance */ |
| + for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) { |
| + sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize); |
| + } |
| + if (seqNb < seqAdvance) |
| + return ERROR(corruption_detected); |
| + |
| + /* decode and decompress */ |
| + for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) { |
| + seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize); |
| + size_t const oneSeqSize = |
| + ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); |
| + if (ZSTD_isError(oneSeqSize)) |
| + return oneSeqSize; |
| + ZSTD_PREFETCH(sequence.match); |
| + sequences[seqNb & STOSEQ_MASK] = sequence; |
| + op += oneSeqSize; |
| + } |
| + if (seqNb < nbSeq) |
| + return ERROR(corruption_detected); |
| + |
| + /* finish queue */ |
| + seqNb -= seqAdvance; |
| + for (; seqNb < nbSeq; seqNb++) { |
| + size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); |
| + if (ZSTD_isError(oneSeqSize)) |
| + return oneSeqSize; |
| + op += oneSeqSize; |
| + } |
| + |
| + /* save reps for next block */ |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); |
| + } |
| + } |
| + |
| + /* last literal segment */ |
| + { |
| + size_t const lastLLSize = litEnd - litPtr; |
| + if (lastLLSize > (size_t)(oend - op)) |
| + return ERROR(dstSize_tooSmall); |
| + memcpy(op, litPtr, lastLLSize); |
| + op += lastLLSize; |
| + } |
| + |
| + return op - ostart; |
| +} |
| + |
| +static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ /* blockType == blockCompressed */ |
| + const BYTE *ip = (const BYTE *)src; |
| + |
| + if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) |
| + return ERROR(srcSize_wrong); |
| + |
| + /* Decode literals section */ |
| + { |
| + size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); |
| + if (ZSTD_isError(litCSize)) |
| + return litCSize; |
| + ip += litCSize; |
| + srcSize -= litCSize; |
| + } |
| + if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */ |
| + /* likely because of register pressure */ |
| + /* if that's the correct cause, then 32-bits ARM should be affected differently */ |
| + /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */ |
| + if (dctx->fParams.windowSize > (1 << 23)) |
| + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize); |
| + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); |
| +} |
| + |
| +static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst) |
| +{ |
| + if (dst != dctx->previousDstEnd) { /* not contiguous */ |
| + dctx->dictEnd = dctx->previousDstEnd; |
| + dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base)); |
| + dctx->base = dst; |
| + dctx->previousDstEnd = dst; |
| + } |
| +} |
| + |
| +size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + size_t dSize; |
| + ZSTD_checkContinuity(dctx, dst); |
| + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); |
| + dctx->previousDstEnd = (char *)dst + dSize; |
| + return dSize; |
| +} |
| + |
| +/** ZSTD_insertBlock() : |
| + insert `blockStart` block into `dctx` history. Useful to track uncompressed blocks. */ |
| +size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize) |
| +{ |
| + ZSTD_checkContinuity(dctx, blockStart); |
| + dctx->previousDstEnd = (const char *)blockStart + blockSize; |
| + return blockSize; |
| +} |
| + |
| +size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length) |
| +{ |
| + if (length > dstCapacity) |
| + return ERROR(dstSize_tooSmall); |
| + memset(dst, byte, length); |
| + return length; |
| +} |
| + |
| +/** ZSTD_findFrameCompressedSize() : |
| + * compatible with legacy mode |
| + * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame |
| + * `srcSize` must be at least as large as the frame contained |
| + * @return : the compressed size of the frame starting at `src` */ |
| +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) |
| +{ |
| + if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { |
| + return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4); |
| + } else { |
| + const BYTE *ip = (const BYTE *)src; |
| + const BYTE *const ipstart = ip; |
| + size_t remainingSize = srcSize; |
| + ZSTD_frameParams fParams; |
| + |
| + size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize); |
| + if (ZSTD_isError(headerSize)) |
| + return headerSize; |
| + |
| + /* Frame Header */ |
| + { |
| + size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); |
| + if (ZSTD_isError(ret)) |
| + return ret; |
| + if (ret > 0) |
| + return ERROR(srcSize_wrong); |
| + } |
| + |
| + ip += headerSize; |
| + remainingSize -= headerSize; |
| + |
| + /* Loop on each block */ |
| + while (1) { |
| + blockProperties_t blockProperties; |
| + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); |
| + if (ZSTD_isError(cBlockSize)) |
| + return cBlockSize; |
| + |
| + if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) |
| + return ERROR(srcSize_wrong); |
| + |
| + ip += ZSTD_blockHeaderSize + cBlockSize; |
| + remainingSize -= ZSTD_blockHeaderSize + cBlockSize; |
| + |
| + if (blockProperties.lastBlock) |
| + break; |
| + } |
| + |
| + if (fParams.checksumFlag) { /* Frame content checksum */ |
| + if (remainingSize < 4) |
| + return ERROR(srcSize_wrong); |
| + ip += 4; |
| + remainingSize -= 4; |
| + } |
| + |
| + return ip - ipstart; |
| + } |
| +} |
| + |
| +/*! ZSTD_decompressFrame() : |
| +* @dctx must be properly initialized */ |
| +static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr) |
| +{ |
| + const BYTE *ip = (const BYTE *)(*srcPtr); |
| + BYTE *const ostart = (BYTE * const)dst; |
| + BYTE *const oend = ostart + dstCapacity; |
| + BYTE *op = ostart; |
| + size_t remainingSize = *srcSizePtr; |
| + |
| + /* check */ |
| + if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize) |
| + return ERROR(srcSize_wrong); |
| + |
| + /* Frame Header */ |
| + { |
| + size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); |
| + if (ZSTD_isError(frameHeaderSize)) |
| + return frameHeaderSize; |
| + if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize) |
| + return ERROR(srcSize_wrong); |
| + CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); |
| + ip += frameHeaderSize; |
| + remainingSize -= frameHeaderSize; |
| + } |
| + |
| + /* Loop on each block */ |
| + while (1) { |
| + size_t decodedSize; |
| + blockProperties_t blockProperties; |
| + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); |
| + if (ZSTD_isError(cBlockSize)) |
| + return cBlockSize; |
| + |
| + ip += ZSTD_blockHeaderSize; |
| + remainingSize -= ZSTD_blockHeaderSize; |
| + if (cBlockSize > remainingSize) |
| + return ERROR(srcSize_wrong); |
| + |
| + switch (blockProperties.blockType) { |
| + case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break; |
| + case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break; |
| + case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break; |
| + case bt_reserved: |
| + default: return ERROR(corruption_detected); |
| + } |
| + |
| + if (ZSTD_isError(decodedSize)) |
| + return decodedSize; |
| + if (dctx->fParams.checksumFlag) |
| + xxh64_update(&dctx->xxhState, op, decodedSize); |
| + op += decodedSize; |
| + ip += cBlockSize; |
| + remainingSize -= cBlockSize; |
| + if (blockProperties.lastBlock) |
| + break; |
| + } |
| + |
| + if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ |
| + U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState); |
| + U32 checkRead; |
| + if (remainingSize < 4) |
| + return ERROR(checksum_wrong); |
| + checkRead = ZSTD_readLE32(ip); |
| + if (checkRead != checkCalc) |
| + return ERROR(checksum_wrong); |
| + ip += 4; |
| + remainingSize -= 4; |
| + } |
| + |
| + /* Allow caller to get size read */ |
| + *srcPtr = ip; |
| + *srcSizePtr = remainingSize; |
| + return op - ostart; |
| +} |
| + |
| +static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict); |
| +static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict); |
| + |
| +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize, |
| + const ZSTD_DDict *ddict) |
| +{ |
| + void *const dststart = dst; |
| + |
| + if (ddict) { |
| + if (dict) { |
| + /* programmer error, these two cases should be mutually exclusive */ |
| + return ERROR(GENERIC); |
| + } |
| + |
| + dict = ZSTD_DDictDictContent(ddict); |
| + dictSize = ZSTD_DDictDictSize(ddict); |
| + } |
| + |
| + while (srcSize >= ZSTD_frameHeaderSize_prefix) { |
| + U32 magicNumber; |
| + |
| + magicNumber = ZSTD_readLE32(src); |
| + if (magicNumber != ZSTD_MAGICNUMBER) { |
| + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { |
| + size_t skippableSize; |
| + if (srcSize < ZSTD_skippableHeaderSize) |
| + return ERROR(srcSize_wrong); |
| + skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize; |
| + if (srcSize < skippableSize) { |
| + return ERROR(srcSize_wrong); |
| + } |
| + |
| + src = (const BYTE *)src + skippableSize; |
| + srcSize -= skippableSize; |
| + continue; |
| + } else { |
| + return ERROR(prefix_unknown); |
| + } |
| + } |
| + |
| + if (ddict) { |
| + /* we were called from ZSTD_decompress_usingDDict */ |
| + ZSTD_refDDict(dctx, ddict); |
| + } else { |
| + /* this will initialize correctly with no dict if dict == NULL, so |
| + * use this in all cases but ddict */ |
| + CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); |
| + } |
| + ZSTD_checkContinuity(dctx, dst); |
| + |
| + { |
| + const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); |
| + if (ZSTD_isError(res)) |
| + return res; |
| + /* don't need to bounds check this, ZSTD_decompressFrame will have |
| + * already */ |
| + dst = (BYTE *)dst + res; |
| + dstCapacity -= res; |
| + } |
| + } |
| + |
| + if (srcSize) |
| + return ERROR(srcSize_wrong); /* input not entirely consumed */ |
| + |
| + return (BYTE *)dst - (BYTE *)dststart; |
| +} |
| + |
| +size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize) |
| +{ |
| + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); |
| +} |
| + |
| +size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); |
| +} |
| + |
| +/*-************************************** |
| +* Advanced Streaming Decompression API |
| +* Bufferless and synchronous |
| +****************************************/ |
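| +/* Bufferless streaming contract (annotation): callers drive the state machine by |
| + * asking ZSTD_nextSrcSizeToDecompress() how many bytes to provide, then feeding |
| + * exactly that many to ZSTD_decompressContinue(); any other size is rejected |
| + * with srcSize_wrong. ZSTD_nextInputType() reports what the next chunk will be. */ |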
| +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; } |
| + |
| +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx) |
| +{ |
| + switch (dctx->stage) { |
| + default: /* should not happen */ |
| + case ZSTDds_getFrameHeaderSize: |
| + case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; |
| + case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader; |
| + case ZSTDds_decompressBlock: return ZSTDnit_block; |
| + case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock; |
| + case ZSTDds_checkChecksum: return ZSTDnit_checksum; |
| + case ZSTDds_decodeSkippableHeader: |
| + case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; |
| + } |
| +} |
| + |
| +int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */ |
| + |
| +/** ZSTD_decompressContinue() : |
| +* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
| +* or an error code, which can be tested using ZSTD_isError() */ |
| +size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + /* Sanity check */ |
| + if (srcSize != dctx->expected) |
| + return ERROR(srcSize_wrong); |
| + if (dstCapacity) |
| + ZSTD_checkContinuity(dctx, dst); |
| + |
| + switch (dctx->stage) { |
| + case ZSTDds_getFrameHeaderSize: |
| + if (srcSize != ZSTD_frameHeaderSize_prefix) |
| + return ERROR(srcSize_wrong); /* impossible */ |
| + if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ |
| + memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); |
| + dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */ |
| + dctx->stage = ZSTDds_decodeSkippableHeader; |
| + return 0; |
| + } |
| + dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix); |
| + if (ZSTD_isError(dctx->headerSize)) |
| + return dctx->headerSize; |
| + memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); |
| + if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) { |
| + dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix; |
| + dctx->stage = ZSTDds_decodeFrameHeader; |
| + return 0; |
| + } |
| + dctx->expected = 0; /* not necessary to copy more */ |
| + |
| + case ZSTDds_decodeFrameHeader: |
| + memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); |
| + CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); |
| + dctx->expected = ZSTD_blockHeaderSize; |
| + dctx->stage = ZSTDds_decodeBlockHeader; |
| + return 0; |
| + |
| + case ZSTDds_decodeBlockHeader: { |
| + blockProperties_t bp; |
| + size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); |
| + if (ZSTD_isError(cBlockSize)) |
| + return cBlockSize; |
| + dctx->expected = cBlockSize; |
| + dctx->bType = bp.blockType; |
| + dctx->rleSize = bp.origSize; |
| + if (cBlockSize) { |
| + dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; |
| + return 0; |
| + } |
| + /* empty block */ |
| + if (bp.lastBlock) { |
| + if (dctx->fParams.checksumFlag) { |
| + dctx->expected = 4; |
| + dctx->stage = ZSTDds_checkChecksum; |
| + } else { |
| + dctx->expected = 0; /* end of frame */ |
| + dctx->stage = ZSTDds_getFrameHeaderSize; |
| + } |
| + } else { |
| + dctx->expected = 3; /* go directly to next header */ |
| + dctx->stage = ZSTDds_decodeBlockHeader; |
| + } |
| + return 0; |
| + } |
| + case ZSTDds_decompressLastBlock: |
| + case ZSTDds_decompressBlock: { |
| + size_t rSize; |
| + switch (dctx->bType) { |
| + case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break; |
| + case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break; |
| + case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break; |
| + case bt_reserved: /* should never happen */ |
| + default: return ERROR(corruption_detected); |
| + } |
| + if (ZSTD_isError(rSize)) |
| + return rSize; |
| + if (dctx->fParams.checksumFlag) |
| + xxh64_update(&dctx->xxhState, dst, rSize); |
| + |
| + if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ |
| + if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ |
| + dctx->expected = 4; |
| + dctx->stage = ZSTDds_checkChecksum; |
| + } else { |
| + dctx->expected = 0; /* ends here */ |
| + dctx->stage = ZSTDds_getFrameHeaderSize; |
| + } |
| + } else { |
| + dctx->stage = ZSTDds_decodeBlockHeader; |
| + dctx->expected = ZSTD_blockHeaderSize; |
| + dctx->previousDstEnd = (char *)dst + rSize; |
| + } |
| + return rSize; |
| + } |
| + case ZSTDds_checkChecksum: { |
| + U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); |
| + U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ |
| + if (check32 != h32) |
| + return ERROR(checksum_wrong); |
| + dctx->expected = 0; |
| + dctx->stage = ZSTDds_getFrameHeaderSize; |
| + return 0; |
| + } |
| + case ZSTDds_decodeSkippableHeader: { |
| + memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); |
| + dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4); |
| + dctx->stage = ZSTDds_skipFrame; |
| + return 0; |
| + } |
| + case ZSTDds_skipFrame: { |
| + dctx->expected = 0; |
| + dctx->stage = ZSTDds_getFrameHeaderSize; |
| + return 0; |
| + } |
| + default: |
| + return ERROR(GENERIC); /* impossible */ |
| + } |
| +} |
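| +
| +/* A minimal usage sketch of the bufferless streaming API above (not part of the
| + * upstream sources). readNext() is a hypothetical caller routine that fills
| + * inBuff with exactly the requested number of bytes; outPtr/outEnd delimit a
| + * destination buffer into which successive blocks are written contiguously :
| + *
| + *   size_t ret = ZSTD_decompressBegin(dctx);
| + *   while (!ZSTD_isError(ret)) {
| + *       size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
| + *       if (toRead == 0)
| + *           break;                              // frame fully decoded
| + *       readNext(inBuff, toRead);
| + *       ret = ZSTD_decompressContinue(dctx, outPtr, outEnd - outPtr, inBuff, toRead);
| + *       if (!ZSTD_isError(ret))
| + *           outPtr += ret;                      // header steps produce 0 bytes of output
| + *   }
| + */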
| + |
| +static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
| +{ |
| + dctx->dictEnd = dctx->previousDstEnd; |
| + dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base)); |
| + dctx->base = dict; |
| + dctx->previousDstEnd = (const char *)dict + dictSize; |
| + return 0; |
| +} |
| + |
| +/* ZSTD_loadEntropy() : |
| + * dict : must point at beginning of a valid zstd dictionary |
| + * @return : size of entropy tables read */ |
| +static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize) |
| +{ |
| + const BYTE *dictPtr = (const BYTE *)dict; |
| + const BYTE *const dictEnd = dictPtr + dictSize; |
| + |
| + if (dictSize <= 8) |
| + return ERROR(dictionary_corrupted); |
| + dictPtr += 8; /* skip header = magic + dictID */ |
| + |
| + { |
| + size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace)); |
| + if (HUF_isError(hSize)) |
| + return ERROR(dictionary_corrupted); |
| + dictPtr += hSize; |
| + } |
| + |
| + { |
| + short offcodeNCount[MaxOff + 1]; |
| + U32 offcodeMaxValue = MaxOff, offcodeLog; |
| + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr); |
| + if (FSE_isError(offcodeHeaderSize)) |
| + return ERROR(dictionary_corrupted); |
| + if (offcodeLog > OffFSELog) |
| + return ERROR(dictionary_corrupted); |
| + CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); |
| + dictPtr += offcodeHeaderSize; |
| + } |
| + |
| + { |
| + short matchlengthNCount[MaxML + 1]; |
| + unsigned matchlengthMaxValue = MaxML, matchlengthLog; |
| + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr); |
| + if (FSE_isError(matchlengthHeaderSize)) |
| + return ERROR(dictionary_corrupted); |
| + if (matchlengthLog > MLFSELog) |
| + return ERROR(dictionary_corrupted); |
| + CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); |
| + dictPtr += matchlengthHeaderSize; |
| + } |
| + |
| + { |
| + short litlengthNCount[MaxLL + 1]; |
| + unsigned litlengthMaxValue = MaxLL, litlengthLog; |
| + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr); |
| + if (FSE_isError(litlengthHeaderSize)) |
| + return ERROR(dictionary_corrupted); |
| + if (litlengthLog > LLFSELog) |
| + return ERROR(dictionary_corrupted); |
| + CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted); |
| + dictPtr += litlengthHeaderSize; |
| + } |
| + |
| + if (dictPtr + 12 > dictEnd) |
| + return ERROR(dictionary_corrupted); |
| + { |
| + int i; |
| + size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12)); |
| + for (i = 0; i < 3; i++) { |
| + U32 const rep = ZSTD_readLE32(dictPtr); |
| + dictPtr += 4; |
| + if (rep == 0 || rep >= dictContentSize) |
| + return ERROR(dictionary_corrupted); |
| + entropy->rep[i] = rep; |
| + } |
| + } |
| + |
| + return dictPtr - (const BYTE *)dict; |
| +} |
| + |
| +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
| +{ |
| + if (dictSize < 8) |
| + return ZSTD_refDictContent(dctx, dict, dictSize); |
| + { |
| + U32 const magic = ZSTD_readLE32(dict); |
| + if (magic != ZSTD_DICT_MAGIC) { |
| + return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ |
| + } |
| + } |
| + dctx->dictID = ZSTD_readLE32((const char *)dict + 4); |
| + |
| + /* load entropy tables */ |
| + { |
| + size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); |
| + if (ZSTD_isError(eSize)) |
| + return ERROR(dictionary_corrupted); |
| + dict = (const char *)dict + eSize; |
| + dictSize -= eSize; |
| + } |
| + dctx->litEntropy = dctx->fseEntropy = 1; |
| + |
| + /* reference dictionary content */ |
| + return ZSTD_refDictContent(dctx, dict, dictSize); |
| +} |
| + |
| +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize) |
| +{ |
| + CHECK_F(ZSTD_decompressBegin(dctx)); |
| + if (dict && dictSize) |
| + CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); |
| + return 0; |
| +} |
| + |
| +/* ====== ZSTD_DDict ====== */ |
| + |
| +struct ZSTD_DDict_s { |
| + void *dictBuffer; |
| + const void *dictContent; |
| + size_t dictSize; |
| + ZSTD_entropyTables_t entropy; |
| + U32 dictID; |
| + U32 entropyPresent; |
| + ZSTD_customMem cMem; |
| +}; /* typedef'd to ZSTD_DDict within "zstd.h" */ |
| + |
| +size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); } |
| + |
| +static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; } |
| + |
| +static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; } |
| + |
| +static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict) |
| +{ |
| + ZSTD_decompressBegin(dstDCtx); /* init */ |
| + if (ddict) { /* support refDDict on NULL */ |
| + dstDCtx->dictID = ddict->dictID; |
| + dstDCtx->base = ddict->dictContent; |
| + dstDCtx->vBase = ddict->dictContent; |
| + dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize; |
| + dstDCtx->previousDstEnd = dstDCtx->dictEnd; |
| + if (ddict->entropyPresent) { |
| + dstDCtx->litEntropy = 1; |
| + dstDCtx->fseEntropy = 1; |
| + dstDCtx->LLTptr = ddict->entropy.LLTable; |
| + dstDCtx->MLTptr = ddict->entropy.MLTable; |
| + dstDCtx->OFTptr = ddict->entropy.OFTable; |
| + dstDCtx->HUFptr = ddict->entropy.hufTable; |
| + dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; |
| + dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; |
| + dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; |
| + } else { |
| + dstDCtx->litEntropy = 0; |
| + dstDCtx->fseEntropy = 0; |
| + } |
| + } |
| +} |
| + |
| +static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict) |
| +{ |
| + ddict->dictID = 0; |
| + ddict->entropyPresent = 0; |
| + if (ddict->dictSize < 8) |
| + return 0; |
| + { |
| + U32 const magic = ZSTD_readLE32(ddict->dictContent); |
| + if (magic != ZSTD_DICT_MAGIC) |
| + return 0; /* pure content mode */ |
| + } |
| + ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4); |
| + |
| + /* load entropy tables */ |
| + CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted); |
| + ddict->entropyPresent = 1; |
| + return 0; |
| +} |
| + |
| +static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) |
| +{ |
| + if (!customMem.customAlloc || !customMem.customFree) |
| + return NULL; |
| + |
| + { |
| + ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem); |
| + if (!ddict) |
| + return NULL; |
| + ddict->cMem = customMem; |
| + |
| + if ((byReference) || (!dict) || (!dictSize)) { |
| + ddict->dictBuffer = NULL; |
| + ddict->dictContent = dict; |
| + } else { |
| + void *const internalBuffer = ZSTD_malloc(dictSize, customMem); |
| + if (!internalBuffer) { |
| + ZSTD_freeDDict(ddict); |
| + return NULL; |
| + } |
| + memcpy(internalBuffer, dict, dictSize); |
| + ddict->dictBuffer = internalBuffer; |
| + ddict->dictContent = internalBuffer; |
| + } |
| + ddict->dictSize = dictSize; |
| + ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ |
| + /* parse dictionary content */ |
| + { |
| + size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); |
| + if (ZSTD_isError(errorCode)) { |
| + ZSTD_freeDDict(ddict); |
| + return NULL; |
| + } |
| + } |
| + |
| + return ddict; |
| + } |
| +} |
| + |
| +/*! ZSTD_initDDict() :
| +* Create a digested dictionary, to start decompression without startup delay.
| +* `dict` content is referenced, not copied (ZSTD_createDDict_advanced() is called with byReference == 1). |
| +* Consequently, `dict` must remain valid for the lifetime of the `ZSTD_DDict` */
| +ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); |
| + return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem); |
| +} |
| + |
| +size_t ZSTD_freeDDict(ZSTD_DDict *ddict) |
| +{ |
| + if (ddict == NULL) |
| + return 0; /* support free on NULL */ |
| + { |
| + ZSTD_customMem const cMem = ddict->cMem; |
| + ZSTD_free(ddict->dictBuffer, cMem); |
| + ZSTD_free(ddict, cMem); |
| + return 0; |
| + } |
| +} |
| + |
| +/*! ZSTD_getDictID_fromDict() : |
| + * Provides the dictID stored within dictionary. |
| + * if @return == 0, the dictionary is not conformant with Zstandard specification. |
| + * It can still be loaded, but as a content-only dictionary. */ |
| +unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize) |
| +{ |
| + if (dictSize < 8) |
| + return 0; |
| + if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) |
| + return 0; |
| + return ZSTD_readLE32((const char *)dict + 4); |
| +} |
| + |
| +/*! ZSTD_getDictID_fromDDict() : |
| + * Provides the dictID of the dictionary loaded into `ddict`. |
| + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. |
| + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ |
| +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict) |
| +{ |
| + if (ddict == NULL) |
| + return 0; |
| + return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); |
| +} |
| + |
| +/*! ZSTD_getDictID_fromFrame() : |
| + * Provides the dictID required to decompress the frame stored within `src`. |
| + * If @return == 0, the dictID could not be decoded. |
| + * This could be for one of the following reasons : |
| + * - The frame does not require a dictionary to be decoded (most common case). |
| + * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information. |
| + *   Note : this use case also happens when using a non-conformant dictionary. |
| + * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). |
| + * - This is not a Zstandard frame. |
| + * To identify the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */ |
| +unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize) |
| +{ |
| + ZSTD_frameParams zfp = {0, 0, 0, 0}; |
| + size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize); |
| + if (ZSTD_isError(hError)) |
| + return 0; |
| + return zfp.dictID; |
| +} |
| + |
| +/*! ZSTD_decompress_usingDDict() : |
| +* Decompression using a pre-digested Dictionary |
| +* Use dictionary without significant overhead. */ |
| +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict) |
| +{ |
| + /* pass content and size in case legacy frames are encountered */ |
| + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict); |
| +} |
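| +
| +/* A usage sketch combining the workspace-based initializers exported by this
| + * module (not part of the upstream sources); `dWksp` and `cWksp` are hypothetical
| + * caller buffers of at least ZSTD_DDictWorkspaceBound() / ZSTD_DCtxWorkspaceBound()
| + * bytes, and `dictBuf`, `dst`, `src` and the related sizes are also caller-provided :
| + *
| + *   ZSTD_DDict *const ddict = ZSTD_initDDict(dictBuf, dictSize, dWksp, ZSTD_DDictWorkspaceBound());
| + *   ZSTD_DCtx *const dctx = ZSTD_initDCtx(cWksp, ZSTD_DCtxWorkspaceBound());
| + *   size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
| + *   if (ZSTD_isError(dSize))
| + *       return -EINVAL;                         // hypothetical kernel-style error path
| + */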
| + |
| +/*===================================== |
| +* Streaming decompression |
| +*====================================*/ |
| + |
| +typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; |
| + |
| +/* *** Resource management *** */ |
| +struct ZSTD_DStream_s { |
| + ZSTD_DCtx *dctx; |
| + ZSTD_DDict *ddictLocal; |
| + const ZSTD_DDict *ddict; |
| + ZSTD_frameParams fParams; |
| + ZSTD_dStreamStage stage; |
| + char *inBuff; |
| + size_t inBuffSize; |
| + size_t inPos; |
| + size_t maxWindowSize; |
| + char *outBuff; |
| + size_t outBuffSize; |
| + size_t outStart; |
| + size_t outEnd; |
| + size_t blockSize; |
| + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */ |
| + size_t lhSize; |
| + ZSTD_customMem customMem; |
| + void *legacyContext; |
| + U32 previousLegacyVersion; |
| + U32 legacyVersion; |
| + U32 hostageByte; |
| +}; /* typedef'd to ZSTD_DStream within "zstd.h" */ |
| + |
| +size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize) |
| +{ |
| + size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); |
| + size_t const inBuffSize = blockSize; |
| + size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; |
| + return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); |
| +} |
| + |
| +static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem) |
| +{ |
| + ZSTD_DStream *zds; |
| + |
| + if (!customMem.customAlloc || !customMem.customFree) |
| + return NULL; |
| + |
| + zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem); |
| + if (zds == NULL) |
| + return NULL; |
| + memset(zds, 0, sizeof(ZSTD_DStream)); |
| + memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem)); |
| + zds->dctx = ZSTD_createDCtx_advanced(customMem); |
| + if (zds->dctx == NULL) { |
| + ZSTD_freeDStream(zds); |
| + return NULL; |
| + } |
| + zds->stage = zdss_init; |
| + zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; |
| + return zds; |
| +} |
| + |
| +ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); |
| + ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem); |
| + if (!zds) { |
| + return NULL; |
| + } |
| + |
| + zds->maxWindowSize = maxWindowSize; |
| + zds->stage = zdss_loadHeader; |
| + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; |
| + ZSTD_freeDDict(zds->ddictLocal); |
| + zds->ddictLocal = NULL; |
| + zds->ddict = zds->ddictLocal; |
| + zds->legacyVersion = 0; |
| + zds->hostageByte = 0; |
| + |
| + { |
| + size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); |
| + size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; |
| + |
| + zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem); |
| + zds->inBuffSize = blockSize; |
| + zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem); |
| + zds->outBuffSize = neededOutSize; |
| + if (zds->inBuff == NULL || zds->outBuff == NULL) { |
| + ZSTD_freeDStream(zds); |
| + return NULL; |
| + } |
| + } |
| + return zds; |
| +} |
| + |
| +ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize); |
| + if (zds) { |
| + zds->ddict = ddict; |
| + } |
| + return zds; |
| +} |
| + |
| +size_t ZSTD_freeDStream(ZSTD_DStream *zds) |
| +{ |
| + if (zds == NULL) |
| + return 0; /* support free on null */ |
| + { |
| + ZSTD_customMem const cMem = zds->customMem; |
| + ZSTD_freeDCtx(zds->dctx); |
| + zds->dctx = NULL; |
| + ZSTD_freeDDict(zds->ddictLocal); |
| + zds->ddictLocal = NULL; |
| + ZSTD_free(zds->inBuff, cMem); |
| + zds->inBuff = NULL; |
| + ZSTD_free(zds->outBuff, cMem); |
| + zds->outBuff = NULL; |
| + ZSTD_free(zds, cMem); |
| + return 0; |
| + } |
| +} |
| + |
| +/* *** Initialization *** */ |
| + |
| +size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; } |
| +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } |
| + |
| +size_t ZSTD_resetDStream(ZSTD_DStream *zds) |
| +{ |
| + zds->stage = zdss_loadHeader; |
| + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; |
| + zds->legacyVersion = 0; |
| + zds->hostageByte = 0; |
| + return ZSTD_frameHeaderSize_prefix; |
| +} |
| + |
| +/* ***** Decompression ***** */ |
| + |
| +ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize) |
| +{ |
| + size_t const length = MIN(dstCapacity, srcSize); |
| + memcpy(dst, src, length); |
| + return length; |
| +} |
| + |
| +size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input) |
| +{ |
| + const char *const istart = (const char *)(input->src) + input->pos; |
| + const char *const iend = (const char *)(input->src) + input->size; |
| + const char *ip = istart; |
| + char *const ostart = (char *)(output->dst) + output->pos; |
| + char *const oend = (char *)(output->dst) + output->size; |
| + char *op = ostart; |
| + U32 someMoreWork = 1; |
| + |
| + while (someMoreWork) { |
| + switch (zds->stage) { |
| + case zdss_init: |
| + ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ |
| + /* fall-through */ |
| + |
| + case zdss_loadHeader: { |
| + size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); |
| + if (ZSTD_isError(hSize)) |
| + return hSize; |
| + if (hSize != 0) { /* need more input */ |
| + size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ |
| + if (toLoad > (size_t)(iend - ip)) { /* not enough input to load full header */ |
| + memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip); |
| + zds->lhSize += iend - ip; |
| + input->pos = input->size; |
| + return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + |
| + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ |
| + } |
| + memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); |
| + zds->lhSize = hSize; |
| + ip += toLoad; |
| + break; |
| + } |
| + |
| + /* check for single-pass mode opportunity */ |
| + if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ |
| + && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) { |
| + size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart); |
| + if (cSize <= (size_t)(iend - istart)) { |
| + size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict); |
| + if (ZSTD_isError(decompressedSize)) |
| + return decompressedSize; |
| + ip = istart + cSize; |
| + op += decompressedSize; |
| + zds->dctx->expected = 0; |
| + zds->stage = zdss_init; |
| + someMoreWork = 0; |
| + break; |
| + } |
| + } |
| + |
| + /* Consume header */ |
| + ZSTD_refDDict(zds->dctx, zds->ddict); |
| + { |
| + size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ |
| + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); |
| + { |
| + size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); |
| + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size)); |
| + } |
| + } |
| + |
| + zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); |
| + if (zds->fParams.windowSize > zds->maxWindowSize) |
| + return ERROR(frameParameter_windowTooLarge); |
| + |
| + /* Buffers are preallocated, but double check */ |
| + { |
| + size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); |
| + size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; |
| + if (zds->inBuffSize < blockSize) { |
| + return ERROR(GENERIC); |
| + } |
| + if (zds->outBuffSize < neededOutSize) { |
| + return ERROR(GENERIC); |
| + } |
| + zds->blockSize = blockSize; |
| + } |
| + zds->stage = zdss_read; |
| + } |
| + /* pass-through */ |
| + |
| + case zdss_read: { |
| + size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); |
| + if (neededInSize == 0) { /* end of frame */ |
| + zds->stage = zdss_init; |
| + someMoreWork = 0; |
| + break; |
| + } |
| + if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */ |
| + const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); |
| + size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, |
| + (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize); |
| + if (ZSTD_isError(decodedSize)) |
| + return decodedSize; |
| + ip += neededInSize; |
| + if (!decodedSize && !isSkipFrame) |
| + break; /* this was just a header */ |
| + zds->outEnd = zds->outStart + decodedSize; |
| + zds->stage = zdss_flush; |
| + break; |
| + } |
| + if (ip == iend) { |
| + someMoreWork = 0; |
| + break; |
| + } /* no more input */ |
| + zds->stage = zdss_load; |
| + /* pass-through */ |
| + } |
| + |
| + case zdss_load: { |
| + size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); |
| + size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ |
| + size_t loadedSize; |
| + if (toLoad > zds->inBuffSize - zds->inPos) |
| + return ERROR(corruption_detected); /* should never happen */ |
| + loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip); |
| + ip += loadedSize; |
| + zds->inPos += loadedSize; |
| + if (loadedSize < toLoad) { |
| + someMoreWork = 0; |
| + break; |
| + } /* not enough input, wait for more */ |
| + |
| + /* decode loaded input */ |
| + { |
| + const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); |
| + size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, |
| + zds->inBuff, neededInSize); |
| + if (ZSTD_isError(decodedSize)) |
| + return decodedSize; |
| + zds->inPos = 0; /* input is consumed */ |
| + if (!decodedSize && !isSkipFrame) { |
| + zds->stage = zdss_read; |
| + break; |
| + } /* this was just a header */ |
| + zds->outEnd = zds->outStart + decodedSize; |
| + zds->stage = zdss_flush; |
| + /* pass-through */ |
| + } |
| + } |
| + |
| + case zdss_flush: { |
| + size_t const toFlushSize = zds->outEnd - zds->outStart; |
| + size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize); |
| + op += flushedSize; |
| + zds->outStart += flushedSize; |
| + if (flushedSize == toFlushSize) { /* flush completed */ |
| + zds->stage = zdss_read; |
| + if (zds->outStart + zds->blockSize > zds->outBuffSize) |
| + zds->outStart = zds->outEnd = 0; |
| + break; |
| + } |
| + /* cannot complete flush */ |
| + someMoreWork = 0; |
| + break; |
| + } |
| + default: |
| + return ERROR(GENERIC); /* impossible */ |
| + } |
| + } |
| + |
| + /* result */ |
| + input->pos += (size_t)(ip - istart); |
| + output->pos += (size_t)(op - ostart); |
| + { |
| + size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); |
| + if (!nextSrcSizeHint) { /* frame fully decoded */ |
| + if (zds->outEnd == zds->outStart) { /* output fully flushed */ |
| + if (zds->hostageByte) { |
| + if (input->pos >= input->size) { |
| + zds->stage = zdss_read; |
| + return 1; |
| + } /* can't release hostage (not present) */ |
| + input->pos++; /* release hostage */ |
| + } |
| + return 0; |
| + } |
| + if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ |
| + input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ |
| + zds->hostageByte = 1; |
| + } |
| + return 1; |
| + } |
| + nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */ |
| + if (zds->inPos > nextSrcSizeHint) |
| + return ERROR(GENERIC); /* should never happen */ |
| + nextSrcSizeHint -= zds->inPos; /* already loaded*/ |
| + return nextSrcSizeHint; |
| + } |
| +} |
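| +
| +/* A minimal streaming usage sketch (not part of the upstream sources). It assumes
| + * `windowSize`, `wksp`, `srcBuf`/`srcSize` and `dstBuf`/`dstSize` are hypothetical
| + * caller-provided values, and that dstBuf is large enough for the whole frame.
| + * ZSTD_decompressStream() returns 0 once a frame is fully decoded and flushed,
| + * a positive size hint when it needs more input, or an error code :
| + *
| + *   size_t const wkspSize = ZSTD_DStreamWorkspaceBound(windowSize);
| + *   ZSTD_DStream *const zds = ZSTD_initDStream(windowSize, wksp, wkspSize);
| + *   ZSTD_inBuffer in = { srcBuf, srcSize, 0 };
| + *   ZSTD_outBuffer out = { dstBuf, dstSize, 0 };
| + *
| + *   while (in.pos < in.size) {
| + *       size_t const ret = ZSTD_decompressStream(zds, &out, &in);
| + *       if (ZSTD_isError(ret))
| + *           return -EINVAL;                     // hypothetical kernel-style error path
| + *   }
| + */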
| + |
| +EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound); |
| +EXPORT_SYMBOL(ZSTD_initDCtx); |
| +EXPORT_SYMBOL(ZSTD_decompressDCtx); |
| +EXPORT_SYMBOL(ZSTD_decompress_usingDict); |
| + |
| +EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound); |
| +EXPORT_SYMBOL(ZSTD_initDDict); |
| +EXPORT_SYMBOL(ZSTD_decompress_usingDDict); |
| + |
| +EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound); |
| +EXPORT_SYMBOL(ZSTD_initDStream); |
| +EXPORT_SYMBOL(ZSTD_initDStream_usingDDict); |
| +EXPORT_SYMBOL(ZSTD_resetDStream); |
| +EXPORT_SYMBOL(ZSTD_decompressStream); |
| +EXPORT_SYMBOL(ZSTD_DStreamInSize); |
| +EXPORT_SYMBOL(ZSTD_DStreamOutSize); |
| + |
| +EXPORT_SYMBOL(ZSTD_findFrameCompressedSize); |
| +EXPORT_SYMBOL(ZSTD_getFrameContentSize); |
| +EXPORT_SYMBOL(ZSTD_findDecompressedSize); |
| + |
| +EXPORT_SYMBOL(ZSTD_isFrame); |
| +EXPORT_SYMBOL(ZSTD_getDictID_fromDict); |
| +EXPORT_SYMBOL(ZSTD_getDictID_fromDDict); |
| +EXPORT_SYMBOL(ZSTD_getDictID_fromFrame); |
| + |
| +EXPORT_SYMBOL(ZSTD_getFrameParams); |
| +EXPORT_SYMBOL(ZSTD_decompressBegin); |
| +EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict); |
| +EXPORT_SYMBOL(ZSTD_copyDCtx); |
| +EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress); |
| +EXPORT_SYMBOL(ZSTD_decompressContinue); |
| +EXPORT_SYMBOL(ZSTD_nextInputType); |
| + |
| +EXPORT_SYMBOL(ZSTD_decompressBlock); |
| +EXPORT_SYMBOL(ZSTD_insertBlock); |
| + |
| +MODULE_LICENSE("Dual BSD/GPL"); |
| +MODULE_DESCRIPTION("Zstd Decompressor"); |
| diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c |
| new file mode 100644 |
| index 0000000..2b0a643 |
| --- /dev/null |
| +++ b/lib/zstd/entropy_common.c |
| @@ -0,0 +1,243 @@ |
| +/* |
| + * Common functions of New Generation Entropy library |
| + * Copyright (C) 2016, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| + |
| +/* ************************************* |
| +* Dependencies |
| +***************************************/ |
| +#include "error_private.h" /* ERR_*, ERROR */ |
| +#include "fse.h" |
| +#include "huf.h" |
| +#include "mem.h" |
| + |
| +/*=== Version ===*/ |
| +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } |
| + |
| +/*=== Error Management ===*/ |
| +unsigned FSE_isError(size_t code) { return ERR_isError(code); } |
| + |
| +unsigned HUF_isError(size_t code) { return ERR_isError(code); } |
| + |
| +/*-************************************************************** |
| +* FSE NCount encoding-decoding |
| +****************************************************************/ |
| +size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize) |
| +{ |
| + const BYTE *const istart = (const BYTE *)headerBuffer; |
| + const BYTE *const iend = istart + hbSize; |
| + const BYTE *ip = istart; |
| + int nbBits; |
| + int remaining; |
| + int threshold; |
| + U32 bitStream; |
| + int bitCount; |
| + unsigned charnum = 0; |
| + int previous0 = 0; |
| + |
| + if (hbSize < 4) |
| + return ERROR(srcSize_wrong); |
| + bitStream = ZSTD_readLE32(ip); |
| + nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ |
| + if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) |
| + return ERROR(tableLog_tooLarge); |
| + bitStream >>= 4; |
| + bitCount = 4; |
| + *tableLogPtr = nbBits; |
| + remaining = (1 << nbBits) + 1; |
| + threshold = 1 << nbBits; |
| + nbBits++; |
| + |
| + while ((remaining > 1) & (charnum <= *maxSVPtr)) { |
| + if (previous0) { |
| + unsigned n0 = charnum; |
| + while ((bitStream & 0xFFFF) == 0xFFFF) { |
| + n0 += 24; |
| + if (ip < iend - 5) { |
| + ip += 2; |
| + bitStream = ZSTD_readLE32(ip) >> bitCount; |
| + } else { |
| + bitStream >>= 16; |
| + bitCount += 16; |
| + } |
| + } |
| + while ((bitStream & 3) == 3) { |
| + n0 += 3; |
| + bitStream >>= 2; |
| + bitCount += 2; |
| + } |
| + n0 += bitStream & 3; |
| + bitCount += 2; |
| + if (n0 > *maxSVPtr) |
| + return ERROR(maxSymbolValue_tooSmall); |
| + while (charnum < n0) |
| + normalizedCounter[charnum++] = 0; |
| + if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) { |
| + ip += bitCount >> 3; |
| + bitCount &= 7; |
| + bitStream = ZSTD_readLE32(ip) >> bitCount; |
| + } else { |
| + bitStream >>= 2; |
| + } |
| + } |
| + { |
| + int const max = (2 * threshold - 1) - remaining; |
| + int count; |
| + |
| + if ((bitStream & (threshold - 1)) < (U32)max) { |
| + count = bitStream & (threshold - 1); |
| + bitCount += nbBits - 1; |
| + } else { |
| + count = bitStream & (2 * threshold - 1); |
| + if (count >= threshold) |
| + count -= max; |
| + bitCount += nbBits; |
| + } |
| + |
| + count--; /* extra accuracy */ |
| + remaining -= count < 0 ? -count : count; /* -1 means +1 */ |
| + normalizedCounter[charnum++] = (short)count; |
| + previous0 = !count; |
| + while (remaining < threshold) { |
| + nbBits--; |
| + threshold >>= 1; |
| + } |
| + |
| + if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) { |
| + ip += bitCount >> 3; |
| + bitCount &= 7; |
| + } else { |
| + bitCount -= (int)(8 * (iend - 4 - ip)); |
| + ip = iend - 4; |
| + } |
| + bitStream = ZSTD_readLE32(ip) >> (bitCount & 31); |
| + } |
| + } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ |
| + if (remaining != 1) |
| + return ERROR(corruption_detected); |
| + if (bitCount > 32) |
| + return ERROR(corruption_detected); |
| + *maxSVPtr = charnum - 1; |
| + |
| + ip += (bitCount + 7) >> 3; |
| + return ip - istart; |
| +} |
| + |
| +/*! HUF_readStats() : |
| + Read compact Huffman tree, saved by HUF_writeCTable(). |
| + `huffWeight` is destination buffer. |
| + `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. |
| +   @return : size read from `src`, or an error code. |
| + Note : Needed by HUF_readCTable() and HUF_readDTableX?() . |
| +*/ |
| +size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + U32 weightTotal; |
| + const BYTE *ip = (const BYTE *)src; |
| + size_t iSize; |
| + size_t oSize; |
| + |
| + if (!srcSize) |
| + return ERROR(srcSize_wrong); |
| + iSize = ip[0]; |
| + /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */ |
| + |
| + if (iSize >= 128) { /* special header */ |
| + oSize = iSize - 127; |
| + iSize = ((oSize + 1) / 2); |
| + if (iSize + 1 > srcSize) |
| + return ERROR(srcSize_wrong); |
| + if (oSize >= hwSize) |
| + return ERROR(corruption_detected); |
| + ip += 1; |
| + { |
| + U32 n; |
| + for (n = 0; n < oSize; n += 2) { |
| + huffWeight[n] = ip[n / 2] >> 4; |
| + huffWeight[n + 1] = ip[n / 2] & 15; |
| + } |
| + } |
| + } else { /* header compressed with FSE (normal case) */ |
| + if (iSize + 1 > srcSize) |
| + return ERROR(srcSize_wrong); |
| + oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */ |
| + if (FSE_isError(oSize)) |
| + return oSize; |
| + } |
| + |
| + /* collect weight stats */ |
| + memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); |
| + weightTotal = 0; |
| + { |
| + U32 n; |
| + for (n = 0; n < oSize; n++) { |
| + if (huffWeight[n] >= HUF_TABLELOG_MAX) |
| + return ERROR(corruption_detected); |
| + rankStats[huffWeight[n]]++; |
| + weightTotal += (1 << huffWeight[n]) >> 1; |
| + } |
| + } |
| + if (weightTotal == 0) |
| + return ERROR(corruption_detected); |
| + |
| + /* get last non-null symbol weight (implied, total must be 2^n) */ |
| + { |
| + U32 const tableLog = BIT_highbit32(weightTotal) + 1; |
| + if (tableLog > HUF_TABLELOG_MAX) |
| + return ERROR(corruption_detected); |
| + *tableLogPtr = tableLog; |
| + /* determine last weight */ |
| + { |
| + U32 const total = 1 << tableLog; |
| + U32 const rest = total - weightTotal; |
| + U32 const verif = 1 << BIT_highbit32(rest); |
| + U32 const lastWeight = BIT_highbit32(rest) + 1; |
| + if (verif != rest) |
| + return ERROR(corruption_detected); /* last value must be a clean power of 2 */ |
| + huffWeight[oSize] = (BYTE)lastWeight; |
| + rankStats[lastWeight]++; |
| + } |
| + } |
| + |
| + /* check tree construction validity */ |
| + if ((rankStats[1] < 2) || (rankStats[1] & 1)) |
| + return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ |
| + |
| + /* results */ |
| + *nbSymbolsPtr = (U32)(oSize + 1); |
| + return iSize + 1; |
| +} |
| diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h |
| new file mode 100644 |
| index 0000000..1a60b31 |
| --- /dev/null |
| +++ b/lib/zstd/error_private.h |
| @@ -0,0 +1,53 @@ |
| +/** |
| + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +/* Note : this module is expected to remain private, do not expose it */ |
| + |
| +#ifndef ERROR_H_MODULE |
| +#define ERROR_H_MODULE |
| + |
| +/* **************************************** |
| +* Dependencies |
| +******************************************/ |
| +#include <linux/types.h> /* size_t */ |
| +#include <linux/zstd.h> /* enum list */ |
| + |
| +/* **************************************** |
| +* Compiler-specific |
| +******************************************/ |
| +#define ERR_STATIC static __attribute__((unused)) |
| + |
| +/*-**************************************** |
| +* Customization (error_public.h) |
| +******************************************/ |
| +typedef ZSTD_ErrorCode ERR_enum; |
| +#define PREFIX(name) ZSTD_error_##name |
| + |
| +/*-**************************************** |
| +* Error codes handling |
| +******************************************/ |
| +#define ERROR(name) ((size_t)-PREFIX(name)) |
| + |
| +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } |
| + |
| +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) |
| +{ |
| + if (!ERR_isError(code)) |
| + return (ERR_enum)0; |
| + return (ERR_enum)(0 - code); |
| +} |
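| +
| +/* Illustrative note (not part of the upstream sources) : errors are encoded as
| + * the topmost values of size_t, so a call site can round-trip them like this,
| + * where handleError() is a hypothetical caller routine :
| + *
| + *   size_t const ret = ERROR(srcSize_wrong);    // == (size_t)-ZSTD_error_srcSize_wrong
| + *   if (ERR_isError(ret))
| + *       handleError(ERR_getErrorCode(ret));     // recovers ZSTD_error_srcSize_wrong
| + */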
| + |
| +#endif /* ERROR_H_MODULE */ |
| diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h |
| new file mode 100644 |
| index 0000000..7460ab0 |
| --- /dev/null |
| +++ b/lib/zstd/fse.h |
| @@ -0,0 +1,575 @@ |
| +/* |
| + * FSE : Finite State Entropy codec |
| + * Public Prototypes declaration |
| + * Copyright (C) 2013-2016, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| +#ifndef FSE_H |
| +#define FSE_H |
| + |
| +/*-***************************************** |
| +* Dependencies |
| +******************************************/ |
| +#include <linux/types.h> /* size_t, ptrdiff_t */ |
| + |
| +/*-***************************************** |
| +* FSE_PUBLIC_API : control library symbols visibility |
| +******************************************/ |
| +#define FSE_PUBLIC_API |
| + |
| +/*------ Version ------*/ |
| +#define FSE_VERSION_MAJOR 0 |
| +#define FSE_VERSION_MINOR 9 |
| +#define FSE_VERSION_RELEASE 0 |
| + |
| +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE |
| +#define FSE_QUOTE(str) #str |
| +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) |
| +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) |
| + |
| +#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE) |
| +FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ |
| + |
| +/*-***************************************** |
| +* Tool functions |
| +******************************************/ |
| +FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ |
| + |
| +/* Error Management */ |
| +FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ |
| + |
| +/*-***************************************** |
| +* FSE detailed API |
| +******************************************/ |
| +/*! |
| +FSE_compress() does the following: |
| +1. count symbol occurrence from source[] into table count[] |
| +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) |
| +3. save normalized counters to memory buffer using writeNCount() |
| +4. build encoding table 'CTable' from normalized counters |
| +5. encode the data stream using encoding table 'CTable' |
| + |
| +FSE_decompress() does the following: |
| +1. read normalized counters with readNCount() |
| +2. build decoding table 'DTable' from normalized counters |
| +3. decode the data stream using decoding table 'DTable' |
| + |
| +The following API allows targeting specific sub-functions for advanced tasks. |
| +For example, it's possible to compress several blocks using the same 'CTable', |
| +or to save and provide normalized distribution using external method. |
| +*/ |
| + |
| +/* *** COMPRESSION *** */ |
| +/*! FSE_optimalTableLog(): |
| + dynamically downsize 'tableLog' when conditions are met. |
| + It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. |
| + @return : recommended tableLog (necessarily <= 'maxTableLog') */ |
| +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); |
| + |
| +/*! FSE_normalizeCount(): |
| + normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) |
| + 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). |
| + @return : tableLog, |
| + or an errorCode, which can be tested using FSE_isError() */ |
| +FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue); |
| + |
| +/*! FSE_NCountWriteBound(): |
| + Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. |
| + Typically useful for allocation purpose. */ |
| +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); |
| + |
| +/*! FSE_writeNCount(): |
| + Compactly save 'normalizedCounter' into 'buffer'. |
| + @return : size of the compressed table, |
| + or an errorCode, which can be tested using FSE_isError(). */ |
| +FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); |
| + |
| +/*! Constructor and Destructor of FSE_CTable. |
| + Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ |
| +typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ |
| + |
| +/*! FSE_compress_usingCTable(): |
| + Compress `src` using `ct` into `dst` which must be already allocated. |
| + @return : size of compressed data (<= `dstCapacity`), |
| + or 0 if compressed data could not fit into `dst`, |
| + or an errorCode, which can be tested using FSE_isError() */ |
| +FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct); |
| + |
| +/*! |
| +Tutorial : |
| +---------- |
| +The first step is to count all symbols. FSE_count() does this job very fast. |
| +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. |
| +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] |
| +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) |
| +FSE_count() will return the number of occurrences of the most frequent symbol. |
| +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. |
| +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). |
| + |
| +The next step is to normalize the frequencies. |
| +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. |
| +It also guarantees a minimum of 1 to any Symbol with frequency >= 1. |
| +You can use 'tableLog'==0 to mean "use default tableLog value". |
| +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), |
| +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). |
| + |
| +The result of FSE_normalizeCount() will be saved into a table, |
| +called 'normalizedCounter', which is a table of signed short. |
| +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. |
| +The return value is tableLog if everything proceeded as expected. |
| +It is 0 if there is a single symbol within distribution. |
| +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). |
| + |
| +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). |
| +'buffer' must be already allocated. |
| +For guaranteed success, buffer size must be at least FSE_headerBound(). |
| +The result of the function is the number of bytes written into 'buffer'. |
| +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). |
| + |
| +'normalizedCounter' can then be used to create the compression table 'CTable'. |
| +The space required by 'CTable' must be already allocated, using FSE_createCTable(). |
| +You can then use FSE_buildCTable() to fill 'CTable'. |
| +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). |
| + |
| +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). |
| +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' |
| +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. |
| +If it returns '0', compressed data could not fit into 'dst'. |
| +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). |
| +*/ |
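| +
| +/*! A condensed sketch of the compression steps above, using the detailed API and
| +    the workspace-based variants declared further down in this header. This is an
| +    illustrative example only (not part of the upstream sources); `count`, `norm`,
| +    `ct`, `countWksp`, `ctWksp` and the related sizes are hypothetical
| +    caller-provided buffers :
| +
| +      unsigned maxSymbolValue = 255;
| +      size_t const maxCount = FSE_count_wksp(count, &maxSymbolValue, src, srcSize, countWksp);
| +      unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
| +      size_t const normRet = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
| +      size_t const hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);
| +      size_t const ctRet = FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog, ctWksp, ctWkspSize);
| +      size_t const cSize = FSE_compress_usingCTable(dst + hSize, dstCapacity - hSize, src, srcSize, ct);
| +
| +    Each return value should be checked with FSE_isError() before moving to the
| +    next step; cSize == 0 means the compressed data did not fit into dst.
| +*/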
| + |
| +/* *** DECOMPRESSION *** */ |
| + |
| +/*! FSE_readNCount(): |
| + Read compactly saved 'normalizedCounter' from 'rBuffer'. |
| + @return : size read from 'rBuffer', |
| + or an errorCode, which can be tested using FSE_isError(). |
| + maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ |
| +FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize); |
| + |
| +/*! Constructor and Destructor of FSE_DTable. |
| + Note that its size depends on 'tableLog' */ |
| +typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ |
| + |
| +/*! FSE_buildDTable(): |
| + Builds 'dt', which must be already allocated, using FSE_createDTable(). |
| + return : 0, or an errorCode, which can be tested using FSE_isError() */ |
| +FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize); |
| + |
| +/*! FSE_decompress_usingDTable(): |
| + Decompress compressed source `cSrc` of size `cSrcSize` using `dt` |
| + into `dst` which must be already allocated. |
| + @return : size of regenerated data (necessarily <= `dstCapacity`), |
| + or an errorCode, which can be tested using FSE_isError() */ |
| +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt); |
| + |
| +/*! |
| +Tutorial : |
| +---------- |
| +(Note : these functions only decompress FSE-compressed blocks. |
| + If block is uncompressed, use memcpy() instead |
| + If block is a single repeated byte, use memset() instead ) |
| + |
| +The first step is to obtain the normalized frequencies of symbols. |
| +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). |
| +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. |
| +In practice, that means it's necessary to know 'maxSymbolValue' beforehand, |
| +or size the table to handle worst case situations (typically 256). |
| +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. |
| +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. |
| +Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. |
| +If there is an error, the function will return an error code, which can be tested using FSE_isError(). |
| + |
| +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. |
| +This is performed by the function FSE_buildDTable(). |
| +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). |
| +If there is an error, the function will return an error code, which can be tested using FSE_isError(). |
| + |
| +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). |
| +`cSrcSize` must be strictly correct, otherwise decompression will fail. |
| +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). |
| +If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small) |
| +*/ |
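| +
| +/*! A condensed sketch of the decompression steps above. This is an illustrative
| +    example only (not part of the upstream sources); `norm`, `dt`, `wksp` and the
| +    sizes are hypothetical caller-provided buffers, and `cSrcSize` is assumed to
| +    cover exactly the NCount header plus the compressed payload :
| +
| +      unsigned maxSymbolValue = 255;
| +      unsigned tableLog;
| +      size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
| +      size_t const dtRet = FSE_buildDTable_wksp(dt, norm, maxSymbolValue, tableLog, wksp, wkspSize);
| +      size_t const dSize = FSE_decompress_usingDTable(dst, dstCapacity, (const BYTE *)cSrc + hSize, cSrcSize - hSize, dt);
| +
| +    As above, each return value should be checked with FSE_isError().
| +*/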
| + |
| +/* *** Dependency *** */ |
| +#include "bitstream.h" |
| + |
| +/* ***************************************** |
| +* Static allocation |
| +*******************************************/ |
| +/* FSE buffer bounds */ |
| +#define FSE_NCOUNTBOUND 512 |
| +#define FSE_BLOCKBOUND(size) (size + (size >> 7)) |
| +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ |
| + |
| +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ |
| +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2)) |
| +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog)) |
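| +
| +/* For instance, tables sized for a 12-bit tableLog and a 255 maxSymbolValue can
| +   be allocated statically (illustrative only, the values are arbitrary examples) :
| +
| +     static FSE_CTable cTable[FSE_CTABLE_SIZE_U32(12, 255)];
| +     static FSE_DTable dTable[FSE_DTABLE_SIZE_U32(12)];
| +*/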
| + |
| +/* ***************************************** |
| +* FSE advanced API |
| +*******************************************/ |
| +/* FSE_count_wksp() : |
| + * Same as FSE_count(), but using an externally provided scratch buffer. |
| + * `workSpace` must be a table of at least `1024` unsigned |
| + */ |
| +size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace); |
| + |
| +/* FSE_countFast_wksp() : |
| + * Same as FSE_countFast(), but using an externally provided scratch buffer. |
| + * `workSpace` must be a table of minimum `1024` unsigned |
| + */ |
| +size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace); |
| + |
| +/*! FSE_count_simple |
| + * Same as FSE_countFast(), but does not use any additional memory (not even on stack). |
| + * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). |
| +*/ |
| +size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize); |
| + |
| +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); |
| +/**< same as FSE_optimalTableLog(), which uses `minus==2` */ |
| + |
| +size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits); |
| +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ |
| + |
| +size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue); |
| +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ |
| + |
| +/* FSE_buildCTable_wksp() : |
| + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). |
| + * `wkspSize` must be >= `(1<<tableLog)`. |
| + */ |
| +size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize); |
| + |
| +size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits); |
| +/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */ |
| + |
| +size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue); |
| +/**< build a fake FSE_DTable, designed to always generate the same symbolValue */ |
| + |
| +size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize); |
| +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */ |
| + |
| +/* ***************************************** |
| +* FSE symbol compression API |
| +*******************************************/ |
| +/*! |
| + This API consists of small unitary functions, which highly benefit from being inlined. |
| + Hence their bodies are included in the next section. |
| +*/ |
| +typedef struct { |
| + ptrdiff_t value; |
| + const void *stateTable; |
| + const void *symbolTT; |
| + unsigned stateLog; |
| +} FSE_CState_t; |
| + |
| +static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct); |
| + |
| +static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol); |
| + |
| +static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr); |
| + |
| +/**< |
| +These functions are inner components of FSE_compress_usingCTable(). |
| +They allow the creation of custom streams, mixing multiple tables and bit sources. |
| + |
| +A key property to keep in mind is that encoding and decoding are done **in reverse direction**. |
| +So the first symbol you will encode is the last you will decode, like a LIFO stack. |
| + |
| +You will need a few variables to track your CStream. They are : |
| + |
| +FSE_CTable ct; // Provided by FSE_buildCTable() |
| +BIT_CStream_t bitStream; // bitStream tracking structure |
| +FSE_CState_t state; // State tracking structure (can have several) |
| + |
| + |
| +The first thing to do is to init bitStream and state. |
| + size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize); |
| + FSE_initCState(&state, ct); |
| + |
| +Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError(); |
| +You can then encode your input data, byte after byte. |
| +FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time. |
| +Remember decoding will be done in reverse direction. |
| + FSE_encodeSymbol(&bitStream, &state, symbol); |
| + |
| +At any time, you can also add any bit sequence. |
| +Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders |
| + BIT_addBits(&bitStream, bitField, nbBits); |
| + |
| +The above methods don't commit data to memory; they just store it into a local register, for speed. |
| +The local register size is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t). |
| +Writing data to memory is a manual operation, performed by the flushBits function. |
| + BIT_flushBits(&bitStream); |
| + |
| +Your last FSE encoding operation shall be to flush your last state value(s). |
| + FSE_flushCState(&bitStream, &state); |
| + |
| +Finally, you must close the bitStream. |
| +The function returns the size of CStream in bytes. |
| +If the data couldn't fit into dstBuffer, it will return 0 (== not compressible). |
| +If there is an error, it returns an errorCode (which can be tested using FSE_isError()). |
| + size_t size = BIT_closeCStream(&bitStream); |
| +*/ |
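| + |
| +/* |
| +A consolidated sketch of the above, assuming a single state, byte symbols, and input |
| +delimited by the (hypothetical) pointers 'istart' and 'ip', where 'ip' starts one past |
| +the last input byte so that decoding regenerates the data in forward order. Error |
| +handling is abbreviated : |
| + |
| +    BIT_CStream_t bitStream; |
| +    FSE_CState_t state; |
| +    size_t const initError = BIT_initCStream(&bitStream, dstBuffer, maxDstSize); |
| +    if (FSE_isError(initError)) return 0;           // not enough space for a bitstream |
| +    FSE_initCState(&state, ct); |
| +    while (ip > istart) { |
| +        FSE_encodeSymbol(&bitStream, &state, *--ip); // encode backwards : last byte first |
| +        BIT_flushBits(&bitStream); |
| +    } |
| +    FSE_flushCState(&bitStream, &state); |
| +    return BIT_closeCStream(&bitStream); |
| +*/ |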
| + |
| +/* ***************************************** |
| +* FSE symbol decompression API |
| +*******************************************/ |
| +typedef struct { |
| + size_t state; |
| + const void *table; /* precise table may vary, depending on U16 */ |
| +} FSE_DState_t; |
| + |
| +static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt); |
| + |
| +static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD); |
| + |
| +static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr); |
| + |
| +/**< |
| +Let's now decompose FSE_decompress_usingDTable() into its unitary components. |
| +You will decode FSE-encoded symbols from the bitStream, |
| +and also any other bitFields you put in, **in reverse order**. |
| + |
| +You will need a few variables to track your bitStream. They are : |
| + |
| +BIT_DStream_t DStream; // Stream context |
| +FSE_DState_t DState; // State context. Multiple ones are possible |
| +FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable() |
| + |
| +The first thing to do is to init the bitStream. |
| + errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize); |
| + |
| +You should then retrieve your initial state(s) |
| +(in reverse flushing order if you have several) : |
| + errorCode = FSE_initDState(&DState, &DStream, DTablePtr); |
| + |
| +You can then decode your data, symbol after symbol. |
| +For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'. |
| +Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out). |
| + unsigned char symbol = FSE_decodeSymbol(&DState, &DStream); |
| + |
| +You can retrieve any bitFields you previously stored into the bitStream (in reverse order). |
| +Note : maximum allowed nbBits is 25, for 32-bit compatibility |
| + size_t bitField = BIT_readBits(&DStream, nbBits); |
| + |
| +All above operations only read from local register (which size depends on size_t). |
| +Refueling the register from memory is manually performed by the reload method. |
| + endSignal = BIT_reloadDStream(&DStream); |
| + |
| +The BIT_reloadDStream() result tells whether there is still more data to read from DStream. |
| +BIT_DStream_unfinished : there is still some data left in the DStream. |
| +BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled. |
| +BIT_DStream_completed : DStream reached its exact end, corresponding in general to decompression completed. |
| +BIT_DStream_overflow : DStream went too far. Decompression result is corrupted. |
| + |
| +When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop, |
| +to properly detect the exact end of stream. |
| +After each decoded symbol, check if DStream is fully consumed using this simple test : |
| + BIT_reloadDStream(&DStream) >= BIT_DStream_completed |
| + |
| +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. |
| +Checking if DStream has reached its end is performed by : |
| + BIT_endOfDStream(&DStream); |
| +Also check the states : some symbols may still be left there, if any high-probability symbols (>50%) are possible. |
| + FSE_endOfDState(&DState); |
| +*/ |
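| + |
| +/* |
| +A minimal sketch of the final verification described above (the ERROR() code shown is |
| +illustrative; use whichever error convention the caller expects) : |
| + |
| +    if (!BIT_endOfDStream(&DStream) || !FSE_endOfDState(&DState)) |
| +        return ERROR(corruption_detected); |
| +*/ |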
| + |
| +/* ***************************************** |
| +* FSE unsafe API |
| +*******************************************/ |
| +static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD); |
| +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ |
| + |
| +/* ***************************************** |
| +* Implementation of inlined functions |
| +*******************************************/ |
| +typedef struct { |
| + int deltaFindState; |
| + U32 deltaNbBits; |
| +} FSE_symbolCompressionTransform; /* total 8 bytes */ |
| + |
| +ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct) |
| +{ |
| + const void *ptr = ct; |
| + const U16 *u16ptr = (const U16 *)ptr; |
| + const U32 tableLog = ZSTD_read16(ptr); |
| + statePtr->value = (ptrdiff_t)1 << tableLog; |
| + statePtr->stateTable = u16ptr + 2; |
| + statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1)); |
| + statePtr->stateLog = tableLog; |
| +} |
| + |
| +/*! FSE_initCState2() : |
| +* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) |
| +* uses the smallest state value possible, saving the cost of this symbol */ |
| +ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol) |
| +{ |
| + FSE_initCState(statePtr, ct); |
| + { |
| + const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol]; |
| + const U16 *stateTable = (const U16 *)(statePtr->stateTable); |
| + U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16); |
| + statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; |
| + statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; |
| + } |
| +} |
| + |
| +ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol) |
| +{ |
| + const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol]; |
| + const U16 *const stateTable = (const U16 *)(statePtr->stateTable); |
| + U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); |
| + BIT_addBits(bitC, statePtr->value, nbBitsOut); |
| + statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; |
| +} |
| + |
| +ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr) |
| +{ |
| + BIT_addBits(bitC, statePtr->value, statePtr->stateLog); |
| + BIT_flushBits(bitC); |
| +} |
| + |
| +/* ====== Decompression ====== */ |
| + |
| +typedef struct { |
| + U16 tableLog; |
| + U16 fastMode; |
| +} FSE_DTableHeader; /* sizeof U32 */ |
| + |
| +typedef struct { |
| + unsigned short newState; |
| + unsigned char symbol; |
| + unsigned char nbBits; |
| +} FSE_decode_t; /* size == U32 */ |
| + |
| +ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt) |
| +{ |
| + const void *ptr = dt; |
| + const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr; |
| + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); |
| + BIT_reloadDStream(bitD); |
| + DStatePtr->table = dt + 1; |
| +} |
| + |
| +ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr) |
| +{ |
| + FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; |
| + return DInfo.symbol; |
| +} |
| + |
| +ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) |
| +{ |
| + FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; |
| + U32 const nbBits = DInfo.nbBits; |
| + size_t const lowBits = BIT_readBits(bitD, nbBits); |
| + DStatePtr->state = DInfo.newState + lowBits; |
| +} |
| + |
| +ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) |
| +{ |
| + FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; |
| + U32 const nbBits = DInfo.nbBits; |
| + BYTE const symbol = DInfo.symbol; |
| + size_t const lowBits = BIT_readBits(bitD, nbBits); |
| + |
| + DStatePtr->state = DInfo.newState + lowBits; |
| + return symbol; |
| +} |
| + |
| +/*! FSE_decodeSymbolFast() : |
| + unsafe, only works if no symbol has a probability > 50% */ |
| +ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD) |
| +{ |
| + FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; |
| + U32 const nbBits = DInfo.nbBits; |
| + BYTE const symbol = DInfo.symbol; |
| + size_t const lowBits = BIT_readBitsFast(bitD, nbBits); |
| + |
| + DStatePtr->state = DInfo.newState + lowBits; |
| + return symbol; |
| +} |
| + |
| +ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; } |
| + |
| +/* ************************************************************** |
| +* Tuning parameters |
| +****************************************************************/ |
| +/*!MEMORY_USAGE : |
| +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.) |
| +* Increasing memory usage improves compression ratio |
| +* Reduced memory usage can improve speed, due to cache effect |
| +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ |
| +#ifndef FSE_MAX_MEMORY_USAGE |
| +#define FSE_MAX_MEMORY_USAGE 14 |
| +#endif |
| +#ifndef FSE_DEFAULT_MEMORY_USAGE |
| +#define FSE_DEFAULT_MEMORY_USAGE 13 |
| +#endif |
| + |
| +/*!FSE_MAX_SYMBOL_VALUE : |
| +* Maximum symbol value authorized. |
| +* Required for proper stack allocation */ |
| +#ifndef FSE_MAX_SYMBOL_VALUE |
| +#define FSE_MAX_SYMBOL_VALUE 255 |
| +#endif |
| + |
| +/* ************************************************************** |
| +* template functions type & suffix |
| +****************************************************************/ |
| +#define FSE_FUNCTION_TYPE BYTE |
| +#define FSE_FUNCTION_EXTENSION |
| +#define FSE_DECODE_TYPE FSE_decode_t |
| + |
| +/* *************************************************************** |
| +* Constants |
| +*****************************************************************/ |
| +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2) |
| +#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG) |
| +#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1) |
| +#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2) |
| +#define FSE_MIN_TABLELOG 5 |
| + |
| +#define FSE_TABLELOG_ABSOLUTE_MAX 15 |
| +#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX |
| +#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" |
| +#endif |
| + |
| +#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3) |
| + |
| +#endif /* FSE_H */ |
| diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c |
| new file mode 100644 |
| index 0000000..ef3d174 |
| --- /dev/null |
| +++ b/lib/zstd/fse_compress.c |
| @@ -0,0 +1,795 @@ |
| +/* |
| + * FSE : Finite State Entropy encoder |
| + * Copyright (C) 2013-2015, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| + |
| +/* ************************************************************** |
| +* Compiler specifics |
| +****************************************************************/ |
| +#define FORCE_INLINE static __always_inline |
| + |
| +/* ************************************************************** |
| +* Includes |
| +****************************************************************/ |
| +#include "bitstream.h" |
| +#include "fse.h" |
| +#include <linux/compiler.h> |
| +#include <linux/kernel.h> |
| +#include <linux/math64.h> |
| +#include <linux/string.h> /* memcpy, memset */ |
| + |
| +/* ************************************************************** |
| +* Error Management |
| +****************************************************************/ |
| +#define FSE_STATIC_ASSERT(c) \ |
| + { \ |
| + enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ |
| + } /* use only *after* variable declarations */ |
| + |
| +/* ************************************************************** |
| +* Templates |
| +****************************************************************/ |
| +/* |
| + designed to be included |
| + for type-specific functions (template emulation in C) |
| + Objective is to write these functions only once, for improved maintenance |
| +*/ |
| + |
| +/* safety checks */ |
| +#ifndef FSE_FUNCTION_EXTENSION |
| +#error "FSE_FUNCTION_EXTENSION must be defined" |
| +#endif |
| +#ifndef FSE_FUNCTION_TYPE |
| +#error "FSE_FUNCTION_TYPE must be defined" |
| +#endif |
| + |
| +/* Function names */ |
| +#define FSE_CAT(X, Y) X##Y |
| +#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y) |
| +#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y) |
| + |
| +/* Function templates */ |
| + |
| +/* FSE_buildCTable_wksp() : |
| + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). |
| + * wkspSize should be sized to handle the worst case situation, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)` |
| + * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements |
| + */ |
| +size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize) |
| +{ |
| + U32 const tableSize = 1 << tableLog; |
| + U32 const tableMask = tableSize - 1; |
| + void *const ptr = ct; |
| + U16 *const tableU16 = ((U16 *)ptr) + 2; |
| + void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1); |
| + FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT); |
| + U32 const step = FSE_TABLESTEP(tableSize); |
| + U32 highThreshold = tableSize - 1; |
| + |
| + U32 *cumul; |
| + FSE_FUNCTION_TYPE *tableSymbol; |
| + size_t spaceUsed32 = 0; |
| + |
| + cumul = (U32 *)workspace + spaceUsed32; |
| + spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2; |
| + tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(tableLog_tooLarge); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + |
| + /* CTable header */ |
| + tableU16[-2] = (U16)tableLog; |
| + tableU16[-1] = (U16)maxSymbolValue; |
| + |
| + /* For explanations on how to distribute symbol values over the table : |
| + * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ |
| + |
| + /* symbol start positions */ |
| + { |
| + U32 u; |
| + cumul[0] = 0; |
| + for (u = 1; u <= maxSymbolValue + 1; u++) { |
| + if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */ |
| + cumul[u] = cumul[u - 1] + 1; |
| + tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1); |
| + } else { |
| + cumul[u] = cumul[u - 1] + normalizedCounter[u - 1]; |
| + } |
| + } |
| + cumul[maxSymbolValue + 1] = tableSize + 1; |
| + } |
| + |
| + /* Spread symbols */ |
| + { |
| + U32 position = 0; |
| + U32 symbol; |
| + for (symbol = 0; symbol <= maxSymbolValue; symbol++) { |
| + int nbOccurences; |
| + for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) { |
| + tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol; |
| + position = (position + step) & tableMask; |
| + while (position > highThreshold) |
| + position = (position + step) & tableMask; /* Low proba area */ |
| + } |
| + } |
| + |
| + if (position != 0) |
| + return ERROR(GENERIC); /* Must have gone through all positions */ |
| + } |
| + |
| + /* Build table */ |
| + { |
| + U32 u; |
| + for (u = 0; u < tableSize; u++) { |
| + FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */ |
| + tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */ |
| + } |
| + } |
| + |
| + /* Build Symbol Transformation Table */ |
| + { |
| + unsigned total = 0; |
| + unsigned s; |
| + for (s = 0; s <= maxSymbolValue; s++) { |
| + switch (normalizedCounter[s]) { |
| + case 0: break; |
| + |
| + case -1: |
| + case 1: |
| + symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog); |
| + symbolTT[s].deltaFindState = total - 1; |
| + total++; |
| + break; |
| + default: { |
| + U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1); |
| + U32 const minStatePlus = normalizedCounter[s] << maxBitsOut; |
| + symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; |
| + symbolTT[s].deltaFindState = total - normalizedCounter[s]; |
| + total += normalizedCounter[s]; |
| + } |
| + } |
| + } |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/*-************************************************************** |
| +* FSE NCount encoding-decoding |
| +****************************************************************/ |
| +size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog) |
| +{ |
| + size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3; |
| + return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */ |
| +} |
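| + |
| +/* Worked example : with maxSymbolValue==255 and tableLog==12, |
| + * maxHeaderSize = ((256 * 12) >> 3) + 3 = 387 bytes (worst case). */ |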
| + |
| +static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, |
| + unsigned writeIsSafe) |
| +{ |
| + BYTE *const ostart = (BYTE *)header; |
| + BYTE *out = ostart; |
| + BYTE *const oend = ostart + headerBufferSize; |
| + int nbBits; |
| + const int tableSize = 1 << tableLog; |
| + int remaining; |
| + int threshold; |
| + U32 bitStream; |
| + int bitCount; |
| + unsigned charnum = 0; |
| + int previous0 = 0; |
| + |
| + bitStream = 0; |
| + bitCount = 0; |
| + /* Table Size */ |
| + bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount; |
| + bitCount += 4; |
| + |
| + /* Init */ |
| + remaining = tableSize + 1; /* +1 for extra accuracy */ |
| + threshold = tableSize; |
| + nbBits = tableLog + 1; |
| + |
| + while (remaining > 1) { /* stops at 1 */ |
| + if (previous0) { |
| + unsigned start = charnum; |
| + while (!normalizedCounter[charnum]) |
| + charnum++; |
| + while (charnum >= start + 24) { |
| + start += 24; |
| + bitStream += 0xFFFFU << bitCount; |
| + if ((!writeIsSafe) && (out > oend - 2)) |
| + return ERROR(dstSize_tooSmall); /* Buffer overflow */ |
| + out[0] = (BYTE)bitStream; |
| + out[1] = (BYTE)(bitStream >> 8); |
| + out += 2; |
| + bitStream >>= 16; |
| + } |
| + while (charnum >= start + 3) { |
| + start += 3; |
| + bitStream += 3 << bitCount; |
| + bitCount += 2; |
| + } |
| + bitStream += (charnum - start) << bitCount; |
| + bitCount += 2; |
| + if (bitCount > 16) { |
| + if ((!writeIsSafe) && (out > oend - 2)) |
| + return ERROR(dstSize_tooSmall); /* Buffer overflow */ |
| + out[0] = (BYTE)bitStream; |
| + out[1] = (BYTE)(bitStream >> 8); |
| + out += 2; |
| + bitStream >>= 16; |
| + bitCount -= 16; |
| + } |
| + } |
| + { |
| + int count = normalizedCounter[charnum++]; |
| + int const max = (2 * threshold - 1) - remaining; |
| + remaining -= count < 0 ? -count : count; |
| + count++; /* +1 for extra accuracy */ |
| + if (count >= threshold) |
| + count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */ |
| + bitStream += count << bitCount; |
| + bitCount += nbBits; |
| + bitCount -= (count < max); |
| + previous0 = (count == 1); |
| + if (remaining < 1) |
| + return ERROR(GENERIC); |
| + while (remaining < threshold) |
| + nbBits--, threshold >>= 1; |
| + } |
| + if (bitCount > 16) { |
| + if ((!writeIsSafe) && (out > oend - 2)) |
| + return ERROR(dstSize_tooSmall); /* Buffer overflow */ |
| + out[0] = (BYTE)bitStream; |
| + out[1] = (BYTE)(bitStream >> 8); |
| + out += 2; |
| + bitStream >>= 16; |
| + bitCount -= 16; |
| + } |
| + } |
| + |
| + /* flush remaining bitStream */ |
| + if ((!writeIsSafe) && (out > oend - 2)) |
| + return ERROR(dstSize_tooSmall); /* Buffer overflow */ |
| + out[0] = (BYTE)bitStream; |
| + out[1] = (BYTE)(bitStream >> 8); |
| + out += (bitCount + 7) / 8; |
| + |
| + if (charnum > maxSymbolValue + 1) |
| + return ERROR(GENERIC); |
| + |
| + return (out - ostart); |
| +} |
| + |
| +size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) |
| +{ |
| + if (tableLog > FSE_MAX_TABLELOG) |
| + return ERROR(tableLog_tooLarge); /* Unsupported */ |
| + if (tableLog < FSE_MIN_TABLELOG) |
| + return ERROR(GENERIC); /* Unsupported */ |
| + |
| + if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) |
| + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); |
| + |
| + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); |
| +} |
| + |
| +/*-************************************************************** |
| +* Counting histogram |
| +****************************************************************/ |
| +/*! FSE_count_simple |
| + This function counts byte values within `src`, and stores the histogram into the table `count`. |
| + It doesn't use any additional memory. |
| + But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. |
| + For this reason, prefer using a table `count` with 256 elements. |
| + @return : count of most numerous element |
| +*/ |
| +size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)src; |
| + const BYTE *const end = ip + srcSize; |
| + unsigned maxSymbolValue = *maxSymbolValuePtr; |
| + unsigned max = 0; |
| + |
| + memset(count, 0, (maxSymbolValue + 1) * sizeof(*count)); |
| + if (srcSize == 0) { |
| + *maxSymbolValuePtr = 0; |
| + return 0; |
| + } |
| + |
| + while (ip < end) |
| + count[*ip++]++; |
| + |
| + while (!count[maxSymbolValue]) |
| + maxSymbolValue--; |
| + *maxSymbolValuePtr = maxSymbolValue; |
| + |
| + { |
| + U32 s; |
| + for (s = 0; s <= maxSymbolValue; s++) |
| + if (count[s] > max) |
| + max = count[s]; |
| + } |
| + |
| + return (size_t)max; |
| +} |
| + |
| +/* FSE_count_parallel_wksp() : |
| + * Same as FSE_count_parallel(), but using an externally provided scratch buffer. |
| + * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)` */ |
| +static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax, |
| + unsigned *const workSpace) |
| +{ |
| + const BYTE *ip = (const BYTE *)source; |
| + const BYTE *const iend = ip + sourceSize; |
| + unsigned maxSymbolValue = *maxSymbolValuePtr; |
| + unsigned max = 0; |
| + U32 *const Counting1 = workSpace; |
| + U32 *const Counting2 = Counting1 + 256; |
| + U32 *const Counting3 = Counting2 + 256; |
| + U32 *const Counting4 = Counting3 + 256; |
| + |
| + memset(Counting1, 0, 4 * 256 * sizeof(unsigned)); |
| + |
| + /* safety checks */ |
| + if (!sourceSize) { |
| + memset(count, 0, maxSymbolValue + 1); |
| + *maxSymbolValuePtr = 0; |
| + return 0; |
| + } |
| + if (!maxSymbolValue) |
| + maxSymbolValue = 255; /* 0 == default */ |
| + |
| + /* by stripes of 16 bytes */ |
| + { |
| + U32 cached = ZSTD_read32(ip); |
| + ip += 4; |
| + while (ip < iend - 15) { |
| + U32 c = cached; |
| + cached = ZSTD_read32(ip); |
| + ip += 4; |
| + Counting1[(BYTE)c]++; |
| + Counting2[(BYTE)(c >> 8)]++; |
| + Counting3[(BYTE)(c >> 16)]++; |
| + Counting4[c >> 24]++; |
| + c = cached; |
| + cached = ZSTD_read32(ip); |
| + ip += 4; |
| + Counting1[(BYTE)c]++; |
| + Counting2[(BYTE)(c >> 8)]++; |
| + Counting3[(BYTE)(c >> 16)]++; |
| + Counting4[c >> 24]++; |
| + c = cached; |
| + cached = ZSTD_read32(ip); |
| + ip += 4; |
| + Counting1[(BYTE)c]++; |
| + Counting2[(BYTE)(c >> 8)]++; |
| + Counting3[(BYTE)(c >> 16)]++; |
| + Counting4[c >> 24]++; |
| + c = cached; |
| + cached = ZSTD_read32(ip); |
| + ip += 4; |
| + Counting1[(BYTE)c]++; |
| + Counting2[(BYTE)(c >> 8)]++; |
| + Counting3[(BYTE)(c >> 16)]++; |
| + Counting4[c >> 24]++; |
| + } |
| + ip -= 4; |
| + } |
| + |
| + /* finish last symbols */ |
| + while (ip < iend) |
| + Counting1[*ip++]++; |
| + |
| + if (checkMax) { /* verify stats will fit into destination table */ |
| + U32 s; |
| + for (s = 255; s > maxSymbolValue; s--) { |
| + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; |
| + if (Counting1[s]) |
| + return ERROR(maxSymbolValue_tooSmall); |
| + } |
| + } |
| + |
| + { |
| + U32 s; |
| + for (s = 0; s <= maxSymbolValue; s++) { |
| + count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; |
| + if (count[s] > max) |
| + max = count[s]; |
| + } |
| + } |
| + |
| + while (!count[maxSymbolValue]) |
| + maxSymbolValue--; |
| + *maxSymbolValuePtr = maxSymbolValue; |
| + return (size_t)max; |
| +} |
| + |
| +/* FSE_countFast_wksp() : |
| + * Same as FSE_countFast(), but using an externally provided scratch buffer. |
| + * `workSpace` must be a table of at least `1024` unsigned */ |
| +size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace) |
| +{ |
| + if (sourceSize < 1500) |
| + return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); |
| + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); |
| +} |
| + |
| +/* FSE_count_wksp() : |
| + * Same as FSE_count(), but using an externally provided scratch buffer. |
| + * `workSpace` must be a table of at least `1024` unsigned */ |
| +size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace) |
| +{ |
| + if (*maxSymbolValuePtr < 255) |
| + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); |
| + *maxSymbolValuePtr = 255; |
| + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); |
| +} |
| + |
| +/*-************************************************************** |
| +* FSE Compression Code |
| +****************************************************************/ |
| +/*! FSE_sizeof_CTable() : |
| + FSE_CTable is a variable size structure which contains : |
| + `U16 tableLog;` |
| + `U16 maxSymbolValue;` |
| + `U16 nextStateNumber[1 << tableLog];` // This size is variable |
| + `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable |
| +Allocation is manual (C standard does not support variable-size structures). |
| +*/ |
| +size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog) |
| +{ |
| + if (tableLog > FSE_MAX_TABLELOG) |
| + return ERROR(tableLog_tooLarge); |
| + return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32); |
| +} |
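| + |
| +/* Worked example : with tableLog==12 and maxSymbolValue==255, |
| + * FSE_CTABLE_SIZE_U32(12, 255) = 1 + (1 << 11) + (256 * 2) = 2561 U32, i.e. 10244 bytes. */ |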
| + |
| +/* provides the minimum logSize to safely represent a distribution */ |
| +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) |
| +{ |
| + U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; |
| + U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; |
| + U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; |
| + return minBits; |
| +} |
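| + |
| +/* Worked example : srcSize==1000 and maxSymbolValue==255 give |
| + * minBitsSrc = 9+1 = 10 and minBitsSymbols = 7+2 = 9, so the minimum tableLog is 9. */ |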
| + |
| +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) |
| +{ |
| + U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; |
| + U32 tableLog = maxTableLog; |
| + U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); |
| + if (tableLog == 0) |
| + tableLog = FSE_DEFAULT_TABLELOG; |
| + if (maxBitsSrc < tableLog) |
| + tableLog = maxBitsSrc; /* Accuracy can be reduced */ |
| + if (minBits > tableLog) |
| + tableLog = minBits; /* Need a minimum to safely represent all symbol values */ |
| + if (tableLog < FSE_MIN_TABLELOG) |
| + tableLog = FSE_MIN_TABLELOG; |
| + if (tableLog > FSE_MAX_TABLELOG) |
| + tableLog = FSE_MAX_TABLELOG; |
| + return tableLog; |
| +} |
| + |
| +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) |
| +{ |
| + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); |
| +} |
| + |
| +/* Secondary normalization method. |
| + To be used when primary method fails. */ |
| + |
| +static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue) |
| +{ |
| + short const NOT_YET_ASSIGNED = -2; |
| + U32 s; |
| + U32 distributed = 0; |
| + U32 ToDistribute; |
| + |
| + /* Init */ |
| + U32 const lowThreshold = (U32)(total >> tableLog); |
| + U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); |
| + |
| + for (s = 0; s <= maxSymbolValue; s++) { |
| + if (count[s] == 0) { |
| + norm[s] = 0; |
| + continue; |
| + } |
| + if (count[s] <= lowThreshold) { |
| + norm[s] = -1; |
| + distributed++; |
| + total -= count[s]; |
| + continue; |
| + } |
| + if (count[s] <= lowOne) { |
| + norm[s] = 1; |
| + distributed++; |
| + total -= count[s]; |
| + continue; |
| + } |
| + |
| + norm[s] = NOT_YET_ASSIGNED; |
| + } |
| + ToDistribute = (1 << tableLog) - distributed; |
| + |
| + if ((total / ToDistribute) > lowOne) { |
| + /* risk of rounding to zero */ |
| + lowOne = (U32)((total * 3) / (ToDistribute * 2)); |
| + for (s = 0; s <= maxSymbolValue; s++) { |
| + if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { |
| + norm[s] = 1; |
| + distributed++; |
| + total -= count[s]; |
| + continue; |
| + } |
| + } |
| + ToDistribute = (1 << tableLog) - distributed; |
| + } |
| + |
| + if (distributed == maxSymbolValue + 1) { |
| + /* all values are pretty poor; |
| + probably incompressible data (should have already been detected); |
| + find max, then give all remaining points to max */ |
| + U32 maxV = 0, maxC = 0; |
| + for (s = 0; s <= maxSymbolValue; s++) |
| + if (count[s] > maxC) |
| + maxV = s, maxC = count[s]; |
| + norm[maxV] += (short)ToDistribute; |
| + return 0; |
| + } |
| + |
| + if (total == 0) { |
| + /* all of the symbols were low enough for the lowOne or lowThreshold */ |
| + for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1)) |
| + if (norm[s] > 0) |
| + ToDistribute--, norm[s]++; |
| + return 0; |
| + } |
| + |
| + { |
| + U64 const vStepLog = 62 - tableLog; |
| + U64 const mid = (1ULL << (vStepLog - 1)) - 1; |
| + U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */ |
| + U64 tmpTotal = mid; |
| + for (s = 0; s <= maxSymbolValue; s++) { |
| + if (norm[s] == NOT_YET_ASSIGNED) { |
| + U64 const end = tmpTotal + (count[s] * rStep); |
| + U32 const sStart = (U32)(tmpTotal >> vStepLog); |
| + U32 const sEnd = (U32)(end >> vStepLog); |
| + U32 const weight = sEnd - sStart; |
| + if (weight < 1) |
| + return ERROR(GENERIC); |
| + norm[s] = (short)weight; |
| + tmpTotal = end; |
| + } |
| + } |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue) |
| +{ |
| + /* Sanity checks */ |
| + if (tableLog == 0) |
| + tableLog = FSE_DEFAULT_TABLELOG; |
| + if (tableLog < FSE_MIN_TABLELOG) |
| + return ERROR(GENERIC); /* Unsupported size */ |
| + if (tableLog > FSE_MAX_TABLELOG) |
| + return ERROR(tableLog_tooLarge); /* Unsupported size */ |
| + if (tableLog < FSE_minTableLog(total, maxSymbolValue)) |
| + return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ |
| + |
| + { |
| + U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}; |
| + U64 const scale = 62 - tableLog; |
| + U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! */ |
| + U64 const vStep = 1ULL << (scale - 20); |
| + int stillToDistribute = 1 << tableLog; |
| + unsigned s; |
| + unsigned largest = 0; |
| + short largestP = 0; |
| + U32 lowThreshold = (U32)(total >> tableLog); |
| + |
| + for (s = 0; s <= maxSymbolValue; s++) { |
| + if (count[s] == total) |
| + return 0; /* rle special case */ |
| + if (count[s] == 0) { |
| + normalizedCounter[s] = 0; |
| + continue; |
| + } |
| + if (count[s] <= lowThreshold) { |
| + normalizedCounter[s] = -1; |
| + stillToDistribute--; |
| + } else { |
| + short proba = (short)((count[s] * step) >> scale); |
| + if (proba < 8) { |
| + U64 restToBeat = vStep * rtbTable[proba]; |
| + proba += (count[s] * step) - ((U64)proba << scale) > restToBeat; |
| + } |
| + if (proba > largestP) |
| + largestP = proba, largest = s; |
| + normalizedCounter[s] = proba; |
| + stillToDistribute -= proba; |
| + } |
| + } |
| + if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { |
| + /* corner case, need another normalization method */ |
| + size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); |
| + if (FSE_isError(errorCode)) |
| + return errorCode; |
| + } else |
| + normalizedCounter[largest] += (short)stillToDistribute; |
| + } |
| + |
| + return tableLog; |
| +} |
| + |
| +/* fake FSE_CTable, for raw (uncompressed) input */ |
| +size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits) |
| +{ |
| + const unsigned tableSize = 1 << nbBits; |
| + const unsigned tableMask = tableSize - 1; |
| + const unsigned maxSymbolValue = tableMask; |
| + void *const ptr = ct; |
| + U16 *const tableU16 = ((U16 *)ptr) + 2; |
| + void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */ |
| + FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT); |
| + unsigned s; |
| + |
| + /* Sanity checks */ |
| + if (nbBits < 1) |
| + return ERROR(GENERIC); /* min size */ |
| + |
| + /* header */ |
| + tableU16[-2] = (U16)nbBits; |
| + tableU16[-1] = (U16)maxSymbolValue; |
| + |
| + /* Build table */ |
| + for (s = 0; s < tableSize; s++) |
| + tableU16[s] = (U16)(tableSize + s); |
| + |
| + /* Build Symbol Transformation Table */ |
| + { |
| + const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits); |
| + for (s = 0; s <= maxSymbolValue; s++) { |
| + symbolTT[s].deltaNbBits = deltaNbBits; |
| + symbolTT[s].deltaFindState = s - 1; |
| + } |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/* fake FSE_CTable, for rle input (always same symbol) */ |
| +size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue) |
| +{ |
| + void *ptr = ct; |
| + U16 *tableU16 = ((U16 *)ptr) + 2; |
| + void *FSCTptr = (U32 *)ptr + 2; |
| + FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr; |
| + |
| + /* header */ |
| + tableU16[-2] = (U16)0; |
| + tableU16[-1] = (U16)symbolValue; |
| + |
| + /* Build table */ |
| + tableU16[0] = 0; |
| + tableU16[1] = 0; /* just in case */ |
| + |
| + /* Build Symbol Transformation Table */ |
| + symbolTT[symbolValue].deltaNbBits = 0; |
| + symbolTT[symbolValue].deltaFindState = 0; |
| + |
| + return 0; |
| +} |
| + |
| +static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast) |
| +{ |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *ip = iend; |
| + |
| + BIT_CStream_t bitC; |
| + FSE_CState_t CState1, CState2; |
| + |
| + /* init */ |
| + if (srcSize <= 2) |
| + return 0; |
| + { |
| + size_t const initError = BIT_initCStream(&bitC, dst, dstSize); |
| + if (FSE_isError(initError)) |
| + return 0; /* not enough space available to write a bitstream */ |
| + } |
| + |
| +#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) |
| + |
| + if (srcSize & 1) { |
| + FSE_initCState2(&CState1, ct, *--ip); |
| + FSE_initCState2(&CState2, ct, *--ip); |
| + FSE_encodeSymbol(&bitC, &CState1, *--ip); |
| + FSE_FLUSHBITS(&bitC); |
| + } else { |
| + FSE_initCState2(&CState2, ct, *--ip); |
| + FSE_initCState2(&CState1, ct, *--ip); |
| + } |
| + |
| + /* join to mod 4 */ |
| + srcSize -= 2; |
| + if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */ |
| + FSE_encodeSymbol(&bitC, &CState2, *--ip); |
| + FSE_encodeSymbol(&bitC, &CState1, *--ip); |
| + FSE_FLUSHBITS(&bitC); |
| + } |
| + |
| + /* 2 or 4 encoding per loop */ |
| + while (ip > istart) { |
| + |
| + FSE_encodeSymbol(&bitC, &CState2, *--ip); |
| + |
| + if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */ |
| + FSE_FLUSHBITS(&bitC); |
| + |
| + FSE_encodeSymbol(&bitC, &CState1, *--ip); |
| + |
| + if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */ |
| + FSE_encodeSymbol(&bitC, &CState2, *--ip); |
| + FSE_encodeSymbol(&bitC, &CState1, *--ip); |
| + } |
| + |
| + FSE_FLUSHBITS(&bitC); |
| + } |
| + |
| + FSE_flushCState(&bitC, &CState2); |
| + FSE_flushCState(&bitC, &CState1); |
| + return BIT_closeCStream(&bitC); |
| +} |
| + |
| +size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct) |
| +{ |
| + unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); |
| + |
| + if (fast) |
| + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); |
| + else |
| + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); |
| +} |
| + |
| +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } |
| diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c |
| new file mode 100644 |
| index 0000000..a84300e |
| --- /dev/null |
| +++ b/lib/zstd/fse_decompress.c |
| @@ -0,0 +1,332 @@ |
| +/* |
| + * FSE : Finite State Entropy decoder |
| + * Copyright (C) 2013-2015, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| + |
| +/* ************************************************************** |
| +* Compiler specifics |
| +****************************************************************/ |
| +#define FORCE_INLINE static __always_inline |
| + |
| +/* ************************************************************** |
| +* Includes |
| +****************************************************************/ |
| +#include "bitstream.h" |
| +#include "fse.h" |
| +#include <linux/compiler.h> |
| +#include <linux/kernel.h> |
| +#include <linux/string.h> /* memcpy, memset */ |
| + |
| +/* ************************************************************** |
| +* Error Management |
| +****************************************************************/ |
| +#define FSE_isError ERR_isError |
| +#define FSE_STATIC_ASSERT(c) \ |
| + { \ |
| + enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ |
| + } /* use only *after* variable declarations */ |
| + |
| +/* check and forward error code */ |
| +#define CHECK_F(f) \ |
| + { \ |
| + size_t const e = f; \ |
| + if (FSE_isError(e)) \ |
| + return e; \ |
| + } |
| + |
| +/* ************************************************************** |
| +* Templates |
| +****************************************************************/ |
| +/* |
| + designed to be included |
| + for type-specific functions (template emulation in C) |
| + Objective is to write these functions only once, for improved maintenance |
| +*/ |
| + |
| +/* safety checks */ |
| +#ifndef FSE_FUNCTION_EXTENSION |
| +#error "FSE_FUNCTION_EXTENSION must be defined" |
| +#endif |
| +#ifndef FSE_FUNCTION_TYPE |
| +#error "FSE_FUNCTION_TYPE must be defined" |
| +#endif |
| + |
| +/* Function names */ |
| +#define FSE_CAT(X, Y) X##Y |
| +#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y) |
| +#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y) |
| + |
| +/* Function templates */ |
| + |
| +size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize) |
| +{ |
| + void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ |
| + FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr); |
| + U16 *symbolNext = (U16 *)workspace; |
| + |
| + U32 const maxSV1 = maxSymbolValue + 1; |
| + U32 const tableSize = 1 << tableLog; |
| + U32 highThreshold = tableSize - 1; |
| + |
| + /* Sanity Checks */ |
| + if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1)) |
| + return ERROR(tableLog_tooLarge); |
| + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) |
| + return ERROR(maxSymbolValue_tooLarge); |
| + if (tableLog > FSE_MAX_TABLELOG) |
| + return ERROR(tableLog_tooLarge); |
| + |
| + /* Init, lay down lowprob symbols */ |
| + { |
| + FSE_DTableHeader DTableH; |
| + DTableH.tableLog = (U16)tableLog; |
| + DTableH.fastMode = 1; |
| + { |
| + S16 const largeLimit = (S16)(1 << (tableLog - 1)); |
| + U32 s; |
| + for (s = 0; s < maxSV1; s++) { |
| + if (normalizedCounter[s] == -1) { |
| + tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; |
| + symbolNext[s] = 1; |
| + } else { |
| + if (normalizedCounter[s] >= largeLimit) |
| + DTableH.fastMode = 0; |
| + symbolNext[s] = normalizedCounter[s]; |
| + } |
| + } |
| + } |
| + memcpy(dt, &DTableH, sizeof(DTableH)); |
| + } |
| + |
| + /* Spread symbols */ |
| + { |
| + U32 const tableMask = tableSize - 1; |
| + U32 const step = FSE_TABLESTEP(tableSize); |
| + U32 s, position = 0; |
| + for (s = 0; s < maxSV1; s++) { |
| + int i; |
| + for (i = 0; i < normalizedCounter[s]; i++) { |
| + tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s; |
| + position = (position + step) & tableMask; |
| + while (position > highThreshold) |
| + position = (position + step) & tableMask; /* lowprob area */ |
| + } |
| + } |
| + if (position != 0) |
| + return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ |
| + } |
| + |
| + /* Build Decoding table */ |
| + { |
| + U32 u; |
| + for (u = 0; u < tableSize; u++) { |
| + FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol); |
| + U16 nextState = symbolNext[symbol]++; |
| + tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState)); |
| + tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize); |
| + } |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/*-******************************************************* |
| +* Decompression (Byte symbols) |
| +*********************************************************/ |
| +size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue) |
| +{ |
| + void *ptr = dt; |
| + FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr; |
| + void *dPtr = dt + 1; |
| + FSE_decode_t *const cell = (FSE_decode_t *)dPtr; |
| + |
| + DTableH->tableLog = 0; |
| + DTableH->fastMode = 0; |
| + |
| + cell->newState = 0; |
| + cell->symbol = symbolValue; |
| + cell->nbBits = 0; |
| + |
| + return 0; |
| +} |
| + |
| +size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits) |
| +{ |
| + void *ptr = dt; |
| + FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr; |
| + void *dPtr = dt + 1; |
| + FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr; |
| + const unsigned tableSize = 1 << nbBits; |
| + const unsigned tableMask = tableSize - 1; |
| + const unsigned maxSV1 = tableMask + 1; |
| + unsigned s; |
| + |
| + /* Sanity checks */ |
| + if (nbBits < 1) |
| + return ERROR(GENERIC); /* min size */ |
| + |
| + /* Build Decoding Table */ |
| + DTableH->tableLog = (U16)nbBits; |
| + DTableH->fastMode = 1; |
| + for (s = 0; s < maxSV1; s++) { |
| + dinfo[s].newState = 0; |
| + dinfo[s].symbol = (BYTE)s; |
| + dinfo[s].nbBits = (BYTE)nbBits; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt, |
| + const unsigned fast) |
| +{ |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *op = ostart; |
| + BYTE *const omax = op + maxDstSize; |
| + BYTE *const olimit = omax - 3; |
| + |
| + BIT_DStream_t bitD; |
| + FSE_DState_t state1; |
| + FSE_DState_t state2; |
| + |
| + /* Init */ |
| + CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize)); |
| + |
| + FSE_initDState(&state1, &bitD, dt); |
| + FSE_initDState(&state2, &bitD, dt); |
| + |
| +#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) |
| + |
| + /* 4 symbols per loop */ |
| + for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) { |
| + op[0] = FSE_GETSYMBOL(&state1); |
| + |
| + if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ |
| + BIT_reloadDStream(&bitD); |
| + |
| + op[1] = FSE_GETSYMBOL(&state2); |
| + |
| + if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ |
| + { |
| + if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { |
| + op += 2; |
| + break; |
| + } |
| + } |
| + |
| + op[2] = FSE_GETSYMBOL(&state1); |
| + |
| + if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */ |
| + BIT_reloadDStream(&bitD); |
| + |
| + op[3] = FSE_GETSYMBOL(&state2); |
| + } |
| + |
| + /* tail */ |
| + /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ |
| + while (1) { |
| + if (op > (omax - 2)) |
| + return ERROR(dstSize_tooSmall); |
| + *op++ = FSE_GETSYMBOL(&state1); |
| + if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) { |
| + *op++ = FSE_GETSYMBOL(&state2); |
| + break; |
| + } |
| + |
| + if (op > (omax - 2)) |
| + return ERROR(dstSize_tooSmall); |
| + *op++ = FSE_GETSYMBOL(&state2); |
| + if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) { |
| + *op++ = FSE_GETSYMBOL(&state1); |
| + break; |
| + } |
| + } |
| + |
| + return op - ostart; |
| +} |
| + |
| +size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt) |
| +{ |
| + const void *ptr = dt; |
| + const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr; |
| + const U32 fastMode = DTableH->fastMode; |
| + |
| + /* select fast mode (static) */ |
| + if (fastMode) |
| + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); |
| + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); |
| +} |
| + |
| +size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize) |
| +{ |
| + const BYTE *const istart = (const BYTE *)cSrc; |
| + const BYTE *ip = istart; |
| + unsigned tableLog; |
| + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; |
| + size_t NCountLength; |
| + |
| + FSE_DTable *dt; |
| + short *counting; |
| + size_t spaceUsed32 = 0; |
| + |
| + FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32)); |
| + |
| + dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog); |
| + counting = (short *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(tableLog_tooLarge); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + |
| + /* normal FSE decoding mode */ |
| + NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize); |
| + if (FSE_isError(NCountLength)) |
| + return NCountLength; |
| + // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining |
| + // case : NCountLength==cSrcSize */ |
| + if (tableLog > maxLog) |
| + return ERROR(tableLog_tooLarge); |
| + ip += NCountLength; |
| + cSrcSize -= NCountLength; |
| + |
| + CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize)); |
| + |
| + return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */ |
| +} |
| diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h |
| new file mode 100644 |
| index 0000000..2143da2 |
| --- /dev/null |
| +++ b/lib/zstd/huf.h |
| @@ -0,0 +1,212 @@ |
| +/* |
| + * Huffman coder, part of New Generation Entropy library |
| + * header file |
| + * Copyright (C) 2013-2016, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| +#ifndef HUF_H_298734234 |
| +#define HUF_H_298734234 |
| + |
| +/* *** Dependencies *** */ |
| +#include <linux/types.h> /* size_t */ |
| + |
| +/* *** Tool functions *** */ |
| +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ |
| +size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ |
| + |
| +/* Error Management */ |
| +unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ |
| + |
| +/* *** Advanced function *** */ |
| + |
| +/** HUF_compress4X_wksp() : |
| +* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */ |
| +size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, |
| + size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ |
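| + |
| +/* |
| + * Illustrative sketch (not part of the upstream sources) : one-shot 4-stream |
| + * compression with a caller-provided workspace. The buffer name `wksp` is |
| + * ours; kernel callers would normally allocate it from the heap rather than |
| + * the stack. The return value is the compressed size, 0 when the input is |
| + * judged not compressible, or an error code (check with HUF_isError()). |
| + * |
| + *   U32 wksp[HUF_COMPRESS_WORKSPACE_SIZE_U32]; |
| + *   size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize, |
| + *                                            HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, |
| + *                                            wksp, sizeof(wksp)); |
| + */ |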
| + |
| +/* *** Dependencies *** */ |
| +#include "mem.h" /* U32 */ |
| + |
| +/* *** Constants *** */ |
| +#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ |
| +#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */ |
| +#define HUF_SYMBOLVALUE_MAX 255 |
| + |
| +#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */ |
| +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) |
| +#error "HUF_TABLELOG_MAX is too large !" |
| +#endif |
| + |
| +/* **************************************** |
| +* Static allocation |
| +******************************************/ |
| +/* HUF buffer bounds */ |
| +#define HUF_CTABLEBOUND 129 |
| +#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ |
| +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ |
| + |
| +/* static allocation of HUF's Compression Table */ |
| +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ |
| + U32 name##hb[maxSymbolValue + 1]; \ |
| + void *name##hv = &(name##hb); \ |
| + HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */ |
| + |
| +/* static allocation of HUF's DTable */ |
| +typedef U32 HUF_DTable; |
| +#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog))) |
| +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)} |
| +#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)} |
| + |
| +/* The workspace must have alignment at least 4 and be at least this large */ |
| +#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10) |
| +#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32)) |
| + |
| +/* The workspace must have alignment at least 4 and be at least this large */ |
| +#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10) |
| +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) |
| + |
| +/* **************************************** |
| +* Advanced decompression functions |
| +******************************************/ |
| +size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */ |
| +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, |
| + size_t workspaceSize); /**< considers RLE and uncompressed as errors */ |
| +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, |
| + size_t workspaceSize); /**< single-symbol decoder */ |
| +size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, |
| + size_t workspaceSize); /**< double-symbols decoder */ |
| + |
| +/* **************************************** |
| +* HUF detailed API |
| +******************************************/ |
| +/*! |
| +HUF_compress() does the following: |
| +1. count symbol occurrences from source[] into table count[] using FSE_count() |
| +2. (optional) refine tableLog using HUF_optimalTableLog() |
| +3. build Huffman table from count using HUF_buildCTable() |
| +4. save Huffman table to memory buffer using HUF_writeCTable_wksp() |
| +5. encode the data stream using HUF_compress4X_usingCTable() |
| + |
| +The following API allows targeting specific sub-functions for advanced tasks. |
| +For example, it's possible to compress several blocks using the same 'CTable', |
| +or to save and regenerate 'CTable' using external methods. |
| +*/ |
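| + |
| +/* |
| + * Illustrative sketch (not part of the upstream sources) of the detailed flow |
| + * above, assuming `count`, `CTable` and `wksp` are caller-provided buffers of |
| + * the documented sizes, and `maxSymbolValue` comes from FSE_count() : |
| + * |
| + *   huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue); |
| + *   maxBits = HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, wksp, wkspSize); |
| + *   hSize   = HUF_writeCTable_wksp(dst, dstCapacity, CTable, maxSymbolValue, (U32)maxBits, wksp, wkspSize); |
| + *   cSize   = HUF_compress4X_usingCTable(dst + hSize, dstCapacity - hSize, src, srcSize, CTable); |
| + * |
| + * Each returned size_t must be checked with HUF_isError() before use. |
| + */ |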
| +/* FSE_count() : find it within "fse.h" */ |
| +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); |
| +typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ |
| +size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize); |
| +size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable); |
| + |
| +typedef enum { |
| + HUF_repeat_none, /**< Cannot use the previous table */ |
| + HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, |
| + 4}X_repeat */ |
| + HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ |
| +} HUF_repeat; |
| +/** HUF_compress4X_repeat() : |
| +* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. |
| +* If it uses hufTable it does not modify hufTable or repeat. |
| +* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. |
| +* If preferRepeat then the old table will always be used if valid. */ |
| +size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, |
| + size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, |
| + int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ |
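| + |
| +/* |
| + * Illustrative sketch (not part of the upstream sources) : carrying a table |
| + * across consecutive blocks. `hufTable` and `repeat` persist between calls, so |
| + * later blocks may reuse (or revalidate) the table built for earlier ones : |
| + * |
| + *   HUF_repeat repeat = HUF_repeat_none; |
| + *   size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity, src, srcSize, |
| + *                                              maxSymbolValue, tableLog, |
| + *                                              wksp, wkspSize, |
| + *                                              hufTable, &repeat, 0); |
| + */ |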
| + |
| +/** HUF_buildCTable_wksp() : |
| + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. |
| + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. |
| + */ |
| +size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize); |
| + |
| +/*! HUF_readStats() : |
| + Read compact Huffman tree, saved by HUF_writeCTable(). |
| + `huffWeight` is destination buffer. |
| + @return : size read from `src` , or an error Code . |
| + Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ |
| +size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, |
| + void *workspace, size_t workspaceSize); |
| + |
| +/** HUF_readCTable() : |
| +* Loading a CTable saved with HUF_writeCTable() */ |
| +size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize); |
| + |
| +/* |
| +HUF_decompress() does the following: |
| +1. select the decompression algorithm (X2, X4) based on pre-computed heuristics |
| +2. build Huffman table from the saved representation, using HUF_readDTableXn() |
| +3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable |
| +*/ |
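| + |
| +/* |
| + * Illustrative sketch (not part of the upstream sources) of the detailed |
| + * decoding path, assuming `DTable` is large enough for either decoder and |
| + * `wksp` provides HUF_DECOMPRESS_WORKSPACE_SIZE bytes : |
| + * |
| + *   size_t const hSize = HUF_selectDecoder(dstSize, cSrcSize) |
| + *                            ? HUF_readDTableX4_wksp(DTable, cSrc, cSrcSize, wksp, wkspSize) |
| + *                            : HUF_readDTableX2_wksp(DTable, cSrc, cSrcSize, wksp, wkspSize); |
| + *   size_t const dSize = HUF_decompress4X_usingDTable(dst, dstSize, |
| + *                                                     (const BYTE *)cSrc + hSize, |
| + *                                                     cSrcSize - hSize, DTable); |
| + * |
| + * Both sizes must be checked with HUF_isError() before use. |
| + */ |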
| + |
| +/** HUF_selectDecoder() : |
| +* Tells which decoder is likely to decode faster, |
| +* based on a set of pre-determined metrics. |
| +* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . |
| +* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ |
| +U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize); |
| + |
| +size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize); |
| +size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize); |
| + |
| +size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); |
| +size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); |
| +size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); |
| + |
| +/* single stream variants */ |
| + |
| +size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, |
| + size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ |
| +size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable); |
| +/** HUF_compress1X_repeat() : |
| +* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. |
| +* If it uses hufTable it does not modify hufTable or repeat. |
| +* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. |
| +* If preferRepeat then the old table will always be used if valid. */ |
| +size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, |
| + size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, |
| + int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */ |
| + |
| +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); |
| +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, |
| + size_t workspaceSize); /**< single-symbol decoder */ |
| +size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, |
| + size_t workspaceSize); /**< double-symbols decoder */ |
| + |
| +size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, |
| + const HUF_DTable *DTable); /**< automatic selection of single or double symbol decoder, based on DTable */ |
| +size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); |
| +size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); |
| + |
| +#endif /* HUF_H_298734234 */ |
| diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c |
| new file mode 100644 |
| index 0000000..0361f38 |
| --- /dev/null |
| +++ b/lib/zstd/huf_compress.c |
| @@ -0,0 +1,771 @@ |
| +/* |
| + * Huffman encoder, part of New Generation Entropy library |
| + * Copyright (C) 2013-2016, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| + |
| +/* ************************************************************** |
| +* Includes |
| +****************************************************************/ |
| +#include "bitstream.h" |
| +#include "fse.h" /* header compression */ |
| +#include "huf.h" |
| +#include <linux/kernel.h> |
| +#include <linux/string.h> /* memcpy, memset */ |
| + |
| +/* ************************************************************** |
| +* Error Management |
| +****************************************************************/ |
| +#define HUF_STATIC_ASSERT(c) \ |
| + { \ |
| + enum { HUF_static_assert = 1 / (int)(!!(c)) }; \ |
| + } /* use only *after* variable declarations */ |
| +#define CHECK_V_F(e, f) \ |
| + size_t const e = f; \ |
| + if (ERR_isError(e)) \ |
| + return f |
| +#define CHECK_F(f) \ |
| + { \ |
| + CHECK_V_F(_var_err__, f); \ |
| + } |
| + |
| +/* ************************************************************** |
| +* Utils |
| +****************************************************************/ |
| +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) |
| +{ |
| + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); |
| +} |
| + |
| +/* ******************************************************* |
| +* HUF : Huffman block compression |
| +*********************************************************/ |
| +/* HUF_compressWeights() : |
| + * Same as FSE_compress(), but dedicated to huff0's weights compression. |
| + * The use case needs much less stack memory. |
| + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. |
| + */ |
| +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 |
| +size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize) |
| +{ |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *op = ostart; |
| + BYTE *const oend = ostart + dstSize; |
| + |
| + U32 maxSymbolValue = HUF_TABLELOG_MAX; |
| + U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; |
| + |
| + FSE_CTable *CTable; |
| + U32 *count; |
| + S16 *norm; |
| + size_t spaceUsed32 = 0; |
| + |
| + HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32)); |
| + |
| + CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX); |
| + count = (U32 *)workspace + spaceUsed32; |
| + spaceUsed32 += HUF_TABLELOG_MAX + 1; |
| + norm = (S16 *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(tableLog_tooLarge); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + |
| + /* init conditions */ |
| + if (wtSize <= 1) |
| + return 0; /* Not compressible */ |
| + |
| + /* Scan input and build symbol stats */ |
| + { |
| + CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize)); |
| + if (maxCount == wtSize) |
| + return 1; /* only a single symbol in src : rle */ |
| + if (maxCount == 1) |
| + return 0; /* each symbol present maximum once => not compressible */ |
| + } |
| + |
| + tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); |
| + CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue)); |
| + |
| + /* Write table description header */ |
| + { |
| + CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog)); |
| + op += hSize; |
| + } |
| + |
| + /* Compress */ |
| + CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize)); |
| + { |
| + CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable)); |
| + if (cSize == 0) |
| + return 0; /* not enough space for compressed data */ |
| + op += cSize; |
| + } |
| + |
| + return op - ostart; |
| +} |
| + |
| +struct HUF_CElt_s { |
| + U16 val; |
| + BYTE nbBits; |
| +}; /* typedef'd to HUF_CElt within "huf.h" */ |
| + |
| +/*! HUF_writeCTable_wksp() : |
| + `CTable` : Huffman tree to save, using huf representation. |
| + @return : size of saved CTable */ |
| +size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize) |
| +{ |
| + BYTE *op = (BYTE *)dst; |
| + U32 n; |
| + |
| + BYTE *bitsToWeight; |
| + BYTE *huffWeight; |
| + size_t spaceUsed32 = 0; |
| + |
| + bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2; |
| + huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(tableLog_tooLarge); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + |
| + /* check conditions */ |
| + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) |
| + return ERROR(maxSymbolValue_tooLarge); |
| + |
| + /* convert to weight */ |
| + bitsToWeight[0] = 0; |
| + for (n = 1; n < huffLog + 1; n++) |
| + bitsToWeight[n] = (BYTE)(huffLog + 1 - n); |
| + for (n = 0; n < maxSymbolValue; n++) |
| + huffWeight[n] = bitsToWeight[CTable[n].nbBits]; |
| + |
| + /* attempt weights compression by FSE */ |
| + { |
| + CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize)); |
| + if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */ |
| + op[0] = (BYTE)hSize; |
| + return hSize + 1; |
| + } |
| + } |
| + |
| + /* write raw values as 4-bits (max : 15) */ |
| + if (maxSymbolValue > (256 - 128)) |
| + return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ |
| + if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize) |
| + return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ |
| + op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1)); |
| + huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ |
| + for (n = 0; n < maxSymbolValue; n += 2) |
| + op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]); |
| + return ((maxSymbolValue + 1) / 2) + 1; |
| +} |
| + |
| +size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + U32 *rankVal; |
| + BYTE *huffWeight; |
| + U32 tableLog = 0; |
| + U32 nbSymbols = 0; |
| + size_t readSize; |
| + size_t spaceUsed32 = 0; |
| + |
| + rankVal = (U32 *)workspace + spaceUsed32; |
| + spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; |
| + huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(tableLog_tooLarge); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + |
| + /* get symbol weights */ |
| + readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); |
| + if (ERR_isError(readSize)) |
| + return readSize; |
| + |
| + /* check result */ |
| + if (tableLog > HUF_TABLELOG_MAX) |
| + return ERROR(tableLog_tooLarge); |
| + if (nbSymbols > maxSymbolValue + 1) |
| + return ERROR(maxSymbolValue_tooSmall); |
| + |
| + /* Prepare base value per rank */ |
| + { |
| + U32 n, nextRankStart = 0; |
| + for (n = 1; n <= tableLog; n++) { |
| + U32 curr = nextRankStart; |
| + nextRankStart += (rankVal[n] << (n - 1)); |
| + rankVal[n] = curr; |
| + } |
| + } |
| + |
| + /* fill nbBits */ |
| + { |
| + U32 n; |
| + for (n = 0; n < nbSymbols; n++) { |
| + const U32 w = huffWeight[n]; |
| + CTable[n].nbBits = (BYTE)(tableLog + 1 - w); |
| + } |
| + } |
| + |
| + /* fill val */ |
| + { |
| + U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */ |
| + U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0}; |
| + { |
| + U32 n; |
| + for (n = 0; n < nbSymbols; n++) |
| + nbPerRank[CTable[n].nbBits]++; |
| + } |
| + /* determine starting value per rank */ |
| + valPerRank[tableLog + 1] = 0; /* for w==0 */ |
| + { |
| + U16 min = 0; |
| + U32 n; |
| + for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */ |
| + valPerRank[n] = min; /* get starting value within each rank */ |
| + min += nbPerRank[n]; |
| + min >>= 1; |
| + } |
| + } |
| + /* assign value within rank, symbol order */ |
| + { |
| + U32 n; |
| + for (n = 0; n <= maxSymbolValue; n++) |
| + CTable[n].val = valPerRank[CTable[n].nbBits]++; |
| + } |
| + } |
| + |
| + return readSize; |
| +} |
| + |
| +typedef struct nodeElt_s { |
| + U32 count; |
| + U16 parent; |
| + BYTE byte; |
| + BYTE nbBits; |
| +} nodeElt; |
| + |
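| +/* HUF_setMaxHeight() : |
| + * Enforces `maxNbBits` on the tree produced by HUF_buildCTable_wksp(). Codes |
| + * longer than maxNbBits are truncated to maxNbBits, which makes the Kraft sum |
| + * exceed 1 by `totalCost` (counted in units of 2^-maxNbBits); the debt is then |
| + * repaid by lengthening the codes of selected shorter symbols, tracked per |
| + * rank in rankLast[], with a final loop correcting any overshoot. */ |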
| +static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits) |
| +{ |
| + const U32 largestBits = huffNode[lastNonNull].nbBits; |
| + if (largestBits <= maxNbBits) |
| + return largestBits; /* early exit : no elt > maxNbBits */ |
| + |
| + /* there are several too large elements (at least >= 2) */ |
| + { |
| + int totalCost = 0; |
| + const U32 baseCost = 1 << (largestBits - maxNbBits); |
| + U32 n = lastNonNull; |
| + |
| + while (huffNode[n].nbBits > maxNbBits) { |
| + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); |
| + huffNode[n].nbBits = (BYTE)maxNbBits; |
| + n--; |
| + } /* n stops at huffNode[n].nbBits <= maxNbBits */ |
| + while (huffNode[n].nbBits == maxNbBits) |
| + n--; /* n ends at the index of the smallest symbol using < maxNbBits */ |
| + |
| + /* renorm totalCost */ |
| + totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ |
| + |
| + /* repay normalized cost */ |
| + { |
| + U32 const noSymbol = 0xF0F0F0F0; |
| + U32 rankLast[HUF_TABLELOG_MAX + 2]; |
| + int pos; |
| + |
| + /* Get pos of last (smallest) symbol per rank */ |
| + memset(rankLast, 0xF0, sizeof(rankLast)); |
| + { |
| + U32 currNbBits = maxNbBits; |
| + for (pos = n; pos >= 0; pos--) { |
| + if (huffNode[pos].nbBits >= currNbBits) |
| + continue; |
| + currNbBits = huffNode[pos].nbBits; /* < maxNbBits */ |
| + rankLast[maxNbBits - currNbBits] = pos; |
| + } |
| + } |
| + |
| + while (totalCost > 0) { |
| + U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; |
| + for (; nBitsToDecrease > 1; nBitsToDecrease--) { |
| + U32 highPos = rankLast[nBitsToDecrease]; |
| + U32 lowPos = rankLast[nBitsToDecrease - 1]; |
| + if (highPos == noSymbol) |
| + continue; |
| + if (lowPos == noSymbol) |
| + break; |
| + { |
| + U32 const highTotal = huffNode[highPos].count; |
| + U32 const lowTotal = 2 * huffNode[lowPos].count; |
| + if (highTotal <= lowTotal) |
| + break; |
| + } |
| + } |
| + /* only triggered when there is no rank 1 symbol left => find the closest one (note : there is necessarily at least one !) */ |
| + /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ |
| + while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) |
| + nBitsToDecrease++; |
| + totalCost -= 1 << (nBitsToDecrease - 1); |
| + if (rankLast[nBitsToDecrease - 1] == noSymbol) |
| + rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ |
| + huffNode[rankLast[nBitsToDecrease]].nbBits++; |
| + if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ |
| + rankLast[nBitsToDecrease] = noSymbol; |
| + else { |
| + rankLast[nBitsToDecrease]--; |
| + if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease) |
| + rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ |
| + } |
| + } /* while (totalCost > 0) */ |
| + |
| + while (totalCost < 0) { /* Sometimes, cost correction overshoots */ |
| + if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 |
| + (using maxNbBits) */ |
| + while (huffNode[n].nbBits == maxNbBits) |
| + n--; |
| + huffNode[n + 1].nbBits--; |
| + rankLast[1] = n + 1; |
| + totalCost++; |
| + continue; |
| + } |
| + huffNode[rankLast[1] + 1].nbBits--; |
| + rankLast[1]++; |
| + totalCost++; |
| + } |
| + } |
| + } /* there are several too large elements (at least >= 2) */ |
| + |
| + return maxNbBits; |
| +} |
| + |
| +typedef struct { |
| + U32 base; |
| + U32 curr; |
| +} rankPos; |
| + |
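| +/* HUF_sort() : |
| + * Sorts symbols into huffNode[] by decreasing count : a bucket sort keyed on |
| + * BIT_highbit32(count+1) (the magnitude of the count) places each symbol in |
| + * its rank bucket, then an insertion pass orders the symbols within each |
| + * bucket. Symbols with a count of zero all end up at the tail. */ |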
| +static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue) |
| +{ |
| + rankPos rank[32]; |
| + U32 n; |
| + |
| + memset(rank, 0, sizeof(rank)); |
| + for (n = 0; n <= maxSymbolValue; n++) { |
| + U32 r = BIT_highbit32(count[n] + 1); |
| + rank[r].base++; |
| + } |
| + for (n = 30; n > 0; n--) |
| + rank[n - 1].base += rank[n].base; |
| + for (n = 0; n < 32; n++) |
| + rank[n].curr = rank[n].base; |
| + for (n = 0; n <= maxSymbolValue; n++) { |
| + U32 const c = count[n]; |
| + U32 const r = BIT_highbit32(c + 1) + 1; |
| + U32 pos = rank[r].curr++; |
| + while ((pos > rank[r].base) && (c > huffNode[pos - 1].count)) |
| + huffNode[pos] = huffNode[pos - 1], pos--; |
| + huffNode[pos].count = c; |
| + huffNode[pos].byte = (BYTE)n; |
| + } |
| +} |
| + |
| +/** HUF_buildCTable_wksp() : |
| + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. |
| + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. |
| + */ |
| +#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1) |
| +typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1]; |
| +size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize) |
| +{ |
| + nodeElt *const huffNode0 = (nodeElt *)workSpace; |
| + nodeElt *const huffNode = huffNode0 + 1; |
| + U32 n, nonNullRank; |
| + int lowS, lowN; |
| + U16 nodeNb = STARTNODE; |
| + U32 nodeRoot; |
| + |
| + /* safety checks */ |
| + if (wkspSize < sizeof(huffNodeTable)) |
| + return ERROR(GENERIC); /* workSpace is not large enough */ |
| + if (maxNbBits == 0) |
| + maxNbBits = HUF_TABLELOG_DEFAULT; |
| + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) |
| + return ERROR(GENERIC); |
| + memset(huffNode0, 0, sizeof(huffNodeTable)); |
| + |
| + /* sort, decreasing order */ |
| + HUF_sort(huffNode, count, maxSymbolValue); |
| + |
| + /* init for parents */ |
| + nonNullRank = maxSymbolValue; |
| + while (huffNode[nonNullRank].count == 0) |
| + nonNullRank--; |
| + lowS = nonNullRank; |
| + nodeRoot = nodeNb + lowS - 1; |
| + lowN = nodeNb; |
| + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count; |
| + huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb; |
| + nodeNb++; |
| + lowS -= 2; |
| + for (n = nodeNb; n <= nodeRoot; n++) |
| + huffNode[n].count = (U32)(1U << 30); |
| + huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */ |
| + |
| + /* create parents */ |
| + while (nodeNb <= nodeRoot) { |
| + U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; |
| + U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; |
| + huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; |
| + huffNode[n1].parent = huffNode[n2].parent = nodeNb; |
| + nodeNb++; |
| + } |
| + |
| + /* distribute weights (unlimited tree height) */ |
| + huffNode[nodeRoot].nbBits = 0; |
| + for (n = nodeRoot - 1; n >= STARTNODE; n--) |
| + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1; |
| + for (n = 0; n <= nonNullRank; n++) |
| + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1; |
| + |
| + /* enforce maxTableLog */ |
| + maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); |
| + |
| + /* fill result into tree (val, nbBits) */ |
| + { |
| + U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0}; |
| + U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0}; |
| + if (maxNbBits > HUF_TABLELOG_MAX) |
| + return ERROR(GENERIC); /* check fit into table */ |
| + for (n = 0; n <= nonNullRank; n++) |
| + nbPerRank[huffNode[n].nbBits]++; |
| + /* determine starting value per rank */ |
| + { |
| + U16 min = 0; |
| + for (n = maxNbBits; n > 0; n--) { |
| + valPerRank[n] = min; /* get starting value within each rank */ |
| + min += nbPerRank[n]; |
| + min >>= 1; |
| + } |
| + } |
| + for (n = 0; n <= maxSymbolValue; n++) |
| + tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ |
| + for (n = 0; n <= maxSymbolValue; n++) |
| + tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ |
| + } |
| + |
| + return maxNbBits; |
| +} |
| + |
| +static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue) |
| +{ |
| + size_t nbBits = 0; |
| + int s; |
| + for (s = 0; s <= (int)maxSymbolValue; ++s) { |
| + nbBits += CTable[s].nbBits * count[s]; |
| + } |
| + return nbBits >> 3; |
| +} |
| + |
| +static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue) |
| +{ |
| + int bad = 0; |
| + int s; |
| + for (s = 0; s <= (int)maxSymbolValue; ++s) { |
| + bad |= (count[s] != 0) & (CTable[s].nbBits == 0); |
| + } |
| + return !bad; |
| +} |
| + |
| +static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable) |
| +{ |
| + BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); |
| +} |
| + |
| +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } |
| + |
| +#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) |
| + |
| +#define HUF_FLUSHBITS_1(stream) \ |
| + if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \ |
| + HUF_FLUSHBITS(stream) |
| + |
| +#define HUF_FLUSHBITS_2(stream) \ |
| + if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \ |
| + HUF_FLUSHBITS(stream) |
| + |
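| +/* HUF_compress1X_usingCTable() : |
| + * Symbols are encoded from the end of the source towards the beginning, so the |
| + * decoder can regenerate them in forward order. The srcSize%4 remainder is |
| + * flushed first, then the main loop emits 4 symbols per iteration. |
| + * HUF_FLUSHBITS_1/_2 only flush when the bit container could overflow before |
| + * the next full flush (relevant for 32-bit bit containers). */ |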
| +size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable) |
| +{ |
| + const BYTE *ip = (const BYTE *)src; |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *const oend = ostart + dstSize; |
| + BYTE *op = ostart; |
| + size_t n; |
| + const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize)); |
| + BIT_CStream_t bitC; |
| + |
| + /* init */ |
| + if (dstSize < 8) |
| + return 0; /* not enough space to compress */ |
| + { |
| + size_t const initErr = BIT_initCStream(&bitC, op, oend - op); |
| + if (HUF_isError(initErr)) |
| + return 0; |
| + } |
| + |
| + n = srcSize & ~3; /* round srcSize down to a multiple of 4 */ |
| + switch (srcSize & 3) { |
| + case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC); |
| + case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC); |
| + case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC); |
| + case 0: |
| + default:; |
| + } |
| + |
| + for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */ |
| + HUF_encodeSymbol(&bitC, ip[n - 1], CTable); |
| + HUF_FLUSHBITS_1(&bitC); |
| + HUF_encodeSymbol(&bitC, ip[n - 2], CTable); |
| + HUF_FLUSHBITS_2(&bitC); |
| + HUF_encodeSymbol(&bitC, ip[n - 3], CTable); |
| + HUF_FLUSHBITS_1(&bitC); |
| + HUF_encodeSymbol(&bitC, ip[n - 4], CTable); |
| + HUF_FLUSHBITS(&bitC); |
| + } |
| + |
| + return BIT_closeCStream(&bitC); |
| +} |
| + |
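| +/* HUF_compress4X_usingCTable() : |
| + * Splits the input into 4 segments of (srcSize+3)/4 bytes (the last segment |
| + * takes the remainder) and compresses each as an independent stream. The |
| + * output begins with a 6-byte jump table holding the little-endian 16-bit |
| + * compressed sizes of the first three streams, which lets the decoder locate |
| + * all four streams and decode them in parallel. */ |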
| +size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable) |
| +{ |
| + size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */ |
| + const BYTE *ip = (const BYTE *)src; |
| + const BYTE *const iend = ip + srcSize; |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *const oend = ostart + dstSize; |
| + BYTE *op = ostart; |
| + |
| + if (dstSize < 6 + 1 + 1 + 1 + 8) |
| + return 0; /* minimum space to compress successfully */ |
| + if (srcSize < 12) |
| + return 0; /* no saving possible : too small input */ |
| + op += 6; /* jumpTable */ |
| + |
| + { |
| + CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); |
| + if (cSize == 0) |
| + return 0; |
| + ZSTD_writeLE16(ostart, (U16)cSize); |
| + op += cSize; |
| + } |
| + |
| + ip += segmentSize; |
| + { |
| + CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); |
| + if (cSize == 0) |
| + return 0; |
| + ZSTD_writeLE16(ostart + 2, (U16)cSize); |
| + op += cSize; |
| + } |
| + |
| + ip += segmentSize; |
| + { |
| + CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable)); |
| + if (cSize == 0) |
| + return 0; |
| + ZSTD_writeLE16(ostart + 4, (U16)cSize); |
| + op += cSize; |
| + } |
| + |
| + ip += segmentSize; |
| + { |
| + CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable)); |
| + if (cSize == 0) |
| + return 0; |
| + op += cSize; |
| + } |
| + |
| + return op - ostart; |
| +} |
| + |
| +static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream, |
| + const HUF_CElt *CTable) |
| +{ |
| + size_t const cSize = |
| + singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable); |
| + if (HUF_isError(cSize)) { |
| + return cSize; |
| + } |
| + if (cSize == 0) { |
| + return 0; |
| + } /* not compressible */ |
| + op += cSize; |
| + /* check compressibility */ |
| + if ((size_t)(op - ostart) >= srcSize - 1) { |
| + return 0; |
| + } |
| + return op - ostart; |
| +} |
| + |
| +/* `workSpace` must be a table of at least 1024 unsigned */ |
| +static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, |
| + unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat) |
| +{ |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *const oend = ostart + dstSize; |
| + BYTE *op = ostart; |
| + |
| + U32 *count; |
| + size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1); |
| + HUF_CElt *CTable; |
| + size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1); |
| + |
| + /* checks & inits */ |
| + if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) |
| + return ERROR(GENERIC); |
| + if (!srcSize) |
| + return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ |
| + if (!dstSize) |
| + return 0; /* cannot fit within dst budget */ |
| + if (srcSize > HUF_BLOCKSIZE_MAX) |
| + return ERROR(srcSize_wrong); /* curr block size limit */ |
| + if (huffLog > HUF_TABLELOG_MAX) |
| + return ERROR(tableLog_tooLarge); |
| + if (!maxSymbolValue) |
| + maxSymbolValue = HUF_SYMBOLVALUE_MAX; |
| + if (!huffLog) |
| + huffLog = HUF_TABLELOG_DEFAULT; |
| + |
| + count = (U32 *)workSpace; |
| + workSpace = (BYTE *)workSpace + countSize; |
| + wkspSize -= countSize; |
| + CTable = (HUF_CElt *)workSpace; |
| + workSpace = (BYTE *)workSpace + CTableSize; |
| + wkspSize -= CTableSize; |
| + |
| + /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */ |
| + if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { |
| + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); |
| + } |
| + |
| + /* Scan input and build symbol stats */ |
| + { |
| + CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace)); |
| + if (largest == srcSize) { |
| + *ostart = ((const BYTE *)src)[0]; |
| + return 1; |
| + } /* single symbol, rle */ |
| + if (largest <= (srcSize >> 7) + 1) |
| + return 0; /* Fast heuristic : not compressible enough */ |
| + } |
| + |
| + /* Check validity of previous table */ |
| + if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) { |
| + *repeat = HUF_repeat_none; |
| + } |
| + /* Heuristic : use existing table for small inputs */ |
| + if (preferRepeat && repeat && *repeat != HUF_repeat_none) { |
| + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); |
| + } |
| + |
| + /* Build Huffman Tree */ |
| + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); |
| + { |
| + CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize)); |
| + huffLog = (U32)maxBits; |
| + /* Zero the unused symbols so we can check it for validity */ |
| + memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); |
| + } |
| + |
| + /* Write table description header */ |
| + { |
| + CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize)); |
| + /* Check if using the previous table will be beneficial */ |
| + if (repeat && *repeat != HUF_repeat_none) { |
| + size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); |
| + size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue); |
| + if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { |
| + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); |
| + } |
| + } |
| + /* Use the new table */ |
| + if (hSize + 12ul >= srcSize) { |
| + return 0; |
| + } |
| + op += hSize; |
| + if (repeat) { |
| + *repeat = HUF_repeat_none; |
| + } |
| + if (oldHufTable) { |
| + memcpy(oldHufTable, CTable, CTableSize); |
| + } /* Save the new table */ |
| + } |
| + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable); |
| +} |
| + |
| +size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, |
| + size_t wkspSize) |
| +{ |
| + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); |
| +} |
| + |
| +size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, |
| + size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat) |
| +{ |
| + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, |
| + preferRepeat); |
| +} |
| + |
| +size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, |
| + size_t wkspSize) |
| +{ |
| + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); |
| +} |
| + |
| +size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace, |
| + size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat) |
| +{ |
| + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, |
| + preferRepeat); |
| +} |
| diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c |
| new file mode 100644 |
| index 0000000..6526482 |
| --- /dev/null |
| +++ b/lib/zstd/huf_decompress.c |
| @@ -0,0 +1,960 @@ |
| +/* |
| + * Huffman decoder, part of New Generation Entropy library |
| + * Copyright (C) 2013-2016, Yann Collet. |
| + * |
| + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions are |
| + * met: |
| + * |
| + * * Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions and the following disclaimer. |
| + * * Redistributions in binary form must reproduce the above |
| + * copyright notice, this list of conditions and the following disclaimer |
| + * in the documentation and/or other materials provided with the |
| + * distribution. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + * |
| + * You can contact the author at : |
| + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy |
| + */ |
| + |
| +/* ************************************************************** |
| +* Compiler specifics |
| +****************************************************************/ |
| +#define FORCE_INLINE static __always_inline |
| + |
| +/* ************************************************************** |
| +* Dependencies |
| +****************************************************************/ |
| +#include "bitstream.h" /* BIT_* */ |
| +#include "fse.h" /* header compression */ |
| +#include "huf.h" |
| +#include <linux/compiler.h> |
| +#include <linux/kernel.h> |
| +#include <linux/string.h> /* memcpy, memset */ |
| + |
| +/* ************************************************************** |
| +* Error Management |
| +****************************************************************/ |
| +#define HUF_STATIC_ASSERT(c) \ |
| + { \ |
| + enum { HUF_static_assert = 1 / (int)(!!(c)) }; \ |
| + } /* use only *after* variable declarations */ |
| + |
| +/*-***************************/ |
| +/* generic DTableDesc */ |
| +/*-***************************/ |
| + |
| +typedef struct { |
| + BYTE maxTableLog; |
| + BYTE tableType; |
| + BYTE tableLog; |
| + BYTE reserved; |
| +} DTableDesc; |
| + |
| +static DTableDesc HUF_getDTableDesc(const HUF_DTable *table) |
| +{ |
| + DTableDesc dtd; |
| + memcpy(&dtd, table, sizeof(dtd)); |
| + return dtd; |
| +} |
| + |
| +/*-***************************/ |
| +/* single-symbol decoding */ |
| +/*-***************************/ |
| + |
| +typedef struct { |
| + BYTE byte; |
| + BYTE nbBits; |
| +} HUF_DEltX2; /* single-symbol decoding */ |
| + |
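| +/* HUF_readDTableX2_wksp() : |
| + * Builds the single-symbol decoding table : every possible tableLog-bit |
| + * pattern maps to one entry holding the decoded byte and the number of bits |
| + * actually consumed, so HUF_decodeSymbolX2() is a single table lookup. */ |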
| +size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + U32 tableLog = 0; |
| + U32 nbSymbols = 0; |
| + size_t iSize; |
| + void *const dtPtr = DTable + 1; |
| + HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr; |
| + |
| + U32 *rankVal; |
| + BYTE *huffWeight; |
| + size_t spaceUsed32 = 0; |
| + |
| + rankVal = (U32 *)workspace + spaceUsed32; |
| + spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; |
| + huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(tableLog_tooLarge); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + |
| + HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); |
| + /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */ |
| + |
| + iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); |
| + if (HUF_isError(iSize)) |
| + return iSize; |
| + |
| + /* Table header */ |
| + { |
| + DTableDesc dtd = HUF_getDTableDesc(DTable); |
| + if (tableLog > (U32)(dtd.maxTableLog + 1)) |
| + return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ |
| + dtd.tableType = 0; |
| + dtd.tableLog = (BYTE)tableLog; |
| + memcpy(DTable, &dtd, sizeof(dtd)); |
| + } |
| + |
| + /* Calculate starting value for each rank */ |
| + { |
| + U32 n, nextRankStart = 0; |
| + for (n = 1; n < tableLog + 1; n++) { |
| + U32 const curr = nextRankStart; |
| + nextRankStart += (rankVal[n] << (n - 1)); |
| + rankVal[n] = curr; |
| + } |
| + } |
| + |
| + /* fill DTable */ |
| + { |
| + U32 n; |
| + for (n = 0; n < nbSymbols; n++) { |
| + U32 const w = huffWeight[n]; |
| + U32 const length = (1 << w) >> 1; |
| + U32 u; |
| + HUF_DEltX2 D; |
| + D.byte = (BYTE)n; |
| + D.nbBits = (BYTE)(tableLog + 1 - w); |
| + for (u = rankVal[w]; u < rankVal[w] + length; u++) |
| + dt[u] = D; |
| + rankVal[w] += length; |
| + } |
| + } |
| + |
| + return iSize; |
| +} |
| + |
| +static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog) |
| +{ |
| + size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ |
| + BYTE const c = dt[val].byte; |
| + BIT_skipBits(Dstream, dt[val].nbBits); |
| + return c; |
| +} |
| + |
| +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) |
| + |
| +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ |
| + if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \ |
| + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) |
| + |
| +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ |
| + if (ZSTD_64bits()) \ |
| + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) |
| + |
| +FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog) |
| +{ |
| + BYTE *const pStart = p; |
| + |
| + /* up to 4 symbols at a time */ |
| + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) { |
| + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); |
| + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); |
| + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); |
| + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); |
| + } |
| + |
| + /* closer to the end */ |
| + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) |
| + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); |
| + |
| + /* no more data to retrieve from bitstream, hence no need to reload */ |
| + while (p < pEnd) |
| + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); |
| + |
| + return pEnd - pStart; |
| +} |
| + |
| +static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + BYTE *op = (BYTE *)dst; |
| + BYTE *const oend = op + dstSize; |
| + const void *dtPtr = DTable + 1; |
| + const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr; |
| + BIT_DStream_t bitD; |
| + DTableDesc const dtd = HUF_getDTableDesc(DTable); |
| + U32 const dtLog = dtd.tableLog; |
| + |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + |
| + HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog); |
| + |
| + /* check */ |
| + if (!BIT_endOfDStream(&bitD)) |
| + return ERROR(corruption_detected); |
| + |
| + return dstSize; |
| +} |
| + |
| +size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + DTableDesc dtd = HUF_getDTableDesc(DTable); |
| + if (dtd.tableType != 0) |
| + return ERROR(GENERIC); |
| + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); |
| +} |
| + |
| +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)cSrc; |
| + |
| + size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize); |
| + if (HUF_isError(hSize)) |
| + return hSize; |
| + if (hSize >= cSrcSize) |
| + return ERROR(srcSize_wrong); |
| + ip += hSize; |
| + cSrcSize -= hSize; |
| + |
| + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx); |
| +} |
| + |
| +static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + /* Check */ |
| + if (cSrcSize < 10) |
| + return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ |
| + |
| + { |
| + const BYTE *const istart = (const BYTE *)cSrc; |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *const oend = ostart + dstSize; |
| + const void *const dtPtr = DTable + 1; |
| + const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr; |
| + |
| + /* Init */ |
| + BIT_DStream_t bitD1; |
| + BIT_DStream_t bitD2; |
| + BIT_DStream_t bitD3; |
| + BIT_DStream_t bitD4; |
| + size_t const length1 = ZSTD_readLE16(istart); |
| + size_t const length2 = ZSTD_readLE16(istart + 2); |
| + size_t const length3 = ZSTD_readLE16(istart + 4); |
| + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); |
| + const BYTE *const istart1 = istart + 6; /* jumpTable */ |
| + const BYTE *const istart2 = istart1 + length1; |
| + const BYTE *const istart3 = istart2 + length2; |
| + const BYTE *const istart4 = istart3 + length3; |
| + const size_t segmentSize = (dstSize + 3) / 4; |
| + BYTE *const opStart2 = ostart + segmentSize; |
| + BYTE *const opStart3 = opStart2 + segmentSize; |
| + BYTE *const opStart4 = opStart3 + segmentSize; |
| + BYTE *op1 = ostart; |
| + BYTE *op2 = opStart2; |
| + BYTE *op3 = opStart3; |
| + BYTE *op4 = opStart4; |
| + U32 endSignal; |
| + DTableDesc const dtd = HUF_getDTableDesc(DTable); |
| + U32 const dtLog = dtd.tableLog; |
| + |
| + if (length4 > cSrcSize) |
| + return ERROR(corruption_detected); /* overflow */ |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + |
| + /* 16-32 symbols per loop (4-8 symbols per stream) */ |
| + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); |
| + for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) { |
| + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); |
| + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); |
| + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); |
| + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); |
| + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); |
| + } |
| + |
| + /* check corruption */ |
| + if (op1 > opStart2) |
| + return ERROR(corruption_detected); |
| + if (op2 > opStart3) |
| + return ERROR(corruption_detected); |
| + if (op3 > opStart4) |
| + return ERROR(corruption_detected); |
| + /* note : op4 is supposed to be already verified within the main loop */ |
| + |
| + /* finish bitStreams one by one */ |
| + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); |
| + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); |
| + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); |
| + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); |
| + |
| + /* check */ |
| + endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); |
| + if (!endSignal) |
| + return ERROR(corruption_detected); |
| + |
| + /* decoded size */ |
| + return dstSize; |
| + } |
| +} |
| + |
| +size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + DTableDesc dtd = HUF_getDTableDesc(DTable); |
| + if (dtd.tableType != 0) |
| + return ERROR(GENERIC); |
| + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); |
| +} |
| + |
| +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)cSrc; |
| + |
| + size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize); |
| + if (HUF_isError(hSize)) |
| + return hSize; |
| + if (hSize >= cSrcSize) |
| + return ERROR(srcSize_wrong); |
| + ip += hSize; |
| + cSrcSize -= hSize; |
| + |
| + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); |
| +} |
| + |
| +/* *************************/ |
| +/* double-symbols decoding */ |
| +/* *************************/ |
| +typedef struct { |
| + U16 sequence; |
| + BYTE nbBits; |
| + BYTE length; |
| +} HUF_DEltX4; /* double-symbols decoding */ |
| + |
| +typedef struct { |
| + BYTE symbol; |
| + BYTE weight; |
| +} sortedSymbol_t; |
| + |
| +/* HUF_fillDTableX4Level2() : |
| + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ |
| +static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight, |
| + const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) |
| +{ |
| + HUF_DEltX4 DElt; |
| + U32 rankVal[HUF_TABLELOG_MAX + 1]; |
| + |
| + /* get pre-calculated rankVal */ |
| + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); |
| + |
| + /* fill skipped values */ |
| + if (minWeight > 1) { |
| + U32 i, skipSize = rankVal[minWeight]; |
| + ZSTD_writeLE16(&(DElt.sequence), baseSeq); |
| + DElt.nbBits = (BYTE)(consumed); |
| + DElt.length = 1; |
| + for (i = 0; i < skipSize; i++) |
| + DTable[i] = DElt; |
| + } |
| + |
| + /* fill DTable */ |
| + { |
| + U32 s; |
| + for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */ |
| + const U32 symbol = sortedSymbols[s].symbol; |
| + const U32 weight = sortedSymbols[s].weight; |
| + const U32 nbBits = nbBitsBaseline - weight; |
| + const U32 length = 1 << (sizeLog - nbBits); |
| + const U32 start = rankVal[weight]; |
| + U32 i = start; |
| + const U32 end = start + length; |
| + |
| + ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8))); |
| + DElt.nbBits = (BYTE)(nbBits + consumed); |
| + DElt.length = 2; |
| + do { |
| + DTable[i++] = DElt; |
| + } while (i < end); /* since length >= 1 */ |
| + |
| + rankVal[weight] += length; |
| + } |
| + } |
| +} |
| + |
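| +/* Note on the helper above : HUF_fillDTableX4Level2() populates one 2^sizeLog sub-table for cells whose first symbol is `baseSeq`. |
| + * Positions where a second symbol fits store both bytes in `sequence` with length 2 and the combined bit cost, while positions |
| + * whose second-symbol weight falls below `minWeight` keep only `baseSeq`, with length 1 and `consumed` bits. */ |
| + |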
| +typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1]; |
| +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; |
| + |
| +static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart, |
| + rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) |
| +{ |
| + U32 rankVal[HUF_TABLELOG_MAX + 1]; |
| + const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ |
| + const U32 minBits = nbBitsBaseline - maxWeight; |
| + U32 s; |
| + |
| + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); |
| + |
| + /* fill DTable */ |
| + for (s = 0; s < sortedListSize; s++) { |
| + const U16 symbol = sortedList[s].symbol; |
| + const U32 weight = sortedList[s].weight; |
| + const U32 nbBits = nbBitsBaseline - weight; |
| + const U32 start = rankVal[weight]; |
| + const U32 length = 1 << (targetLog - nbBits); |
| + |
| + if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */ |
| + U32 sortedRank; |
| + int minWeight = nbBits + scaleLog; |
| + if (minWeight < 1) |
| + minWeight = 1; |
| + sortedRank = rankStart[minWeight]; |
| + HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank, |
| + sortedListSize - sortedRank, nbBitsBaseline, symbol); |
| + } else { |
| + HUF_DEltX4 DElt; |
| + ZSTD_writeLE16(&(DElt.sequence), symbol); |
| + DElt.nbBits = (BYTE)(nbBits); |
| + DElt.length = 1; |
| + { |
| + U32 const end = start + length; |
| + U32 u; |
| + for (u = start; u < end; u++) |
| + DTable[u] = DElt; |
| + } |
| + } |
| + rankVal[weight] += length; |
| + } |
| +} |
| + |
| +size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + U32 tableLog, maxW, sizeOfSort, nbSymbols; |
| + DTableDesc dtd = HUF_getDTableDesc(DTable); |
| + U32 const maxTableLog = dtd.maxTableLog; |
| + size_t iSize; |
| + void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */ |
| + HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr; |
| + U32 *rankStart; |
| + |
| + rankValCol_t *rankVal; |
| + U32 *rankStats; |
| + U32 *rankStart0; |
| + sortedSymbol_t *sortedSymbol; |
| + BYTE *weightList; |
| + size_t spaceUsed32 = 0; |
| + |
| + HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0); |
| + |
| + rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2; |
| + rankStats = (U32 *)workspace + spaceUsed32; |
| + spaceUsed32 += HUF_TABLELOG_MAX + 1; |
| + rankStart0 = (U32 *)workspace + spaceUsed32; |
| + spaceUsed32 += HUF_TABLELOG_MAX + 2; |
| + sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2; |
| + weightList = (BYTE *)((U32 *)workspace + spaceUsed32); |
| + spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; |
| + |
| + if ((spaceUsed32 << 2) > workspaceSize) |
| + return ERROR(tableLog_tooLarge); |
| + workspace = (U32 *)workspace + spaceUsed32; |
| + workspaceSize -= (spaceUsed32 << 2); |
| + |
| + rankStart = rankStart0 + 1; |
| + memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); |
| + |
| + HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ |
| + if (maxTableLog > HUF_TABLELOG_MAX) |
| + return ERROR(tableLog_tooLarge); |
| +	/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */ |
| + |
| + iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize); |
| + if (HUF_isError(iSize)) |
| + return iSize; |
| + |
| + /* check result */ |
| + if (tableLog > maxTableLog) |
| + return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ |
| + |
| + /* find maxWeight */ |
| + for (maxW = tableLog; rankStats[maxW] == 0; maxW--) { |
| + } /* necessarily finds a solution before 0 */ |
| + |
| + /* Get start index of each weight */ |
| + { |
| + U32 w, nextRankStart = 0; |
| + for (w = 1; w < maxW + 1; w++) { |
| + U32 curr = nextRankStart; |
| + nextRankStart += rankStats[w]; |
| + rankStart[w] = curr; |
| + } |
| +		rankStart[0] = nextRankStart; /* put all 0w symbols at the end of the sorted list */ |
| + sizeOfSort = nextRankStart; |
| + } |
| + |
| + /* sort symbols by weight */ |
| + { |
| + U32 s; |
| + for (s = 0; s < nbSymbols; s++) { |
| + U32 const w = weightList[s]; |
| + U32 const r = rankStart[w]++; |
| + sortedSymbol[r].symbol = (BYTE)s; |
| + sortedSymbol[r].weight = (BYTE)w; |
| + } |
| +		rankStart[0] = 0; /* forget 0w symbols; this is the beginning of weight(1) */ |
| + } |
| + |
| + /* Build rankVal */ |
| + { |
| + U32 *const rankVal0 = rankVal[0]; |
| + { |
| + int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */ |
| + U32 nextRankVal = 0; |
| + U32 w; |
| + for (w = 1; w < maxW + 1; w++) { |
| + U32 curr = nextRankVal; |
| + nextRankVal += rankStats[w] << (w + rescale); |
| + rankVal0[w] = curr; |
| + } |
| + } |
| + { |
| + U32 const minBits = tableLog + 1 - maxW; |
| + U32 consumed; |
| + for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) { |
| + U32 *const rankValPtr = rankVal[consumed]; |
| + U32 w; |
| + for (w = 1; w < maxW + 1; w++) { |
| + rankValPtr[w] = rankVal0[w] >> consumed; |
| + } |
| + } |
| + } |
| + } |
| + |
| + HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1); |
| + |
| + dtd.tableLog = (BYTE)maxTableLog; |
| + dtd.tableType = 1; |
| + memcpy(DTable, &dtd, sizeof(dtd)); |
| + return iSize; |
| +} |
| + |
| +static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog) |
| +{ |
| + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ |
| + memcpy(op, dt + val, 2); |
| + BIT_skipBits(DStream, dt[val].nbBits); |
| + return dt[val].length; |
| +} |
| + |
| +static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog) |
| +{ |
| + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ |
| + memcpy(op, dt + val, 1); |
| + if (dt[val].length == 1) |
| + BIT_skipBits(DStream, dt[val].nbBits); |
| + else { |
| + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) { |
| + BIT_skipBits(DStream, dt[val].nbBits); |
| + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8)) |
| + /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ |
| + DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8); |
| + } |
| + } |
| + return 1; |
| +} |
| + |
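| +/* Note : HUF_decodeLastSymbolX4() above emits a single byte because only one output position remains; when the looked-up cell |
| + * encodes 2 symbols, the bit cost of its first symbol alone cannot be recovered, so the stream is simply advanced (and clamped) |
| + * by the full cell cost, which is harmless only because no further symbol is read from this stream. */ |
| + |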
| +#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) |
| + |
| +#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ |
| + if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \ |
| + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) |
| + |
| +#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ |
| + if (ZSTD_64bits()) \ |
| + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) |
| + |
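| +/* The tiered macros above appear intended to bound how many table lookups occur between bit-container reloads : a 64-bit |
| + * container always holds enough bits for the extra lookups, while a 32-bit container only covers the second lookup when |
| + * HUF_TABLELOG_MAX <= 12, so the _1 and _2 variants become no-ops otherwise. */ |
| + |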
| +FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog) |
| +{ |
| + BYTE *const pStart = p; |
| + |
| + /* up to 8 symbols at a time */ |
| + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) { |
| + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); |
| + HUF_DECODE_SYMBOLX4_1(p, bitDPtr); |
| + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); |
| + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); |
| + } |
| + |
| + /* closer to end : up to 2 symbols at a time */ |
| + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2)) |
| + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); |
| + |
| + while (p <= pEnd - 2) |
| + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ |
| + |
| + if (p < pEnd) |
| + p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); |
| + |
| + return p - pStart; |
| +} |
| + |
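| +/* Note : the unrolled loop in HUF_decodeStreamX4() stops once fewer than sizeof(bitContainer) output bytes remain, since each |
| + * iteration may emit up to 8 bytes and every HUF_decodeSymbolX4() call writes 2 bytes even for a 1-symbol cell; the tail loops |
| + * then decode down to the last 2 bytes, and HUF_decodeLastSymbolX4() finishes a final odd byte if needed. */ |
| + |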
| +static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + BIT_DStream_t bitD; |
| + |
| + /* Init */ |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + |
| + /* decode */ |
| + { |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *const oend = ostart + dstSize; |
| + const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */ |
| + const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr; |
| + DTableDesc const dtd = HUF_getDTableDesc(DTable); |
| + HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); |
| + } |
| + |
| + /* check */ |
| + if (!BIT_endOfDStream(&bitD)) |
| + return ERROR(corruption_detected); |
| + |
| + /* decoded size */ |
| + return dstSize; |
| +} |
| + |
| +size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + DTableDesc dtd = HUF_getDTableDesc(DTable); |
| + if (dtd.tableType != 1) |
| + return ERROR(GENERIC); |
| + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); |
| +} |
| + |
| +size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)cSrc; |
| + |
| + size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize); |
| + if (HUF_isError(hSize)) |
| + return hSize; |
| + if (hSize >= cSrcSize) |
| + return ERROR(srcSize_wrong); |
| + ip += hSize; |
| + cSrcSize -= hSize; |
| + |
| + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx); |
| +} |
| + |
| +static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + if (cSrcSize < 10) |
| + return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ |
| + |
| + { |
| + const BYTE *const istart = (const BYTE *)cSrc; |
| + BYTE *const ostart = (BYTE *)dst; |
| + BYTE *const oend = ostart + dstSize; |
| + const void *const dtPtr = DTable + 1; |
| + const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr; |
| + |
| + /* Init */ |
| + BIT_DStream_t bitD1; |
| + BIT_DStream_t bitD2; |
| + BIT_DStream_t bitD3; |
| + BIT_DStream_t bitD4; |
| + size_t const length1 = ZSTD_readLE16(istart); |
| + size_t const length2 = ZSTD_readLE16(istart + 2); |
| + size_t const length3 = ZSTD_readLE16(istart + 4); |
| + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); |
| + const BYTE *const istart1 = istart + 6; /* jumpTable */ |
| + const BYTE *const istart2 = istart1 + length1; |
| + const BYTE *const istart3 = istart2 + length2; |
| + const BYTE *const istart4 = istart3 + length3; |
| + size_t const segmentSize = (dstSize + 3) / 4; |
| + BYTE *const opStart2 = ostart + segmentSize; |
| + BYTE *const opStart3 = opStart2 + segmentSize; |
| + BYTE *const opStart4 = opStart3 + segmentSize; |
| + BYTE *op1 = ostart; |
| + BYTE *op2 = opStart2; |
| + BYTE *op3 = opStart3; |
| + BYTE *op4 = opStart4; |
| + U32 endSignal; |
| + DTableDesc const dtd = HUF_getDTableDesc(DTable); |
| + U32 const dtLog = dtd.tableLog; |
| + |
| + if (length4 > cSrcSize) |
| + return ERROR(corruption_detected); /* overflow */ |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + { |
| + size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); |
| + if (HUF_isError(errorCode)) |
| + return errorCode; |
| + } |
| + |
| + /* 16-32 symbols per loop (4-8 symbols per stream) */ |
| + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); |
| + for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) { |
| + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); |
| + HUF_DECODE_SYMBOLX4_1(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX4_1(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX4_1(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX4_1(op4, &bitD4); |
| + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); |
| + HUF_DECODE_SYMBOLX4_0(op1, &bitD1); |
| + HUF_DECODE_SYMBOLX4_0(op2, &bitD2); |
| + HUF_DECODE_SYMBOLX4_0(op3, &bitD3); |
| + HUF_DECODE_SYMBOLX4_0(op4, &bitD4); |
| + |
| + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); |
| + } |
| + |
| + /* check corruption */ |
| + if (op1 > opStart2) |
| + return ERROR(corruption_detected); |
| + if (op2 > opStart3) |
| + return ERROR(corruption_detected); |
| + if (op3 > opStart4) |
| + return ERROR(corruption_detected); |
| + /* note : op4 already verified within main loop */ |
| + |
| + /* finish bitStreams one by one */ |
| + HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); |
| + HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); |
| + HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); |
| + HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); |
| + |
| + /* check */ |
| + { |
| + U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); |
| + if (!endCheck) |
| + return ERROR(corruption_detected); |
| + } |
| + |
| + /* decoded size */ |
| + return dstSize; |
| + } |
| +} |
| + |
| +size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + DTableDesc dtd = HUF_getDTableDesc(DTable); |
| + if (dtd.tableType != 1) |
| + return ERROR(GENERIC); |
| + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); |
| +} |
| + |
| +size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + const BYTE *ip = (const BYTE *)cSrc; |
| + |
| + size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize); |
| + if (HUF_isError(hSize)) |
| + return hSize; |
| + if (hSize >= cSrcSize) |
| + return ERROR(srcSize_wrong); |
| + ip += hSize; |
| + cSrcSize -= hSize; |
| + |
| + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); |
| +} |
| + |
| +/* ********************************/ |
| +/* Generic decompression selector */ |
| +/* ********************************/ |
| + |
| +size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + DTableDesc const dtd = HUF_getDTableDesc(DTable); |
| + return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) |
| + : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); |
| +} |
| + |
| +size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable) |
| +{ |
| + DTableDesc const dtd = HUF_getDTableDesc(DTable); |
| + return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) |
| + : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); |
| +} |
| + |
| +typedef struct { |
| + U32 tableTime; |
| + U32 decode256Time; |
| +} algo_time_t; |
| +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { |
| + /* single, double, quad */ |
| + {{0, 0}, {1, 1}, {2, 2}}, /* Q==0 : impossible */ |
| + {{0, 0}, {1, 1}, {2, 2}}, /* Q==1 : impossible */ |
| + {{38, 130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ |
| + {{448, 128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ |
| + {{556, 128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ |
| + {{714, 128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ |
| + {{883, 128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ |
| + {{897, 128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ |
| + {{926, 128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ |
| + {{947, 128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ |
| + {{1107, 128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ |
| + {{1177, 128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ |
| + {{1242, 128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ |
| + {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */ |
| + {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */ |
| + {{722, 128}, {1891, 145}, {1936, 146}}, /* Q ==15 : 93-99% */ |
| +}; |
| + |
| +/** HUF_selectDecoder() : |
| +* Tells which decoder is likely to decode faster, |
| +* based on a set of pre-determined metrics. |
| +* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4. |
| +* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ |
| +U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize) |
| +{ |
| + /* decoder timing evaluation */ |
| + U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ |
| + U32 const D256 = (U32)(dstSize >> 8); |
| + U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); |
| + U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); |
| + DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */ |
| + |
| + return DTime1 < DTime0; |
| +} |
| + |
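| +/* Illustrative example : with dstSize = 64 KB and cSrcSize = 32 KB, Q = 32768*16/65536 = 8 and D256 = 65536>>8 = 256. |
| + * Using the Q==8 row : DTime0 = 926 + 128*256 = 33694 and DTime1 = 1613 + 75*256 = 20813, which becomes |
| + * 20813 + (20813>>3) = 23414 after the cache-eviction penalty. Since DTime1 < DTime0, the double-symbol decoder |
| + * (HUF_decompress4X4) is selected. */ |
| + |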
| +typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize); |
| + |
| +size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + /* validation checks */ |
| + if (dstSize == 0) |
| + return ERROR(dstSize_tooSmall); |
| + if (cSrcSize > dstSize) |
| + return ERROR(corruption_detected); /* invalid */ |
| + if (cSrcSize == dstSize) { |
| + memcpy(dst, cSrc, dstSize); |
| + return dstSize; |
| + } /* not compressed */ |
| + if (cSrcSize == 1) { |
| + memset(dst, *(const BYTE *)cSrc, dstSize); |
| + return dstSize; |
| + } /* RLE */ |
| + |
| + { |
| + U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); |
| + return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) |
| + : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); |
| + } |
| +} |
| + |
| +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + /* validation checks */ |
| + if (dstSize == 0) |
| + return ERROR(dstSize_tooSmall); |
| + if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) |
| + return ERROR(corruption_detected); /* invalid */ |
| + |
| + { |
| + U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); |
| + return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) |
| + : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); |
| + } |
| +} |
| + |
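| +/* Note : unlike HUF_decompress4X_DCtx_wksp() above, the _hufOnly variant rejects the uncompressed (cSrcSize == dstSize) and |
| + * RLE (cSrcSize == 1) cases as corruption; callers are expected to have dispatched those block types beforehand. */ |
| + |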
| +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize) |
| +{ |
| + /* validation checks */ |
| + if (dstSize == 0) |
| + return ERROR(dstSize_tooSmall); |
| + if (cSrcSize > dstSize) |
| + return ERROR(corruption_detected); /* invalid */ |
| + if (cSrcSize == dstSize) { |
| + memcpy(dst, cSrc, dstSize); |
| + return dstSize; |
| + } /* not compressed */ |
| + if (cSrcSize == 1) { |
| + memset(dst, *(const BYTE *)cSrc, dstSize); |
| + return dstSize; |
| + } /* RLE */ |
| + |
| + { |
| + U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); |
| + return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize) |
| + : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize); |
| + } |
| +} |
| diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h |
| new file mode 100644 |
| index 0000000..3a0f34c |
| --- /dev/null |
| +++ b/lib/zstd/mem.h |
| @@ -0,0 +1,151 @@ |
| +/** |
| + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +#ifndef MEM_H_MODULE |
| +#define MEM_H_MODULE |
| + |
| +/*-**************************************** |
| +* Dependencies |
| +******************************************/ |
| +#include <asm/unaligned.h> |
| +#include <linux/string.h> /* memcpy */ |
| +#include <linux/types.h> /* size_t, ptrdiff_t */ |
| + |
| +/*-**************************************** |
| +* Compiler specifics |
| +******************************************/ |
| +#define ZSTD_STATIC static __inline __attribute__((unused)) |
| + |
| +/*-************************************************************** |
| +* Basic Types |
| +*****************************************************************/ |
| +typedef uint8_t BYTE; |
| +typedef uint16_t U16; |
| +typedef int16_t S16; |
| +typedef uint32_t U32; |
| +typedef int32_t S32; |
| +typedef uint64_t U64; |
| +typedef int64_t S64; |
| +typedef ptrdiff_t iPtrDiff; |
| +typedef uintptr_t uPtrDiff; |
| + |
| +/*-************************************************************** |
| +* Memory I/O |
| +*****************************************************************/ |
| +ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; } |
| +ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; } |
| + |
| +#if defined(__LITTLE_ENDIAN) |
| +#define ZSTD_LITTLE_ENDIAN 1 |
| +#else |
| +#define ZSTD_LITTLE_ENDIAN 0 |
| +#endif |
| + |
| +ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; } |
| + |
| +ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); } |
| + |
| +ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); } |
| + |
| +ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); } |
| + |
| +ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); } |
| + |
| +/*=== Little endian r/w ===*/ |
| + |
| +ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); } |
| + |
| +ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); } |
| + |
| +ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val) |
| +{ |
| + ZSTD_writeLE16(memPtr, (U16)val); |
| + ((BYTE *)memPtr)[2] = (BYTE)(val >> 16); |
| +} |
| + |
| +ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); } |
| + |
| +ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); } |
| + |
| +ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr) |
| +{ |
| + if (ZSTD_32bits()) |
| + return (size_t)ZSTD_readLE32(memPtr); |
| + else |
| + return (size_t)ZSTD_readLE64(memPtr); |
| +} |
| + |
| +ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val) |
| +{ |
| + if (ZSTD_32bits()) |
| + ZSTD_writeLE32(memPtr, (U32)val); |
| + else |
| + ZSTD_writeLE64(memPtr, (U64)val); |
| +} |
| + |
| +/*=== Big endian r/w ===*/ |
| + |
| +ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); } |
| + |
| +ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); } |
| + |
| +ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); } |
| + |
| +ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr) |
| +{ |
| + if (ZSTD_32bits()) |
| + return (size_t)ZSTD_readBE32(memPtr); |
| + else |
| + return (size_t)ZSTD_readBE64(memPtr); |
| +} |
| + |
| +ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val) |
| +{ |
| + if (ZSTD_32bits()) |
| + ZSTD_writeBE32(memPtr, (U32)val); |
| + else |
| + ZSTD_writeBE64(memPtr, (U64)val); |
| +} |
| + |
| +/* function safe only for comparisons */ |
| +ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length) |
| +{ |
| + switch (length) { |
| + default: |
| + case 4: return ZSTD_read32(memPtr); |
| + case 3: |
| + if (ZSTD_isLittleEndian()) |
| + return ZSTD_read32(memPtr) << 8; |
| + else |
| + return ZSTD_read32(memPtr) >> 8; |
| + } |
| +} |
| + |
| +#endif /* MEM_H_MODULE */ |
| diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c |
| new file mode 100644 |
| index 0000000..a282624 |
| --- /dev/null |
| +++ b/lib/zstd/zstd_common.c |
| @@ -0,0 +1,75 @@ |
| +/** |
| + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +/*-************************************* |
| +* Dependencies |
| +***************************************/ |
| +#include "error_private.h" |
| +#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */ |
| +#include <linux/kernel.h> |
| + |
| +/*=************************************************************** |
| +* Custom allocator |
| +****************************************************************/ |
| + |
| +#define stack_push(stack, size) \ |
| + ({ \ |
| + void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \ |
| + (stack)->ptr = (char *)ptr + (size); \ |
| + (stack)->ptr <= (stack)->end ? ptr : NULL; \ |
| + }) |
| + |
| +ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize) |
| +{ |
| + ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace}; |
| + ZSTD_stack *stack = (ZSTD_stack *)workspace; |
| + /* Verify preconditions */ |
| + if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) { |
| + ZSTD_customMem error = {NULL, NULL, NULL}; |
| + return error; |
| + } |
| + /* Initialize the stack */ |
| + stack->ptr = workspace; |
| + stack->end = (char *)workspace + workspaceSize; |
| + stack_push(stack, sizeof(ZSTD_stack)); |
| + return stackMem; |
| +} |
| + |
| +void *ZSTD_stackAllocAll(void *opaque, size_t *size) |
| +{ |
| + ZSTD_stack *stack = (ZSTD_stack *)opaque; |
| + *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr); |
| + return stack_push(stack, *size); |
| +} |
| + |
| +void *ZSTD_stackAlloc(void *opaque, size_t size) |
| +{ |
| + ZSTD_stack *stack = (ZSTD_stack *)opaque; |
| + return stack_push(stack, size); |
| +} |
| +void ZSTD_stackFree(void *opaque, void *address) |
| +{ |
| + (void)opaque; |
| + (void)address; |
| +} |
| + |
| +void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); } |
| + |
| +void ZSTD_free(void *ptr, ZSTD_customMem customMem) |
| +{ |
| + if (ptr != NULL) |
| + customMem.customFree(customMem.opaque, ptr); |
| +} |
| diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h |
| new file mode 100644 |
| index 0000000..6748719 |
| --- /dev/null |
| +++ b/lib/zstd/zstd_internal.h |
| @@ -0,0 +1,269 @@ |
| +/** |
| + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +#ifndef ZSTD_CCOMMON_H_MODULE |
| +#define ZSTD_CCOMMON_H_MODULE |
| + |
| +/*-******************************************************* |
| +* Compiler specifics |
| +*********************************************************/ |
| +#define FORCE_INLINE static __always_inline |
| +#define FORCE_NOINLINE static noinline |
| + |
| +/*-************************************* |
| +* Dependencies |
| +***************************************/ |
| +#include "error_private.h" |
| +#include "mem.h" |
| +#include <linux/compiler.h> |
| +#include <linux/kernel.h> |
| +#include <linux/xxhash.h> |
| +#include <linux/zstd.h> |
| + |
| +/*-************************************* |
| +* shared macros |
| +***************************************/ |
| +#define MIN(a, b) ((a) < (b) ? (a) : (b)) |
| +#define MAX(a, b) ((a) > (b) ? (a) : (b)) |
| +#define CHECK_F(f) \ |
| + { \ |
| + size_t const errcod = f; \ |
| + if (ERR_isError(errcod)) \ |
| + return errcod; \ |
| +	} /* check and forward error code */ |
| +#define CHECK_E(f, e) \ |
| + { \ |
| + size_t const errcod = f; \ |
| + if (ERR_isError(errcod)) \ |
| + return ERROR(e); \ |
| +	} /* check and send error code */ |
| +#define ZSTD_STATIC_ASSERT(c) \ |
| + { \ |
| + enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \ |
| + } |
| + |
| +/*-************************************* |
| +* Common constants |
| +***************************************/ |
| +#define ZSTD_OPT_NUM (1 << 12) |
| +#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */ |
| + |
| +#define ZSTD_REP_NUM 3 /* number of repcodes */ |
| +#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ |
| +#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1) |
| +#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) |
| +static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8}; |
| + |
| +#define KB *(1 << 10) |
| +#define MB *(1 << 20) |
| +#define GB *(1U << 30) |
| + |
| +#define BIT7 128 |
| +#define BIT6 64 |
| +#define BIT5 32 |
| +#define BIT4 16 |
| +#define BIT1 2 |
| +#define BIT0 1 |
| + |
| +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 |
| +static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8}; |
| +static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4}; |
| + |
| +#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow a `static const` variable to be initialized using another `static const` variable */ |
| +static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; |
| +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; |
| + |
| +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ |
| +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ |
| + |
| +#define HufLog 12 |
| +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; |
| + |
| +#define LONGNBSEQ 0x7F00 |
| + |
| +#define MINMATCH 3 |
| +#define EQUAL_READ32 4 |
| + |
| +#define Litbits 8 |
| +#define MaxLit ((1 << Litbits) - 1) |
| +#define MaxML 52 |
| +#define MaxLL 35 |
| +#define MaxOff 28 |
| +#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ |
| +#define MLFSELog 9 |
| +#define LLFSELog 9 |
| +#define OffFSELog 8 |
| + |
| +static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; |
| +static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1}; |
| +#define LL_DEFAULTNORMLOG 6 /* for static allocation */ |
| +static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG; |
| + |
| +static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
| + 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; |
| +static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
| + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1}; |
| +#define ML_DEFAULTNORMLOG 6 /* for static allocation */ |
| +static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG; |
| + |
| +static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}; |
| +#define OF_DEFAULTNORMLOG 5 /* for static allocation */ |
| +static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; |
| + |
| +/*-******************************************* |
| +* Shared functions to include for inlining |
| +*********************************************/ |
| +static void ZSTD_copy8(void *dst, const void *src) { memcpy(dst, src, 8); } |
| +#define COPY8(d, s) \ |
| + { \ |
| + ZSTD_copy8(d, s); \ |
| + d += 8; \ |
| + s += 8; \ |
| + } |
| + |
| +/*! ZSTD_wildcopy() : |
| +* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ |
| +#define WILDCOPY_OVERLENGTH 8 |
| +ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length) |
| +{ |
| + const BYTE *ip = (const BYTE *)src; |
| + BYTE *op = (BYTE *)dst; |
| + BYTE *const oend = op + length; |
| + do |
| + COPY8(op, ip) |
| + while (op < oend); |
| +} |
| + |
| +ZSTD_STATIC void ZSTD_wildcopy_e(void *dst, const void *src, void *dstEnd) /* should be faster for decoding, but strangely, not verified on all platforms */ |
| +{ |
| + const BYTE *ip = (const BYTE *)src; |
| + BYTE *op = (BYTE *)dst; |
| + BYTE *const oend = (BYTE *)dstEnd; |
| + do |
| + COPY8(op, ip) |
| + while (op < oend); |
| +} |
| + |
| +/*-******************************************* |
| +* Private interfaces |
| +*********************************************/ |
| +typedef struct ZSTD_stats_s ZSTD_stats_t; |
| + |
| +typedef struct { |
| + U32 off; |
| + U32 len; |
| +} ZSTD_match_t; |
| + |
| +typedef struct { |
| + U32 price; |
| + U32 off; |
| + U32 mlen; |
| + U32 litlen; |
| + U32 rep[ZSTD_REP_NUM]; |
| +} ZSTD_optimal_t; |
| + |
| +typedef struct seqDef_s { |
| + U32 offset; |
| + U16 litLength; |
| + U16 matchLength; |
| +} seqDef; |
| + |
| +typedef struct { |
| + seqDef *sequencesStart; |
| + seqDef *sequences; |
| + BYTE *litStart; |
| + BYTE *lit; |
| + BYTE *llCode; |
| + BYTE *mlCode; |
| + BYTE *ofCode; |
| + U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ |
| + U32 longLengthPos; |
| + /* opt */ |
| + ZSTD_optimal_t *priceTable; |
| + ZSTD_match_t *matchTable; |
| + U32 *matchLengthFreq; |
| + U32 *litLengthFreq; |
| + U32 *litFreq; |
| + U32 *offCodeFreq; |
| + U32 matchLengthSum; |
| + U32 matchSum; |
| + U32 litLengthSum; |
| + U32 litSum; |
| + U32 offCodeSum; |
| + U32 log2matchLengthSum; |
| + U32 log2matchSum; |
| + U32 log2litLengthSum; |
| + U32 log2litSum; |
| + U32 log2offCodeSum; |
| + U32 factor; |
| + U32 staticPrices; |
| + U32 cachedPrice; |
| + U32 cachedLitLength; |
| + const BYTE *cachedLiterals; |
| +} seqStore_t; |
| + |
| +const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx); |
| +void ZSTD_seqToCodes(const seqStore_t *seqStorePtr); |
| +int ZSTD_isSkipFrame(ZSTD_DCtx *dctx); |
| + |
| +/*= Custom memory allocation functions */ |
| +typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size); |
| +typedef void (*ZSTD_freeFunction)(void *opaque, void *address); |
| +typedef struct { |
| + ZSTD_allocFunction customAlloc; |
| + ZSTD_freeFunction customFree; |
| + void *opaque; |
| +} ZSTD_customMem; |
| + |
| +void *ZSTD_malloc(size_t size, ZSTD_customMem customMem); |
| +void ZSTD_free(void *ptr, ZSTD_customMem customMem); |
| + |
| +/*====== stack allocation ======*/ |
| + |
| +typedef struct { |
| + void *ptr; |
| + const void *end; |
| +} ZSTD_stack; |
| + |
| +#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t)) |
| +#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t)) |
| + |
| +ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize); |
| + |
| +void *ZSTD_stackAllocAll(void *opaque, size_t *size); |
| +void *ZSTD_stackAlloc(void *opaque, size_t size); |
| +void ZSTD_stackFree(void *opaque, void *address); |
| + |
| +/*====== common function ======*/ |
| + |
| +ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); } |
| + |
| +/* hidden functions */ |
| + |
| +/* ZSTD_invalidateRepCodes() : |
| + * ensures next compression will not use repcodes from previous block. |
| + * Note : only works with regular variant; |
| + * do not use with extDict variant ! */ |
| +void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx); |
| + |
| +size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx); |
| +size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx); |
| +size_t ZSTD_freeCDict(ZSTD_CDict *cdict); |
| +size_t ZSTD_freeDDict(ZSTD_DDict *ddict); |
| +size_t ZSTD_freeCStream(ZSTD_CStream *zcs); |
| +size_t ZSTD_freeDStream(ZSTD_DStream *zds); |
| + |
| +#endif /* ZSTD_CCOMMON_H_MODULE */ |
| diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h |
| new file mode 100644 |
| index 0000000..55e1b4c |
| --- /dev/null |
| +++ b/lib/zstd/zstd_opt.h |
| @@ -0,0 +1,1014 @@ |
| +/** |
| + * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. |
| + * All rights reserved. |
| + * |
| + * This source code is licensed under the BSD-style license found in the |
| + * LICENSE file in the root directory of https://github.com/facebook/zstd. |
| + * An additional grant of patent rights can be found in the PATENTS file in the |
| + * same directory. |
| + * |
| + * This program is free software; you can redistribute it and/or modify it under |
| + * the terms of the GNU General Public License version 2 as published by the |
| + * Free Software Foundation. This program is dual-licensed; you may select |
| + * either version 2 of the GNU General Public License ("GPL") or BSD license |
| + * ("BSD"). |
| + */ |
| + |
| +/* Note : this file is intended to be included within zstd_compress.c */ |
| + |
| +#ifndef ZSTD_OPT_H_91842398743 |
| +#define ZSTD_OPT_H_91842398743 |
| + |
| +#define ZSTD_LITFREQ_ADD 2 |
| +#define ZSTD_FREQ_DIV 4 |
| +#define ZSTD_MAX_PRICE (1 << 30) |
| + |
| +/*-************************************* |
| +* Price functions for optimal parser |
| +***************************************/ |
| +FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr) |
| +{ |
| + ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1); |
| + ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1); |
| + ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1); |
| + ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1); |
| + ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum)); |
| +} |
| + |
| +ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize) |
| +{ |
| + unsigned u; |
| + |
| + ssPtr->cachedLiterals = NULL; |
| + ssPtr->cachedPrice = ssPtr->cachedLitLength = 0; |
| + ssPtr->staticPrices = 0; |
| + |
| + if (ssPtr->litLengthSum == 0) { |
| + if (srcSize <= 1024) |
| + ssPtr->staticPrices = 1; |
| + |
| + for (u = 0; u <= MaxLit; u++) |
| + ssPtr->litFreq[u] = 0; |
| + for (u = 0; u < srcSize; u++) |
| + ssPtr->litFreq[src[u]]++; |
| + |
| + ssPtr->litSum = 0; |
| + ssPtr->litLengthSum = MaxLL + 1; |
| + ssPtr->matchLengthSum = MaxML + 1; |
| + ssPtr->offCodeSum = (MaxOff + 1); |
| + ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits); |
| + |
| + for (u = 0; u <= MaxLit; u++) { |
| + ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV); |
| + ssPtr->litSum += ssPtr->litFreq[u]; |
| + } |
| + for (u = 0; u <= MaxLL; u++) |
| + ssPtr->litLengthFreq[u] = 1; |
| + for (u = 0; u <= MaxML; u++) |
| + ssPtr->matchLengthFreq[u] = 1; |
| + for (u = 0; u <= MaxOff; u++) |
| + ssPtr->offCodeFreq[u] = 1; |
| + } else { |
| + ssPtr->matchLengthSum = 0; |
| + ssPtr->litLengthSum = 0; |
| + ssPtr->offCodeSum = 0; |
| + ssPtr->matchSum = 0; |
| + ssPtr->litSum = 0; |
| + |
| + for (u = 0; u <= MaxLit; u++) { |
| + ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1)); |
| + ssPtr->litSum += ssPtr->litFreq[u]; |
| + } |
| + for (u = 0; u <= MaxLL; u++) { |
| + ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1)); |
| + ssPtr->litLengthSum += ssPtr->litLengthFreq[u]; |
| + } |
| + for (u = 0; u <= MaxML; u++) { |
| + ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV); |
| + ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u]; |
| + ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3); |
| + } |
| + ssPtr->matchSum *= ZSTD_LITFREQ_ADD; |
| + for (u = 0; u <= MaxOff; u++) { |
| + ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV); |
| + ssPtr->offCodeSum += ssPtr->offCodeFreq[u]; |
| + } |
| + } |
| + |
| + ZSTD_setLog2Prices(ssPtr); |
| +} |
| + |
| +FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals) |
| +{ |
| + U32 price, u; |
| + |
| + if (ssPtr->staticPrices) |
| + return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6); |
| + |
| + if (litLength == 0) |
| + return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1); |
| + |
| + /* literals */ |
| + if (ssPtr->cachedLiterals == literals) { |
| + U32 const additional = litLength - ssPtr->cachedLitLength; |
| + const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength; |
| + price = ssPtr->cachedPrice + additional * ssPtr->log2litSum; |
| + for (u = 0; u < additional; u++) |
| + price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1); |
| + ssPtr->cachedPrice = price; |
| + ssPtr->cachedLitLength = litLength; |
| + } else { |
| + price = litLength * ssPtr->log2litSum; |
| + for (u = 0; u < litLength; u++) |
| + price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1); |
| + |
| + if (litLength >= 12) { |
| + ssPtr->cachedLiterals = literals; |
| + ssPtr->cachedPrice = price; |
| + ssPtr->cachedLitLength = litLength; |
| + } |
| + } |
| + |
| + /* literal Length */ |
| + { |
| + const BYTE LL_deltaCode = 19; |
| + const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; |
| + price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1); |
| + } |
| + |
| + return price; |
| +} |
| + |
| +FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra) |
| +{ |
| + /* offset */ |
| + U32 price; |
| + BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1); |
| + |
| + if (seqStorePtr->staticPrices) |
| + return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode; |
| + |
| + price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1); |
| + if (!ultra && offCode >= 20) |
| + price += (offCode - 19) * 2; |
| + |
| + /* match Length */ |
| + { |
| + const BYTE ML_deltaCode = 36; |
| + const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; |
| + price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1); |
| + } |
| + |
| + return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor; |
| +} |
| + |
| +ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength) |
| +{ |
| + U32 u; |
| + |
| + /* literals */ |
| + seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD; |
| + for (u = 0; u < litLength; u++) |
| + seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; |
| + |
| + /* literal Length */ |
| + { |
| + const BYTE LL_deltaCode = 19; |
| + const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; |
| + seqStorePtr->litLengthFreq[llCode]++; |
| + seqStorePtr->litLengthSum++; |
| + } |
| + |
| + /* match offset */ |
| + { |
| + BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1); |
| + seqStorePtr->offCodeSum++; |
| + seqStorePtr->offCodeFreq[offCode]++; |
| + } |
| + |
| + /* match Length */ |
| + { |
| + const BYTE ML_deltaCode = 36; |
| + const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; |
| + seqStorePtr->matchLengthFreq[mlCode]++; |
| + seqStorePtr->matchLengthSum++; |
| + } |
| + |
| + ZSTD_setLog2Prices(seqStorePtr); |
| +} |
| + |
| +#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ |
| + { \ |
| + while (last_pos < pos) { \ |
| + opt[last_pos + 1].price = ZSTD_MAX_PRICE; \ |
| + last_pos++; \ |
| + } \ |
| + opt[pos].mlen = mlen_; \ |
| + opt[pos].off = offset_; \ |
| + opt[pos].litlen = litlen_; \ |
| + opt[pos].price = price_; \ |
| + } |
| + |
| +/* Update hashTable3 up to ip (excluded) |
| + Assumption : always within prefix (i.e. not within extDict) */ |
| +FORCE_INLINE |
| +U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip) |
| +{ |
| + U32 *const hashTable3 = zc->hashTable3; |
| + U32 const hashLog3 = zc->hashLog3; |
| + const BYTE *const base = zc->base; |
| + U32 idx = zc->nextToUpdate3; |
| + const U32 target = zc->nextToUpdate3 = (U32)(ip - base); |
| + const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); |
| + |
| + while (idx < target) { |
| + hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx; |
| + idx++; |
| + } |
| + |
| + return hashTable3[hash3]; |
| +} |
| + |
| +/*-************************************* |
| +* Binary Tree search |
| +***************************************/ |
| +static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict, |
| + ZSTD_match_t *matches, const U32 minMatchLen) |
| +{ |
| + const BYTE *const base = zc->base; |
| + const U32 curr = (U32)(ip - base); |
| + const U32 hashLog = zc->params.cParams.hashLog; |
| + const size_t h = ZSTD_hashPtr(ip, hashLog, mls); |
| + U32 *const hashTable = zc->hashTable; |
| + U32 matchIndex = hashTable[h]; |
| + U32 *const bt = zc->chainTable; |
| + const U32 btLog = zc->params.cParams.chainLog - 1; |
| + const U32 btMask = (1U << btLog) - 1; |
| + size_t commonLengthSmaller = 0, commonLengthLarger = 0; |
| + const BYTE *const dictBase = zc->dictBase; |
| + const U32 dictLimit = zc->dictLimit; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + const BYTE *const prefixStart = base + dictLimit; |
| + const U32 btLow = btMask >= curr ? 0 : curr - btMask; |
| + const U32 windowLow = zc->lowLimit; |
| + U32 *smallerPtr = bt + 2 * (curr & btMask); |
| + U32 *largerPtr = bt + 2 * (curr & btMask) + 1; |
| + U32 matchEndIdx = curr + 8; |
| + U32 dummy32; /* to be nullified at the end */ |
| + U32 mnum = 0; |
| + |
| + const U32 minMatch = (mls == 3) ? 3 : 4; |
| + size_t bestLength = minMatchLen - 1; |
| + |
| + if (minMatch == 3) { /* HC3 match finder */ |
| + U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip); |
| + if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) { |
| + const BYTE *match; |
| + size_t currMl = 0; |
| + if ((!extDict) || matchIndex3 >= dictLimit) { |
| + match = base + matchIndex3; |
| + if (match[bestLength] == ip[bestLength]) |
| + currMl = ZSTD_count(ip, match, iLimit); |
| + } else { |
| + match = dictBase + matchIndex3; |
| + if (ZSTD_readMINMATCH(match, MINMATCH) == |
| + ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ |
| + currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; |
| + } |
| + |
| + /* save best solution */ |
| + if (currMl > bestLength) { |
| + bestLength = currMl; |
| + matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3; |
| + matches[mnum].len = (U32)currMl; |
| + mnum++; |
| + if (currMl > ZSTD_OPT_NUM) |
| + goto update; |
| + if (ip + currMl == iLimit) |
| +					goto update; /* best possible, and avoid read overflow */ |
| + } |
| + } |
| + } |
| + |
| + hashTable[h] = curr; /* Update Hash Table */ |
| + |
| + while (nbCompares-- && (matchIndex > windowLow)) { |
| + U32 *nextPtr = bt + 2 * (matchIndex & btMask); |
| + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ |
| + const BYTE *match; |
| + |
| + if ((!extDict) || (matchIndex + matchLength >= dictLimit)) { |
| + match = base + matchIndex; |
| + if (match[matchLength] == ip[matchLength]) { |
| + matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1; |
| + } |
| + } else { |
| + match = dictBase + matchIndex; |
| + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart); |
| + if (matchIndex + matchLength >= dictLimit) |
| + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ |
| + } |
| + |
| + if (matchLength > bestLength) { |
| + if (matchLength > matchEndIdx - matchIndex) |
| + matchEndIdx = matchIndex + (U32)matchLength; |
| + bestLength = matchLength; |
| + matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex; |
| + matches[mnum].len = (U32)matchLength; |
| + mnum++; |
| + if (matchLength > ZSTD_OPT_NUM) |
| + break; |
| + if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */ |
| + break; /* drop, to guarantee consistency (miss a little bit of compression) */ |
| + } |
| + |
| + if (match[matchLength] < ip[matchLength]) { |
| + /* match is smaller than curr */ |
| + *smallerPtr = matchIndex; /* update smaller idx */ |
| + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ |
| + if (matchIndex <= btLow) { |
| + smallerPtr = &dummy32; |
| + break; |
| + } /* beyond tree size, stop the search */ |
| + smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */ |
| + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */ |
| + } else { |
| + /* match is larger than curr */ |
| + *largerPtr = matchIndex; |
| + commonLengthLarger = matchLength; |
| + if (matchIndex <= btLow) { |
| + largerPtr = &dummy32; |
| + break; |
| + } /* beyond tree size, stop the search */ |
| + largerPtr = nextPtr; |
| + matchIndex = nextPtr[0]; |
| + } |
| + } |
| + |
| + *smallerPtr = *largerPtr = 0; |
| + |
| +update: |
| + zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1; |
| + return mnum; |
| +} |
| + |
| +/** Tree updater, providing best match */ |
| +static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches, |
| + const U32 minMatchLen) |
| +{ |
| + if (ip < zc->base + zc->nextToUpdate) |
| + return 0; /* skipped area */ |
| + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); |
| + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); |
| +} |
| + |
| +static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */ |
| + const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch, |
| + ZSTD_match_t *matches, const U32 minMatchLen) |
| +{ |
| + switch (matchLengthSearch) { |
| + case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); |
| + default: |
| + case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); |
| + case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); |
| + case 7: |
| + case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); |
| + } |
| +} |
| + |
| +/** Tree updater, providing best match */ |
| +static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, |
| + ZSTD_match_t *matches, const U32 minMatchLen) |
| +{ |
| + if (ip < zc->base + zc->nextToUpdate) |
| + return 0; /* skipped area */ |
| + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); |
| + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); |
| +} |
| + |
| +static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */ |
| + const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch, |
| + ZSTD_match_t *matches, const U32 minMatchLen) |
| +{ |
| + switch (matchLengthSearch) { |
| + case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); |
| + default: |
| + case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); |
| + case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); |
| + case 7: |
| + case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); |
| + } |
| +} |
| + |
| +/*-******************************* |
| +* Optimal parser |
| +*********************************/ |
| +FORCE_INLINE |
| +void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra) |
| +{ |
| + seqStore_t *seqStorePtr = &(ctx->seqStore); |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - 8; |
| + const BYTE *const base = ctx->base; |
| + const BYTE *const prefixStart = base + ctx->dictLimit; |
| + |
| + const U32 maxSearches = 1U << ctx->params.cParams.searchLog; |
| + const U32 sufficient_len = ctx->params.cParams.targetLength; |
| + const U32 mls = ctx->params.cParams.searchLength; |
| + const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4; |
| + |
| + ZSTD_optimal_t *opt = seqStorePtr->priceTable; |
| + ZSTD_match_t *matches = seqStorePtr->matchTable; |
| + const BYTE *inr; |
| + U32 offset, rep[ZSTD_REP_NUM]; |
| + |
| + /* init */ |
| + ctx->nextToUpdate3 = ctx->nextToUpdate; |
| + ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize); |
| + ip += (ip == prefixStart); |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + rep[i] = ctx->rep[i]; |
| + } |
| + |
| + /* Match Loop */ |
| + while (ip < ilimit) { |
| + U32 cur, match_num, last_pos, litlen, price; |
| + U32 u, mlen, best_mlen, best_off, litLength; |
| + memset(opt, 0, sizeof(ZSTD_optimal_t)); |
| + last_pos = 0; |
| + litlen = (U32)(ip - anchor); |
| + |
| + /* check repCode */ |
| + { |
| + U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor); |
| + for (i = (ip == anchor); i < last_i; i++) { |
| + const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; |
| + if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) && |
| + (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) { |
| + mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch; |
| + if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { |
| + best_mlen = mlen; |
| + best_off = i; |
| + cur = 0; |
| + last_pos = 1; |
| + goto _storeSequence; |
| + } |
| + best_off = i - (ip == anchor); |
| + do { |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); |
| + if (mlen > last_pos || price < opt[mlen].price) |
| + SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ |
| + mlen--; |
| + } while (mlen >= minMatch); |
| + } |
| + } |
| + } |
| + |
| + match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch); |
| + |
| + if (!last_pos && !match_num) { |
| + ip++; |
| + continue; |
| + } |
| + |
| + if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) { |
| + best_mlen = matches[match_num - 1].len; |
| + best_off = matches[match_num - 1].off; |
| + cur = 0; |
| + last_pos = 1; |
| + goto _storeSequence; |
| + } |
| + |
| + /* set prices using matches at position = 0 */ |
| + best_mlen = (last_pos) ? last_pos : minMatch; |
| + for (u = 0; u < match_num; u++) { |
| + mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; |
| + best_mlen = matches[u].len; |
| + while (mlen <= best_mlen) { |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); |
| + if (mlen > last_pos || price < opt[mlen].price) |
| + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ |
| + mlen++; |
| + } |
| + } |
| + |
| + if (last_pos < minMatch) { |
| + ip++; |
| + continue; |
| + } |
| + |
| + /* initialize opt[0] */ |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + opt[0].rep[i] = rep[i]; |
| + } |
| + opt[0].mlen = 1; |
| + opt[0].litlen = litlen; |
| + |
| + /* check further positions */ |
| + for (cur = 1; cur <= last_pos; cur++) { |
| + inr = ip + cur; |
| + |
| + if (opt[cur - 1].mlen == 1) { |
| + litlen = opt[cur - 1].litlen + 1; |
| + if (cur > litlen) { |
| + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen); |
| + } else |
| + price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); |
| + } else { |
| + litlen = 1; |
| + price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1); |
| + } |
| + |
| + if (cur > last_pos || price <= opt[cur].price) |
| + SET_PRICE(cur, 1, 0, litlen, price); |
| + |
| + if (cur == last_pos) |
| + break; |
| + |
| +			if (inr > ilimit) /* last match must start at a minimum distance of 8 from iend */ |
| + continue; |
| + |
| + mlen = opt[cur].mlen; |
| + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { |
| + opt[cur].rep[2] = opt[cur - mlen].rep[1]; |
| + opt[cur].rep[1] = opt[cur - mlen].rep[0]; |
| + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; |
| + } else { |
| + opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2]; |
| + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1]; |
| + opt[cur].rep[0] = |
| + ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]); |
| + } |
| + |
| + best_mlen = minMatch; |
| + { |
| + U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); |
| + for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */ |
| + const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; |
| + if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) && |
| + (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) { |
| + mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch; |
| + |
| + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { |
| + best_mlen = mlen; |
| + best_off = i; |
| + last_pos = cur + 1; |
| + goto _storeSequence; |
| + } |
| + |
| + best_off = i - (opt[cur].mlen != 1); |
| + if (mlen > best_mlen) |
| + best_mlen = mlen; |
| + |
| + do { |
| + if (opt[cur].mlen == 1) { |
| + litlen = opt[cur].litlen; |
| + if (cur > litlen) { |
| + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen, |
| + best_off, mlen - MINMATCH, ultra); |
| + } else |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); |
| + } else { |
| + litlen = 0; |
| + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); |
| + } |
| + |
| + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) |
| + SET_PRICE(cur + mlen, mlen, i, litlen, price); |
| + mlen--; |
| + } while (mlen >= minMatch); |
| + } |
| + } |
| + } |
| + |
| + match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); |
| + |
| + if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) { |
| + best_mlen = matches[match_num - 1].len; |
| + best_off = matches[match_num - 1].off; |
| + last_pos = cur + 1; |
| + goto _storeSequence; |
| + } |
| + |
| + /* set prices using matches at position = cur */ |
| + for (u = 0; u < match_num; u++) { |
| + mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; |
| + best_mlen = matches[u].len; |
| + |
| + while (mlen <= best_mlen) { |
| + if (opt[cur].mlen == 1) { |
| + litlen = opt[cur].litlen; |
| + if (cur > litlen) |
| + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen, |
| + matches[u].off - 1, mlen - MINMATCH, ultra); |
| + else |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); |
| + } else { |
| + litlen = 0; |
| + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra); |
| + } |
| + |
| + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) |
| + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); |
| + |
| + mlen++; |
| + } |
| + } |
| + } |
| + |
| + best_mlen = opt[last_pos].mlen; |
| + best_off = opt[last_pos].off; |
| + cur = last_pos - best_mlen; |
| + |
| + /* store sequence */ |
| +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ |
| + opt[0].mlen = 1; |
| + |
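| +		/* walk opt[] backwards from the final match, rewriting each visited |
| +		 * cell with the (mlen, off) choice to apply when moving forward */ |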
| + while (1) { |
| + mlen = opt[cur].mlen; |
| + offset = opt[cur].off; |
| + opt[cur].mlen = best_mlen; |
| + opt[cur].off = best_off; |
| + best_mlen = mlen; |
| + best_off = offset; |
| + if (mlen > cur) |
| + break; |
| + cur -= mlen; |
| + } |
| + |
| + for (u = 0; u <= last_pos;) { |
| + u += opt[u].mlen; |
| + } |
| + |
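| +		/* replay the chosen path forward, updating rep[] and storing each sequence */ |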
| + for (cur = 0; cur < last_pos;) { |
| + mlen = opt[cur].mlen; |
| + if (mlen == 1) { |
| + ip++; |
| + cur++; |
| + continue; |
| + } |
| + offset = opt[cur].off; |
| + cur += mlen; |
| + litLength = (U32)(ip - anchor); |
| + |
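| +			/* off values above ZSTD_REP_MOVE_OPT carry a real distance; smaller |
| +			 * values select one of the recent offsets tracked in rep[] */ |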
| + if (offset > ZSTD_REP_MOVE_OPT) { |
| + rep[2] = rep[1]; |
| + rep[1] = rep[0]; |
| + rep[0] = offset - ZSTD_REP_MOVE_OPT; |
| + offset--; |
| + } else { |
| + if (offset != 0) { |
| + best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]); |
| + if (offset != 1) |
| + rep[2] = rep[1]; |
| + rep[1] = rep[0]; |
| + rep[0] = best_off; |
| + } |
| + if (litLength == 0) |
| + offset--; |
| + } |
| + |
| + ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); |
| + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); |
| + anchor = ip = ip + mlen; |
| + } |
| + } /* for (cur=0; cur < last_pos; ) */ |
| + |
| + /* Save reps for next block */ |
| + { |
| + int i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + ctx->repToConfirm[i] = rep[i]; |
| + } |
| + |
| + /* Last Literals */ |
| + { |
| + size_t const lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
| +FORCE_INLINE |
| +void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra) |
| +{ |
| + seqStore_t *seqStorePtr = &(ctx->seqStore); |
| + const BYTE *const istart = (const BYTE *)src; |
| + const BYTE *ip = istart; |
| + const BYTE *anchor = istart; |
| + const BYTE *const iend = istart + srcSize; |
| + const BYTE *const ilimit = iend - 8; |
| + const BYTE *const base = ctx->base; |
| + const U32 lowestIndex = ctx->lowLimit; |
| + const U32 dictLimit = ctx->dictLimit; |
| + const BYTE *const prefixStart = base + dictLimit; |
| + const BYTE *const dictBase = ctx->dictBase; |
| + const BYTE *const dictEnd = dictBase + dictLimit; |
| + |
| + const U32 maxSearches = 1U << ctx->params.cParams.searchLog; |
| + const U32 sufficient_len = ctx->params.cParams.targetLength; |
| + const U32 mls = ctx->params.cParams.searchLength; |
| + const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4; |
| + |
| + ZSTD_optimal_t *opt = seqStorePtr->priceTable; |
| + ZSTD_match_t *matches = seqStorePtr->matchTable; |
| + const BYTE *inr; |
| + |
| + /* init */ |
| + U32 offset, rep[ZSTD_REP_NUM]; |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + rep[i] = ctx->rep[i]; |
| + } |
| + |
| + ctx->nextToUpdate3 = ctx->nextToUpdate; |
| + ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize); |
| + ip += (ip == prefixStart); |
| + |
| + /* Match Loop */ |
| + while (ip < ilimit) { |
| + U32 cur, match_num, last_pos, litlen, price; |
| + U32 u, mlen, best_mlen, best_off, litLength; |
| + U32 curr = (U32)(ip - base); |
| + memset(opt, 0, sizeof(ZSTD_optimal_t)); |
| + last_pos = 0; |
| + opt[0].litlen = (U32)(ip - anchor); |
| + |
| + /* check repCode */ |
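| +		/* a repcode target may lie in the dictionary segment, so the match base |
| +		 * and end are chosen from repIndex vs. dictLimit */ |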
| + { |
| + U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor); |
| + for (i = (ip == anchor); i < last_i; i++) { |
| + const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; |
| + const U32 repIndex = (U32)(curr - repCur); |
| + const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *const repMatch = repBase + repIndex; |
| + if ((repCur > 0 && repCur <= (S32)curr) && |
| + (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ |
| + && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) { |
| +					/* repcode detected, we should take it */ |
| + const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; |
| + mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch; |
| + |
| + if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { |
| + best_mlen = mlen; |
| + best_off = i; |
| + cur = 0; |
| + last_pos = 1; |
| + goto _storeSequence; |
| + } |
| + |
| + best_off = i - (ip == anchor); |
| + litlen = opt[0].litlen; |
| + do { |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); |
| + if (mlen > last_pos || price < opt[mlen].price) |
| + SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ |
| + mlen--; |
| + } while (mlen >= minMatch); |
| + } |
| + } |
| + } |
| + |
| + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */ |
| + |
| + if (!last_pos && !match_num) { |
| + ip++; |
| + continue; |
| + } |
| + |
| + { |
| + U32 i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + opt[0].rep[i] = rep[i]; |
| + } |
| + opt[0].mlen = 1; |
| + |
| + if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) { |
| + best_mlen = matches[match_num - 1].len; |
| + best_off = matches[match_num - 1].off; |
| + cur = 0; |
| + last_pos = 1; |
| + goto _storeSequence; |
| + } |
| + |
| + best_mlen = (last_pos) ? last_pos : minMatch; |
| + |
| + /* set prices using matches at position = 0 */ |
| + for (u = 0; u < match_num; u++) { |
| + mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; |
| + best_mlen = matches[u].len; |
| + litlen = opt[0].litlen; |
| + while (mlen <= best_mlen) { |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); |
| + if (mlen > last_pos || price < opt[mlen].price) |
| + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); |
| + mlen++; |
| + } |
| + } |
| + |
| + if (last_pos < minMatch) { |
| + ip++; |
| + continue; |
| + } |
| + |
| + /* check further positions */ |
| + for (cur = 1; cur <= last_pos; cur++) { |
| + inr = ip + cur; |
| + |
| + if (opt[cur - 1].mlen == 1) { |
| + litlen = opt[cur - 1].litlen + 1; |
| + if (cur > litlen) { |
| + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen); |
| + } else |
| + price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); |
| + } else { |
| + litlen = 1; |
| + price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1); |
| + } |
| + |
| + if (cur > last_pos || price <= opt[cur].price) |
| + SET_PRICE(cur, 1, 0, litlen, price); |
| + |
| + if (cur == last_pos) |
| + break; |
| + |
| +			if (inr > ilimit) /* last match must start at a minimum distance of 8 from iend */ |
| + continue; |
| + |
| + mlen = opt[cur].mlen; |
| + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { |
| + opt[cur].rep[2] = opt[cur - mlen].rep[1]; |
| + opt[cur].rep[1] = opt[cur - mlen].rep[0]; |
| + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; |
| + } else { |
| + opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2]; |
| + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1]; |
| + opt[cur].rep[0] = |
| + ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]); |
| + } |
| + |
| + best_mlen = minMatch; |
| + { |
| + U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); |
| + for (i = (mlen != 1); i < last_i; i++) { |
| + const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; |
| + const U32 repIndex = (U32)(curr + cur - repCur); |
| + const BYTE *const repBase = repIndex < dictLimit ? dictBase : base; |
| + const BYTE *const repMatch = repBase + repIndex; |
| + if ((repCur > 0 && repCur <= (S32)(curr + cur)) && |
| + (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ |
| + && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) { |
| + /* repcode detected */ |
| + const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend; |
| + mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch; |
| + |
| + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { |
| + best_mlen = mlen; |
| + best_off = i; |
| + last_pos = cur + 1; |
| + goto _storeSequence; |
| + } |
| + |
| + best_off = i - (opt[cur].mlen != 1); |
| + if (mlen > best_mlen) |
| + best_mlen = mlen; |
| + |
| + do { |
| + if (opt[cur].mlen == 1) { |
| + litlen = opt[cur].litlen; |
| + if (cur > litlen) { |
| + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen, |
| + best_off, mlen - MINMATCH, ultra); |
| + } else |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); |
| + } else { |
| + litlen = 0; |
| + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); |
| + } |
| + |
| + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) |
| + SET_PRICE(cur + mlen, mlen, i, litlen, price); |
| + mlen--; |
| + } while (mlen >= minMatch); |
| + } |
| + } |
| + } |
| + |
| + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); |
| + |
| + if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) { |
| + best_mlen = matches[match_num - 1].len; |
| + best_off = matches[match_num - 1].off; |
| + last_pos = cur + 1; |
| + goto _storeSequence; |
| + } |
| + |
| + /* set prices using matches at position = cur */ |
| + for (u = 0; u < match_num; u++) { |
| + mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen; |
| + best_mlen = matches[u].len; |
| + |
| + while (mlen <= best_mlen) { |
| + if (opt[cur].mlen == 1) { |
| + litlen = opt[cur].litlen; |
| + if (cur > litlen) |
| + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen, |
| + matches[u].off - 1, mlen - MINMATCH, ultra); |
| + else |
| + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra); |
| + } else { |
| + litlen = 0; |
| + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra); |
| + } |
| + |
| + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) |
| + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); |
| + |
| + mlen++; |
| + } |
| + } |
| + } /* for (cur = 1; cur <= last_pos; cur++) */ |
| + |
| + best_mlen = opt[last_pos].mlen; |
| + best_off = opt[last_pos].off; |
| + cur = last_pos - best_mlen; |
| + |
| + /* store sequence */ |
| +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ |
| + opt[0].mlen = 1; |
| + |
| + while (1) { |
| + mlen = opt[cur].mlen; |
| + offset = opt[cur].off; |
| + opt[cur].mlen = best_mlen; |
| + opt[cur].off = best_off; |
| + best_mlen = mlen; |
| + best_off = offset; |
| + if (mlen > cur) |
| + break; |
| + cur -= mlen; |
| + } |
| + |
| + for (u = 0; u <= last_pos;) { |
| + u += opt[u].mlen; |
| + } |
| + |
| + for (cur = 0; cur < last_pos;) { |
| + mlen = opt[cur].mlen; |
| + if (mlen == 1) { |
| + ip++; |
| + cur++; |
| + continue; |
| + } |
| + offset = opt[cur].off; |
| + cur += mlen; |
| + litLength = (U32)(ip - anchor); |
| + |
| + if (offset > ZSTD_REP_MOVE_OPT) { |
| + rep[2] = rep[1]; |
| + rep[1] = rep[0]; |
| + rep[0] = offset - ZSTD_REP_MOVE_OPT; |
| + offset--; |
| + } else { |
| + if (offset != 0) { |
| + best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]); |
| + if (offset != 1) |
| + rep[2] = rep[1]; |
| + rep[1] = rep[0]; |
| + rep[0] = best_off; |
| + } |
| + |
| + if (litLength == 0) |
| + offset--; |
| + } |
| + |
| + ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); |
| + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH); |
| + anchor = ip = ip + mlen; |
| + } |
| + } /* for (cur=0; cur < last_pos; ) */ |
| + |
| + /* Save reps for next block */ |
| + { |
| + int i; |
| + for (i = 0; i < ZSTD_REP_NUM; i++) |
| + ctx->repToConfirm[i] = rep[i]; |
| + } |
| + |
| + /* Last Literals */ |
| + { |
| + size_t lastLLSize = iend - anchor; |
| + memcpy(seqStorePtr->lit, anchor, lastLLSize); |
| + seqStorePtr->lit += lastLLSize; |
| + } |
| +} |
| + |
| +#endif /* ZSTD_OPT_H_91842398743 */ |
| -- |
| 2.9.3 |