/*
    ZSTD HC - High Compression Mode of Zstandard
    Copyright (C) 2015, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
    - Zstd source repository : https://www.zstd.net
*/


/* *******************************************************
*  Compiler specifics
*********************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  define FORCE_INLINE static __forceinline
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
#else
#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#  ifdef __GNUC__
#    define FORCE_INLINE static inline __attribute__((always_inline))
#  else
#    define FORCE_INLINE static inline
#  endif
#endif


/* *************************************
*  Includes
***************************************/
#include <stdlib.h>   /* malloc */
#include <string.h>   /* memset */
#include "mem.h"
#include "fse_static.h"
#include "huff0.h"
#include "zstd_static.h"
#include "zstd_internal.h"


/* *************************************
*  Constants
***************************************/
static const U32 g_searchStrength = 8;


/* *************************************
*  Sequence storage
***************************************/
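/* seqStore_t gathers the parsed sequences of a block before entropy coding.
*  Each array comes as a pair of pointers : the `xxxStart` member marks the
*  beginning of the array inside the shared buffer, while the second member is
*  the current write position (see ZSTD_resetSeqStore() and ZSTD_storeSeq()).
*  `dumps` receives the extra bytes of literal or match lengths which do not
*  fit into a single byte. */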
typedef struct {
    void* buffer;
    U32*  offsetStart;
    U32*  offset;
    BYTE* offCodeStart;
    BYTE* offCode;
    BYTE* litStart;
    BYTE* lit;
    BYTE* litLengthStart;
    BYTE* litLength;
    BYTE* matchLengthStart;
    BYTE* matchLength;
    BYTE* dumpsStart;
    BYTE* dumps;
} seqStore_t;

static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
    ssPtr->offset = ssPtr->offsetStart;
    ssPtr->lit = ssPtr->litStart;
    ssPtr->litLength = ssPtr->litLengthStart;
    ssPtr->matchLength = ssPtr->matchLengthStart;
    ssPtr->dumps = ssPtr->dumpsStart;
}


/* *************************************
*  Context memory management
***************************************/
#define WORKPLACESIZE (BLOCKSIZE*3)

struct ZSTD_CCtx_s
{
    const BYTE* nextSrc;    /* next block here to continue on current prefix */
    const BYTE* base;       /* All regular indexes relative to this position */
    const BYTE* dictBase;   /* extDict indexes relative to this position */
    U32   dictLimit;        /* below that point, need extDict */
    U32   lowLimit;         /* below that point, no more data */
    U32   nextToUpdate;     /* index from which to continue dictionary update */
    ZSTD_parameters params;
    void* workSpace;
    size_t workSpaceSize;

    seqStore_t seqStore;    /* sequences storage ptrs */
    U32* hashTable;
    U32* contentTable;
};


ZSTD_CCtx* ZSTD_createCCtx(void)
{
    return (ZSTD_CCtx*) calloc(1, sizeof(ZSTD_CCtx));
}

size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
    free(cctx->workSpace);
    free(cctx);
    return 0;
}


static unsigned ZSTD_highbit(U32 val);

/** ZSTD_validateParams
    correct params value to remain within authorized range
    optimize for srcSize if srcSize > 0 */
void ZSTD_validateParams(ZSTD_parameters* params, U64 srcSizeHint)
{
    const U32 btPlus = (params->strategy == ZSTD_btlazy2);

    /* validate params */
    if (params->windowLog > ZSTD_WINDOWLOG_MAX) params->windowLog = ZSTD_WINDOWLOG_MAX;
    if (params->windowLog < ZSTD_WINDOWLOG_MIN) params->windowLog = ZSTD_WINDOWLOG_MIN;

    /* correct params, to use less memory */
    if ((srcSizeHint > 0) && (srcSizeHint < (1<<ZSTD_WINDOWLOG_MAX)))
    {
        U32 srcLog = ZSTD_highbit((U32)srcSizeHint-1) + 1;
        if (params->windowLog > srcLog) params->windowLog = srcLog;
    }

    if (params->contentLog   > params->windowLog+btPlus) params->contentLog = params->windowLog+btPlus;   /* <= ZSTD_CONTENTLOG_MAX */
    if (params->contentLog   < ZSTD_CONTENTLOG_MIN) params->contentLog = ZSTD_CONTENTLOG_MIN;
    if (params->hashLog      > ZSTD_HASHLOG_MAX) params->hashLog = ZSTD_HASHLOG_MAX;
    if (params->hashLog      < ZSTD_HASHLOG_MIN) params->hashLog = ZSTD_HASHLOG_MIN;
    if (params->searchLog    > ZSTD_SEARCHLOG_MAX) params->searchLog = ZSTD_SEARCHLOG_MAX;
    if (params->searchLog    < ZSTD_SEARCHLOG_MIN) params->searchLog = ZSTD_SEARCHLOG_MIN;
    if (params->searchLength > ZSTD_SEARCHLENGTH_MAX) params->searchLength = ZSTD_SEARCHLENGTH_MAX;
    if (params->searchLength < ZSTD_SEARCHLENGTH_MIN) params->searchLength = ZSTD_SEARCHLENGTH_MIN;
    if ((U32)params->strategy > (U32)ZSTD_btlazy2) params->strategy = ZSTD_btlazy2;
}
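
/* Note : with the btlazy2 strategy, contentLog is allowed to be one bit larger
*  than windowLog (btPlus). The binary tree search stores two U32 entries per
*  position (see ZSTD_insertBt1() below, where btLog = contentLog - 1), which is
*  presumably why that strategy gets the extra bit. */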


static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
                                       ZSTD_parameters params,
                                       U64 srcSizeHint)
{
    ZSTD_validateParams(&params, srcSizeHint);

    /* reserve table memory */
    {
        const U32 contentLog = (params.strategy == ZSTD_fast) ? 1 : params.contentLog;
        const size_t tableSpace = ((1 << contentLog) + (1 << params.hashLog)) * sizeof(U32);
        const size_t neededSpace = tableSpace + WORKPLACESIZE;
        if (zc->workSpaceSize < neededSpace)
        {
            free(zc->workSpace);
            zc->workSpaceSize = neededSpace;
            zc->workSpace = malloc(neededSpace);
            if (zc->workSpace == NULL) return ERROR(memory_allocation);
        }
        memset(zc->workSpace, 0, tableSpace);
        zc->hashTable = (U32*)(zc->workSpace);
        zc->contentTable = zc->hashTable + ((size_t)1 << params.hashLog);
        zc->seqStore.buffer = (void*) (zc->contentTable + ((size_t)1 << contentLog));
    }

    zc->nextToUpdate = 1;
    zc->nextSrc = NULL;
    zc->base = NULL;
    zc->dictBase = NULL;
    zc->dictLimit = 0;
    zc->lowLimit = 0;
    zc->params = params;
    zc->seqStore.offsetStart = (U32*) (zc->seqStore.buffer);
    zc->seqStore.offCodeStart = (BYTE*) (zc->seqStore.offsetStart + (BLOCKSIZE>>2));
    zc->seqStore.litStart = zc->seqStore.offCodeStart + (BLOCKSIZE>>2);
    zc->seqStore.litLengthStart = zc->seqStore.litStart + BLOCKSIZE;
    zc->seqStore.matchLengthStart = zc->seqStore.litLengthStart + (BLOCKSIZE>>2);
    zc->seqStore.dumpsStart = zc->seqStore.matchLengthStart + (BLOCKSIZE>>2);

    return 0;
}
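
/* Resulting workSpace layout, as allocated above :
*  [ hashTable : (1<<hashLog) U32 ] [ contentTable : (1<<contentLog) U32 ] [ seqStore buffer : WORKPLACESIZE bytes ]
*  The seqStore buffer is then carved into offsets, offset codes, literals,
*  literal lengths, match lengths and dumps, in that order. */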


static void ZSTD_reduceIndex (ZSTD_CCtx* zc,
                              const U32 reducerValue)
{
    const U32 contentLog = zc->params.strategy == ZSTD_fast ? 1 : zc->params.contentLog;
    const U32 tableSpaceU32 = (1 << contentLog) + (1 << zc->params.hashLog);
    U32* table32 = zc->hashTable;
    U32 index;

    for (index=0 ; index < tableSpaceU32 ; index++)
    {
        if (table32[index] < reducerValue) table32[index] = 0;
        else table32[index] -= reducerValue;
    }
}
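
/* Note : both tables store positions as U32 indexes relative to `base`.
*  Subtracting `reducerValue` from every entry (clamping at 0, the "no match"
*  value) rebases them, presumably so indexes keep fitting into 32 bits as the
*  window slides forward over very long inputs. */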


/* *******************************************************
*  Block entropic compression
*********************************************************/
size_t ZSTD_compressBound(size_t srcSize)   /* maximum compressed size */
{
    return FSE_compressBound(srcSize) + 12;
}


size_t ZSTD_noCompressBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;

    if (srcSize + ZSTD_blockHeaderSize > maxDstSize) return ERROR(dstSize_tooSmall);
    memcpy(ostart + ZSTD_blockHeaderSize, src, srcSize);

    /* Build header */
    ostart[0] = (BYTE)(srcSize>>16);
    ostart[1] = (BYTE)(srcSize>>8);
    ostart[2] = (BYTE) srcSize;
    ostart[0] += (BYTE)(bt_raw<<6);   /* is a raw (uncompressed) block */

    return ZSTD_blockHeaderSize+srcSize;
}


static size_t ZSTD_noCompressLiterals (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;

    if (srcSize + 3 > maxDstSize) return ERROR(dstSize_tooSmall);

    MEM_writeLE32(dst, ((U32)srcSize << 2) | IS_RAW);
    memcpy(ostart + 3, src, srcSize);
    return srcSize + 3;
}

static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;

    (void)maxDstSize;
    MEM_writeLE32(dst, ((U32)srcSize << 2) | IS_RLE);   /* note : maxDstSize > litHeaderSize > 4 */
    ostart[3] = *(const BYTE*)src;
    return 4;
}

size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 1; }

static size_t ZSTD_compressLiterals (void* dst, size_t maxDstSize,
                               const void* src, size_t srcSize)
{
    const size_t minGain = ZSTD_minGain(srcSize);
    BYTE* const ostart = (BYTE*)dst;
    size_t hsize;
    static const size_t litHeaderSize = 5;

    if (maxDstSize < litHeaderSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */

    hsize = HUF_compress(ostart+litHeaderSize, maxDstSize-litHeaderSize, src, srcSize);

    if ((hsize==0) || (hsize >= srcSize - minGain)) return ZSTD_noCompressLiterals(dst, maxDstSize, src, srcSize);
    if (hsize==1) return ZSTD_compressRleLiteralsBlock(dst, maxDstSize, src, srcSize);

    /* Build header */
    {
        ostart[0] = (BYTE)(srcSize << 2);   /* is a block, is compressed */
        ostart[1] = (BYTE)(srcSize >> 6);
        ostart[2] = (BYTE)(srcSize >>14);
        ostart[2] += (BYTE)(hsize << 5);
        ostart[3] = (BYTE)(hsize >> 3);
        ostart[4] = (BYTE)(hsize >>11);
    }

    return hsize+litHeaderSize;
}
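
/* Literal section header, as written by the shifts above (a reading of this code,
*  not a spec reference) : the 2 lowest bits hold the literal block type (raw, rle,
*  or 0 for compressed), the regenerated size `srcSize` sits just above them, and
*  the compressed size `hsize` is packed above srcSize across bytes 2 to 4. */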


#define LITERAL_NOENTROPY 63   /* cheap heuristic */

size_t ZSTD_compressSequences(BYTE* dst, size_t maxDstSize,
                        const seqStore_t* seqStorePtr,
                              size_t srcSize)
{
    U32 count[MaxSeq+1];
    S16 norm[MaxSeq+1];
    size_t mostFrequent;
    U32 max = 255;
    U32 tableLog = 11;
    U32 CTable_LitLength  [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL )];
    U32 CTable_OffsetBits [FSE_CTABLE_SIZE_U32(OffFSELog,MaxOff)];
    U32 CTable_MatchLength[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML )];
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const BYTE* const op_lit_start = seqStorePtr->litStart;
    const BYTE* const llTable = seqStorePtr->litLengthStart;
    const BYTE* const llPtr = seqStorePtr->litLength;
    const BYTE* const mlTable = seqStorePtr->matchLengthStart;
    const U32*  const offsetTable = seqStorePtr->offsetStart;
    BYTE* const offCodeTable = seqStorePtr->offCodeStart;
    BYTE* op = dst;
    BYTE* const oend = dst + maxDstSize;
    const size_t nbSeq = llPtr - llTable;
    const size_t minGain = ZSTD_minGain(srcSize);
    const size_t maxCSize = srcSize - minGain;
    BYTE* seqHead;


    /* Compress literals */
    {
        size_t cSize;
        size_t litSize = seqStorePtr->lit - op_lit_start;

        if (litSize <= LITERAL_NOENTROPY)
            cSize = ZSTD_noCompressLiterals(op, maxDstSize, op_lit_start, litSize);
        else
            cSize = ZSTD_compressLiterals(op, maxDstSize, op_lit_start, litSize);
        if (ZSTD_isError(cSize)) return cSize;
        op += cSize;
    }

    /* Sequences Header */
    if ((oend-op) < MIN_SEQUENCES_SIZE)
        return ERROR(dstSize_tooSmall);
    MEM_writeLE16(op, (U16)nbSeq); op+=2;
    seqHead = op;

    /* dumps : contains too large lengths */
    {
        size_t dumpsLength = seqStorePtr->dumps - seqStorePtr->dumpsStart;
        if (dumpsLength < 512)
        {
            op[0] = (BYTE)(dumpsLength >> 8);
            op[1] = (BYTE)(dumpsLength);
            op += 2;
        }
        else
        {
            op[0] = 2;
            op[1] = (BYTE)(dumpsLength>>8);
            op[2] = (BYTE)(dumpsLength);
            op += 3;
        }
        if ((size_t)(oend-op) < dumpsLength+6) return ERROR(dstSize_tooSmall);
        memcpy(op, seqStorePtr->dumpsStart, dumpsLength);
        op += dumpsLength;
    }

    /* CTable for Literal Lengths */
    max = MaxLL;
    mostFrequent = FSE_countFast(count, &max, seqStorePtr->litLengthStart, nbSeq);
    if ((mostFrequent == nbSeq) && (nbSeq > 2))
    {
        *op++ = *(seqStorePtr->litLengthStart);
        FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
        LLtype = bt_rle;
    }
    else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (LLbits-1))))
    {
        FSE_buildCTable_raw(CTable_LitLength, LLbits);
        LLtype = bt_raw;
    }
    else
    {
        size_t NCountSize;
        tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
        FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
        NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
        if (FSE_isError(NCountSize)) return ERROR(GENERIC);
        op += NCountSize;
        FSE_buildCTable(CTable_LitLength, norm, max, tableLog);
        LLtype = bt_compressed;
    }

    /* CTable for Offsets codes */
    {
        /* create Offset codes */
        size_t i;
        max = MaxOff;
        for (i=0; i<nbSeq; i++)
        {
            offCodeTable[i] = (BYTE)ZSTD_highbit(offsetTable[i]) + 1;
            if (offsetTable[i]==0) offCodeTable[i]=0;
        }
        mostFrequent = FSE_countFast(count, &max, offCodeTable, nbSeq);
    }
    if ((mostFrequent == nbSeq) && (nbSeq > 2))
    {
        *op++ = *offCodeTable;
        FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
        Offtype = bt_rle;
    }
    else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (Offbits-1))))
    {
        FSE_buildCTable_raw(CTable_OffsetBits, Offbits);
        Offtype = bt_raw;
    }
    else
    {
        size_t NCountSize;
        tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
        FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
        NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
        if (FSE_isError(NCountSize)) return ERROR(GENERIC);
        op += NCountSize;
        FSE_buildCTable(CTable_OffsetBits, norm, max, tableLog);
        Offtype = bt_compressed;
    }

    /* CTable for MatchLengths */
    max = MaxML;
    mostFrequent = FSE_countFast(count, &max, seqStorePtr->matchLengthStart, nbSeq);
    if ((mostFrequent == nbSeq) && (nbSeq > 2))
    {
        *op++ = *seqStorePtr->matchLengthStart;
        FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
        MLtype = bt_rle;
    }
    else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (MLbits-1))))
    {
        FSE_buildCTable_raw(CTable_MatchLength, MLbits);
        MLtype = bt_raw;
    }
    else
    {
        size_t NCountSize;
        tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
        FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
        NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
        if (FSE_isError(NCountSize)) return ERROR(GENERIC);
        op += NCountSize;
        FSE_buildCTable(CTable_MatchLength, norm, max, tableLog);
        MLtype = bt_compressed;
    }

    seqHead[0] += (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    /* Encoding Sequences */
    {
        size_t streamSize, errorCode;
        BIT_CStream_t blockStream;
        FSE_CState_t stateMatchLength;
        FSE_CState_t stateOffsetBits;
        FSE_CState_t stateLitLength;
        int i;

        errorCode = BIT_initCStream(&blockStream, op, oend-op);
        if (ERR_isError(errorCode)) return ERROR(dstSize_tooSmall);   /* not enough space remaining */
        FSE_initCState(&stateMatchLength, CTable_MatchLength);
        FSE_initCState(&stateOffsetBits, CTable_OffsetBits);
        FSE_initCState(&stateLitLength, CTable_LitLength);

        for (i=(int)nbSeq-1; i>=0; i--)
        {
            BYTE matchLength = mlTable[i];
            U32  offset = offsetTable[i];
            BYTE offCode = offCodeTable[i];                                 /* 32b*/  /* 64b*/
            U32  nbBits = (offCode-1) * (!!offCode);
            BYTE litLength = llTable[i];                                    /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateMatchLength, matchLength); /* 17 */  /* 17 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /*  7 */
            BIT_addBits(&blockStream, offset, nbBits);                      /* 32 */  /* 42 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /*  7 */
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, offCode);      /* 16 */  /* 51 */
            FSE_encodeSymbol(&blockStream, &stateLitLength, litLength);     /* 26 */  /* 61 */
            BIT_flushBits(&blockStream);                                    /*  7 */  /*  7 */
        }

        FSE_flushCState(&blockStream, &stateMatchLength);
        FSE_flushCState(&blockStream, &stateOffsetBits);
        FSE_flushCState(&blockStream, &stateLitLength);

        streamSize = BIT_closeCStream(&blockStream);
        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
        op += streamSize;
    }

    /* check compressibility */
    if ((size_t)(op-dst) >= maxCSize) return 0;

    return op - dst;
}
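
/* Block layout produced above (a summary of this code, not a format reference) :
*  [ literal section ] [ 2-byte sequence count ] [ dumps-length field, whose first
*  byte also carries LLtype/Offtype/MLtype in its upper bits via seqHead[0] ]
*  [ dumps ] [ FSE table description for each stream encoded as bt_compressed ]
*  [ FSE/raw bitstream of the sequences, written in reverse order ]. */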


/** ZSTD_storeSeq
    Store a sequence (literal length, literals, offset code and match length) into seqStore_t
    @offsetCode : distance to match, or 0 == repCode
    @matchCode : matchLength - MINMATCH
*/
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, size_t offsetCode, size_t matchCode)
{
#if 0
    static const BYTE* g_start = NULL;
    if (g_start==NULL) g_start = literals;
    if (literals - g_start == 8695)
    printf("pos %6u : %3u literals & match %3u bytes at distance %6u \n",
           (U32)(literals - g_start), (U32)litLength, (U32)matchCode+4, (U32)offsetCode);
#endif

    /* copy Literals */
    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
    seqStorePtr->lit += litLength;

    /* literal Length */
    if (litLength >= MaxLL)
    {
        *(seqStorePtr->litLength++) = MaxLL;
        if (litLength<255 + MaxLL)
            *(seqStorePtr->dumps++) = (BYTE)(litLength - MaxLL);
        else
        {
            *(seqStorePtr->dumps++) = 255;
            MEM_writeLE32(seqStorePtr->dumps, (U32)litLength); seqStorePtr->dumps += 3;
        }
    }
    else *(seqStorePtr->litLength++) = (BYTE)litLength;

    /* match offset */
    *(seqStorePtr->offset++) = (U32)offsetCode;

    /* match Length */
    if (matchCode >= MaxML)
    {
        *(seqStorePtr->matchLength++) = MaxML;
        if (matchCode < 255+MaxML)
            *(seqStorePtr->dumps++) = (BYTE)(matchCode - MaxML);
        else
        {
            *(seqStorePtr->dumps++) = 255;
            MEM_writeLE32(seqStorePtr->dumps, (U32)matchCode); seqStorePtr->dumps += 3;
        }
    }
    else *(seqStorePtr->matchLength++) = (BYTE)matchCode;
}
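
/* Length encoding used above : a length below MaxLL/MaxML is stored directly in its
*  one-byte table entry. Larger values store the cap (MaxLL/MaxML) in the table and
*  spill into `dumps` : either a single extra byte (value - cap) when it fits below
*  255, or a 255 marker followed by the full value as 3 little-endian bytes
*  (MEM_writeLE32, with `dumps` advanced by only 3). */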


/* *************************************
*  Match length counter
***************************************/
static size_t ZSTD_read_ARCH(const void* p) { size_t r; memcpy(&r, p, sizeof(r)); return r; }

static unsigned ZSTD_highbit(U32 val)
{
#   if defined(_MSC_VER)   /* Visual */
    unsigned long r=0;
    _BitScanReverse(&r, val);
    return (unsigned)r;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
    return 31 - __builtin_clz(val);
#   else   /* Software version */
    static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
    U32 v = val;
    int r;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27];
    return r;
#   endif
}

MEM_STATIC unsigned ZSTD_NbCommonBytes (register size_t val)
{
    if (MEM_isLittleEndian())
    {
        if (MEM_64bits())
        {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (int)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        }
        else /* 32 bits */
        {
#       if defined(_MSC_VER)
            unsigned long r=0;
            _BitScanForward( &r, (U32)val );
            return (int)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    }
    else /* Big Endian CPU */
    {
        if (MEM_64bits())
        {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        }
        else /* 32 bits */
        {
#       if defined(_MSC_VER)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
        }
    }
}


MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    while ((pIn<pInLimit-(sizeof(size_t)-1)))
    {
        size_t diff = ZSTD_read_ARCH(pMatch) ^ ZSTD_read_ARCH(pIn);
        if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
        pIn += ZSTD_NbCommonBytes(diff);
        return (size_t)(pIn - pStart);
    }

    if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (size_t)(pIn - pStart);
}
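
/* ZSTD_count compares one word at a time : the XOR of the two words is zero while
*  they agree, and once it differs, ZSTD_NbCommonBytes(diff) gives the number of
*  leading identical bytes (on little-endian, the count of trailing zero bytes of
*  the XOR). Example : "abcdefgh" vs "abcdXYZW" yields a XOR whose low 4 bytes are
*  zero, hence 4 matching bytes before the mismatch. */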



/* *************************************
*  Hashes
***************************************/

static const U32 prime4bytes = 2654435761U;
static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)((u * prime5bytes) << (64-40) >> (64-h)) ; }
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_read64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)((u * prime6bytes) << (64-48) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_read64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)((u * prime7bytes) << (64-56) >> (64-h)) ; }
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_read64(p), h); }

static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
    switch(mls)
    {
    default:
    case 4: return ZSTD_hash4Ptr(p, hBits);
    case 5: return ZSTD_hash5Ptr(p, hBits);
    case 6: return ZSTD_hash6Ptr(p, hBits);
    case 7: return ZSTD_hash7Ptr(p, hBits);
    }
}
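
/* Hashing scheme : multiply the first `mls` bytes by a large odd constant and keep
*  the top `hBits` bits of the product (multiplicative hashing). For the 5/6/7-byte
*  variants, the initial left shift discards the bytes beyond `mls` from the 8-byte
*  read before the top bits are extracted. */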

/* *************************************
*  Fast Scan
***************************************/

FORCE_INLINE
size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* ctx,
                                       void* dst, size_t maxDstSize,
                                 const void* src, size_t srcSize,
                                       const U32 mls)
{
    U32* hashTable = ctx->hashTable;
    const U32 hBits = ctx->params.hashLog;
    seqStore_t* seqStorePtr = &(ctx->seqStore);
    const BYTE* const base = ctx->base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const lowest = base + ctx->lowLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;

    size_t offset_2=REPCODE_STARTVALUE, offset_1=REPCODE_STARTVALUE;


    /* init */
    ZSTD_resetSeqStore(seqStorePtr);
    if (ip == base)
    {
        hashTable[ZSTD_hashPtr(base+1, hBits, mls)] = 1;
        hashTable[ZSTD_hashPtr(base+2, hBits, mls)] = 2;
        hashTable[ZSTD_hashPtr(base+3, hBits, mls)] = 3;
        ip = base+4;
    }

    /* Main Search Loop */
    while (ip < ilimit)   /* < instead of <=, because repcode check at (ip+1) */
    {
        size_t matchLength;
        size_t offset;
        const size_t h = ZSTD_hashPtr(ip, hBits, mls);
        const BYTE* match = base + hashTable[h];
        hashTable[h] = (U32)(ip-base);

        if (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))
        {
            matchLength = ZSTD_count(ip+1+MINMATCH, ip+1+MINMATCH-offset_1, iend);
            ip++;
            offset = 0;
        }
        else
        {
            if ( (match < lowest) ||
                 (MEM_read32(match) != MEM_read32(ip)) )
            {
                ip += ((ip-anchor) >> g_searchStrength) + 1;
                continue;
            }
            matchLength = ZSTD_count(ip+MINMATCH, match+MINMATCH, iend);
            while ((ip>anchor) && (match>lowest) && (ip[-1] == match[-1])) { ip--; match--; matchLength++; }   /* catch up */
            offset = ip-match;
            offset_2 = offset_1;
            offset_1 = offset;
        }

        /* match found */
        ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset, matchLength);
        ip += matchLength + MINMATCH;
        anchor = ip;

        if (ip <= ilimit)
        {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(ip-2-matchLength, hBits, mls)] = (U32)(ip-2-matchLength-base);
            hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) )
            {
                /* store sequence */
                size_t ml = ZSTD_count(ip+MINMATCH, ip+MINMATCH-offset_2, iend);
                size_t tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
                hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, ml);
                ip += ml+MINMATCH;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
        }
    }

    /* Last Literals */
    {
        size_t lastLLSize = iend - anchor;
        memcpy(seqStorePtr->lit, anchor, lastLLSize);
        seqStorePtr->lit += lastLLSize;
    }

    /* Final compression stage */
    return ZSTD_compressSequences((BYTE*)dst, maxDstSize,
                                  seqStorePtr, srcSize);
}
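
/* Fast strategy summary : a single hash table where each cell remembers the most
*  recent position whose first `mls` bytes hashed to it. The loop first tries the
*  last offset (repcode) at ip+1, otherwise validates the hashed candidate, extends
*  the match backwards ("catch up") and forwards, then stores the sequence. When no
*  match is found, the step size grows with the distance from the last anchor
*  ((ip-anchor) >> g_searchStrength), so incompressible regions are skipped faster. */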


size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
                               void* dst, size_t maxDstSize,
                         const void* src, size_t srcSize)
{
    const U32 mls = ctx->params.searchLength;
    switch(mls)
    {
    default:
    case 4 :
        return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 7);
    }
}


//FORCE_INLINE
size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
                                               void* dst, size_t maxDstSize,
                                         const void* src, size_t srcSize,
                                               const U32 mls)
{
    U32* hashTable = ctx->hashTable;
    const U32 hBits = ctx->params.hashLog;
    seqStore_t* seqStorePtr = &(ctx->seqStore);
    const BYTE* const base = ctx->base;
    const BYTE* const dictBase = ctx->dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowLimit = ctx->lowLimit;
    const U32 dictLimit = ctx->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;

    U32 offset_2=REPCODE_STARTVALUE, offset_1=REPCODE_STARTVALUE;


    /* init */
    ZSTD_resetSeqStore(seqStorePtr);
    {
        /* skip first 4 positions to avoid read overflow during repcode match check */
        hashTable[ZSTD_hashPtr(ip+0, hBits, mls)] = (U32)(ip-base+0);
        hashTable[ZSTD_hashPtr(ip+1, hBits, mls)] = (U32)(ip-base+1);
        hashTable[ZSTD_hashPtr(ip+2, hBits, mls)] = (U32)(ip-base+2);
        hashTable[ZSTD_hashPtr(ip+3, hBits, mls)] = (U32)(ip-base+3);
        ip += 4;
    }

    /* Main Search Loop */
    while (ip < ilimit)   /* < instead of <=, because (ip+1) */
    {
        const size_t h = ZSTD_hashPtr(ip, hBits, mls);
        const U32 matchIndex = hashTable[h];
        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;
        const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictBase + lowLimit : lowPrefixPtr;
        const U32 current = (U32)(ip-base);
        const U32 repIndex = current + 1 - offset_1;
        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
        const BYTE* repMatch = repBase + repIndex;
        size_t matchLength;
        U32 offset;
        hashTable[h] = current;   /* update hash table */

        if ( ((repIndex <= dictLimit-4) || (repIndex >= dictLimit))
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) )
        {
            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
            const BYTE* iEndCount = (repMatchEnd - repMatch < iend - ip - 1) ? ip + 1 + (repMatchEnd - repMatch) : iend;
            matchLength = ZSTD_count(ip+1+MINMATCH, repMatch+MINMATCH, iEndCount);
            if (match + matchLength + MINMATCH == dictEnd)
                matchLength += ZSTD_count(ip + matchLength + MINMATCH, lowPrefixPtr, iend);
            ip++;
            offset = 0;
        }
        else
        {
            if ( (matchIndex < lowLimit) ||
                 (MEM_read32(match) != MEM_read32(ip)) )
            { ip += ((ip-anchor) >> g_searchStrength) + 1; continue; }
            {
                const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                const BYTE* iEndCount = (matchEnd - match < iend - ip) ? ip + (matchEnd - match) : iend;
                matchLength = ZSTD_count(ip+MINMATCH, match+MINMATCH, iEndCount);
                if (match + matchLength + MINMATCH == dictEnd)
                    matchLength += ZSTD_count(ip + matchLength + MINMATCH, lowPrefixPtr, iend);
                while ((ip>anchor) && (match>lowMatchPtr) && (ip[-1] == match[-1])) { ip--; match--; matchLength++; }   /* catch up */
                offset = current - matchIndex;
                offset_2 = offset_1;
                offset_1 = offset;
            }
        }

        /* found a match */
        ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset, matchLength);
        ip += matchLength + MINMATCH;
        anchor = ip;

        if (ip <= ilimit)
        {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(ip-2-matchLength, hBits, mls)] = (U32)(ip-2-matchLength-base);
            hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit)
            {
                U32 current2 = (U32)(ip-base);
                const U32 repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
                if ( ((repIndex2 <= dictLimit-4) || (repIndex2 >= dictLimit))
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) )
                {
                    size_t maxIlength = iend - ip;
                    size_t maxMlength = repIndex2 < dictLimit ? (size_t)(dictLimit - repIndex2) : (size_t)(iend - repMatch2);
                    size_t maxML = MIN(maxMlength, maxIlength);
                    size_t ml = ZSTD_count(ip+MINMATCH, repMatch2+MINMATCH, ip + maxML);
                    U32 tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
                    if (ml+MINMATCH == maxMlength)   /* reached end of extDict */
                        ml += ZSTD_count(ip+MINMATCH+ml, lowPrefixPtr, iend);
                    hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
                    ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, ml);
                    ip += ml+MINMATCH;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }

    /* Last Literals */
    {
        size_t lastLLSize = iend - anchor;
        memcpy(seqStorePtr->lit, anchor, lastLLSize);
        seqStorePtr->lit += lastLLSize;
    }

    /* Final compression stage */
    return ZSTD_compressSequences((BYTE*)dst, maxDstSize,
                                  seqStorePtr, srcSize);
}
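
/* extDict addressing, as used above : positions below dictLimit belong to the
*  previous segment and are read through dictBase[], positions at or above it
*  through base[]. A match coming from the old segment is only counted up to
*  dictEnd ; if it reaches it, counting continues from lowPrefixPtr (the start of
*  the current prefix), since both views describe contiguous content. */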


size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
                                       void* dst, size_t maxDstSize,
                                 const void* src, size_t srcSize)
{
    const U32 mls = ctx->params.searchLength;
    switch(mls)
    {
    default:
    case 4 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, dst, maxDstSize, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, dst, maxDstSize, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, dst, maxDstSize, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_generic(ctx, dst, maxDstSize, src, srcSize, 7);
    }
}


/* *************************************
*  Binary Tree search
***************************************/
/** ZSTD_insertBt1 : add one ptr to tree
    @ip : assumed <= iend-8  */
static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares)
{
    U32* const hashTable = zc->hashTable;
    const U32 hashLog = zc->params.hashLog;
    const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
    U32* const bt = zc->contentTable;
    const U32 btLog = zc->params.contentLog - 1;
    const U32 btMask= (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = zc->base;
    const BYTE* match = base + matchIndex;
    U32 current = (U32)(ip-base);
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = bt + 2*(current&btMask) + 1;
    U32 dummy32;   /* to be nullified at the end */
    const U32 windowSize = 1 << zc->params.windowLog;
    const U32 windowLow = windowSize >= current ? 0 : current - windowSize;

    if ((current-matchIndex == 1)   /* RLE */
        && MEM_read64(match) == MEM_read64(ip))
    {
        size_t rleLength = ZSTD_count(ip+sizeof(size_t), match+sizeof(size_t), iend) + sizeof(size_t);
        return (U32)(rleLength - mls);
    }

    hashTable[h] = (U32)(ip - base);   /* Update Hash Table */

    while (nbCompares-- && (matchIndex > windowLow))
    {
        U32* nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */

        match = base + matchIndex;
        matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);

        if (ip+matchLength == iend)   /* equal : no way to know if inf or sup */
            break;   /* just drop , to guarantee consistency (miss a bit of compression; if someone knows better, please tell) */

        if (match[matchLength] < ip[matchLength])
        {
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
        }
        else
        {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
        }
    }

    *smallerPtr = *largerPtr = 0;
    return 1;
}


FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
size_t ZSTD_insertBtAndFindBestMatch (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offsetPtr,
                        U32 nbCompares, const U32 mls)
{
    U32* const hashTable = zc->hashTable;
    const U32 hashLog = zc->params.hashLog;
    const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
    U32* const bt = zc->contentTable;
    const U32 btLog = zc->params.contentLog - 1;
    const U32 btMask= (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = zc->base;
    const U32 current = (U32)(ip-base);
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    const U32 windowSize = 1 << zc->params.windowLog;
    const U32 windowLow = windowSize >= current ? 0 : current - windowSize;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = bt + 2*(current&btMask) + 1;
    size_t bestLength = 0;
    U32 dummy32;   /* to be nullified at the end */

    hashTable[h] = (U32)(ip-base);   /* Update Hash Table */

    while (nbCompares-- && (matchIndex > windowLow))
    {
        U32* nextPtr = bt + 2*(matchIndex & btMask);
        const BYTE* match = base + matchIndex;
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */

        matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);

        if (matchLength > bestLength)
        {
            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit(current-matchIndex+1) - ZSTD_highbit((U32)offsetPtr[0]+1)) )
                bestLength = matchLength, *offsetPtr = current - matchIndex;
            if (ip+matchLength == iend)   /* equal : no way to know if inf or sup */
                break;   /* just drop, to guarantee consistency (miss a little bit of compression) */
        }

        if (match[matchLength] < ip[matchLength])
        {
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            if (matchIndex <= btLow) smallerPtr=&dummy32;   /* beyond tree size, stop the search */
            matchIndex = (matchIndex <= btLow) ? windowLow : nextPtr[1];
        }
        else
        {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            largerPtr = nextPtr;
            if (matchIndex <= btLow) largerPtr=&dummy32;   /* beyond tree size, stop the search */
            matchIndex = (matchIndex <= btLow) ? windowLow : nextPtr[0];
        }
    }

    *smallerPtr = *largerPtr = 0;

    zc->nextToUpdate = current+1;   /* current has been inserted */
    return bestLength;
}


static const BYTE* ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
{
    const BYTE* const base = zc->base;
    const U32 target = (U32)(ip - base);
    U32 idx = zc->nextToUpdate;
    //size_t dummy;

    for( ; idx < target ; )
        idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares);
        //ZSTD_insertBtAndFindBestMatch(zc, base+idx, iend, &dummy, nbCompares, mls);

    zc->nextToUpdate = idx;
    return base + idx;
}


/** Tree updater, providing best match */
FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
size_t ZSTD_BtFindBestMatch (
                        ZSTD_CCtx* zc,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 mls)
{
    const BYTE* nextToUpdate = ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
    if (nextToUpdate > ip)
    {
        /* RLE data */
        *offsetPtr = 1;
        return ZSTD_count(ip, ip-1, iLimit);
    }
    return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls);
}


FORCE_INLINE size_t ZSTD_BtFindBestMatch_selectMLS (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 matchLengthSearch)
{
    switch(matchLengthSearch)
    {
    default :
    case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
    case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
    }
}


/* ***********************
*  Hash Chain
*************************/

#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]

/* Update chains up to ip (excluded) */
static U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
{
    U32* const hashTable  = zc->hashTable;
    const U32 hashLog = zc->params.hashLog;
    U32* const chainTable = zc->contentTable;
    const U32 chainMask = (1 << zc->params.contentLog) - 1;
    const BYTE* const base = zc->base;
    const U32 target = (U32)(ip - base);
    U32 idx = zc->nextToUpdate;

    while(idx < target)
    {
        size_t h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
    }

    zc->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}


FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
size_t ZSTD_HcFindBestMatch (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 matchLengthSearch)
{
    U32* const chainTable = zc->contentTable;
    const U32 chainSize = (1 << zc->params.contentLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = zc->base;
    const BYTE* const dictBase = zc->dictBase;
    const U32 dictLimit = zc->dictLimit;
    const U32 lowLimit = zc->lowLimit;
    U32 matchIndex;
    const BYTE* match;
    int nbAttempts=maxNbAttempts;
    size_t ml=0;

    /* HC4 match finder */
    matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, matchLengthSearch);

    while ((matchIndex>lowLimit) && (nbAttempts))
    {
        nbAttempts--;
        if (matchIndex >= dictLimit)
        {
            match = base + matchIndex;
            if (match[ml] == ip[ml])   /* potentially better */
            {
                const size_t mlt = ZSTD_count(ip, match, iLimit);
                if (mlt > ml)
                //if (((int)(4*mlt) - (int)ZSTD_highbit((U32)(ip-match)+1)) > ((int)(4*ml) - (int)ZSTD_highbit((U32)((*offsetPtr)+1))))
                {
                    ml = mlt; *offsetPtr = ip-match;
                    if (ip+mlt >= iLimit) break;
                }
            }
        }
        else
        {
            match = dictBase + matchIndex;
            if (MEM_read32(match) == MEM_read32(ip))   /* beware of end of dict */
            {
                size_t mlt;
                const BYTE* vLimit = ip + (dictLimit - matchIndex);
                if (vLimit > iLimit) vLimit = iLimit;
                mlt = ZSTD_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH;
                if ((ip+mlt == vLimit) && (vLimit < iLimit))
                    mlt += ZSTD_count(ip+mlt, base+dictLimit, iLimit);
                if (mlt > ml) { ml = mlt; *offsetPtr = (ip-base) - matchIndex; }
            }
        }

        if (base + matchIndex <= ip - chainSize) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    return ml;
}


FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
                        ZSTD_CCtx* zc,   /* Index table will be updated */
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 maxNbAttempts, const U32 matchLengthSearch)
{
    switch(matchLengthSearch)
    {
    default :
    case 4 : return ZSTD_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
    case 5 : return ZSTD_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    case 6 : return ZSTD_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
    }
}
1280
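/* ZSTD_compressBlock_lazy_generic() :
   shared body of the greedy / lazy / lazy2 / btlazy2 block compressors.
   For each position it first checks the repeat offset (offset_1), then queries the selected match
   finder (searchMax). With deep >= 1 it keeps probing one position ahead (two with deep == 2),
   preferring a candidate only if its length-vs-offset gain is better, and repeats as long as it
   improves : this is the "lazy" evaluation. Matches are then extended backwards over equal bytes
   ("catch up"), stored into the seqStore, and immediate repcodes are emitted greedily right after
   each stored sequence. */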
Yann Collet90361052015-11-05 15:03:12 +01001281/* common lazy function, to be inlined */
Yann Collet5106a762015-11-05 15:00:24 +01001282FORCE_INLINE
Yann Collet5be2dd22015-11-11 13:43:58 +01001283size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
Yann Colletc95f8992015-11-19 17:28:35 +01001284 void* dst, size_t maxDstSize, const void* src, size_t srcSize,
1285                                        const U32 searchMethod, const U32 deep)   /* searchMethod : 0 = hash chain (HC), 1 = binary tree (BT) ; deep : 0 = greedy, 1 = lazy, 2 = lazy2 */
Yann Collet5106a762015-11-05 15:00:24 +01001286{
1287 seqStore_t* seqStorePtr = &(ctx->seqStore);
1288 const BYTE* const istart = (const BYTE*)src;
1289 const BYTE* ip = istart;
1290 const BYTE* anchor = istart;
1291 const BYTE* const iend = istart + srcSize;
1292 const BYTE* const ilimit = iend - 8;
1293
1294 size_t offset_2=REPCODE_STARTVALUE, offset_1=REPCODE_STARTVALUE;
1295 const U32 maxSearches = 1 << ctx->params.searchLog;
1296 const U32 mls = ctx->params.searchLength;
1297
Yann Collet5be2dd22015-11-11 13:43:58 +01001298 typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
Yann Collet5106a762015-11-05 15:00:24 +01001299 size_t* offsetPtr,
1300 U32 maxNbAttempts, U32 matchLengthSearch);
Yann Collet5be2dd22015-11-11 13:43:58 +01001301 searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
Yann Collet5106a762015-11-05 15:00:24 +01001302
1303 /* init */
1304 ZSTD_resetSeqStore(seqStorePtr);
1305 if (((ip-ctx->base) - ctx->dictLimit) < REPCODE_STARTVALUE) ip += REPCODE_STARTVALUE;
1306
1307 /* Match Loop */
Yann Collet7a231792015-11-21 15:27:35 +01001308 while (ip < ilimit)
Yann Collet5106a762015-11-05 15:00:24 +01001309 {
Yann Collet7a231792015-11-21 15:27:35 +01001310 size_t matchLength=0;
1311 size_t offset=0;
1312 const BYTE* start=ip+1;
Yann Collet5106a762015-11-05 15:00:24 +01001313
Yann Collet743402c2015-11-20 12:03:53 +01001314 /* check repCode */
Yann Collet7a231792015-11-21 15:27:35 +01001315 if (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))
Yann Collet5106a762015-11-05 15:00:24 +01001316 {
Yann Collet743402c2015-11-20 12:03:53 +01001317 /* repcode : we take it */
Yann Collet7a231792015-11-21 15:27:35 +01001318 matchLength = ZSTD_count(ip+1+MINMATCH, ip+1+MINMATCH-offset_1, iend) + MINMATCH;
1319 if (deep==0) goto _storeSequence;
Yann Collet5106a762015-11-05 15:00:24 +01001320 }
1321
Yann Colletfc2afcf2015-11-06 15:40:14 +01001322 {
Yann Collet7a231792015-11-21 15:27:35 +01001323 /* search first solution */
1324 size_t offsetFound = 99999999;
1325 size_t ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
1326 if (ml2 > matchLength)
1327 matchLength = ml2, start = ip, offset=offsetFound;
1328 if (matchLength < MINMATCH)
1329 {
1330 ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
1331 continue;
1332 }
1333 }
Yann Collet5106a762015-11-05 15:00:24 +01001334
1335 /* let's try to find a better solution */
Yann Collet7a231792015-11-21 15:27:35 +01001336 while ((deep>=1) && (ip<ilimit))
Yann Collet5106a762015-11-05 15:00:24 +01001337 {
1338 ip ++;
Yann Collet7a231792015-11-21 15:27:35 +01001339 if ((offset) && (MEM_read32(ip) == MEM_read32(ip - offset_1)))
Yann Collet5106a762015-11-05 15:00:24 +01001340 {
1341 size_t ml2 = ZSTD_count(ip+MINMATCH, ip+MINMATCH-offset_1, iend) + MINMATCH;
1342 int gain2 = (int)(ml2 * 3);
1343 int gain1 = (int)(matchLength*3 - ZSTD_highbit((U32)offset+1) + 1);
Yann Collet4baee502015-11-09 03:19:33 +01001344 if ((ml2 >= MINMATCH) && (gain2 > gain1))
Yann Collet5106a762015-11-05 15:00:24 +01001345 matchLength = ml2, offset = 0, start = ip;
1346 }
1347 {
1348 size_t offset2=999999;
1349 size_t ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
Yann Collet628065c2015-11-06 18:44:54 +01001350 int gain2 = (int)(ml2*(3+deep) - ZSTD_highbit((U32)offset2+1)); /* raw approx */
1351 int gain1 = (int)(matchLength*(3+deep) - ZSTD_highbit((U32)offset+1) + (3+deep));
Yann Collet4baee502015-11-09 03:19:33 +01001352 if ((ml2 >= MINMATCH) && (gain2 > gain1))
Yann Collet5106a762015-11-05 15:00:24 +01001353 {
Yann Collet628065c2015-11-06 18:44:54 +01001354 matchLength = ml2, offset = offset2, start = ip;
Yann Collet5106a762015-11-05 15:00:24 +01001355 continue; /* search a better one */
1356 }
1357 }
1358
1359 /* let's find an even better one */
Yann Collet7a231792015-11-21 15:27:35 +01001360 if ((deep==2) && (ip<ilimit))
Yann Collet5106a762015-11-05 15:00:24 +01001361 {
1362 ip ++;
Yann Collet7a231792015-11-21 15:27:35 +01001363 if ((offset) && (MEM_read32(ip) == MEM_read32(ip - offset_1)))
Yann Collet5106a762015-11-05 15:00:24 +01001364 {
1365 size_t ml2 = ZSTD_count(ip+MINMATCH, ip+MINMATCH-offset_1, iend) + MINMATCH;
1366 int gain2 = (int)(ml2 * 4);
1367 int gain1 = (int)(matchLength*4 - ZSTD_highbit((U32)offset+1) + 1);
Yann Collet4baee502015-11-09 03:19:33 +01001368 if ((ml2 >= MINMATCH) && (gain2 > gain1))
Yann Collet5106a762015-11-05 15:00:24 +01001369 matchLength = ml2, offset = 0, start = ip;
1370 }
1371 {
1372 size_t offset2=999999;
1373 size_t ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
Yann Collet628065c2015-11-06 18:44:54 +01001374 int gain2 = (int)(ml2*4 - ZSTD_highbit((U32)offset2+1)); /* raw approx */
1375 int gain1 = (int)(matchLength*4 - ZSTD_highbit((U32)offset+1) + 7);
Yann Collet4baee502015-11-09 03:19:33 +01001376 if ((ml2 >= MINMATCH) && (gain2 > gain1))
Yann Collet5106a762015-11-05 15:00:24 +01001377 {
Yann Collet628065c2015-11-06 18:44:54 +01001378 matchLength = ml2, offset = offset2, start = ip;
1379 continue;
Yann Collet5106a762015-11-05 15:00:24 +01001380 }
1381 }
1382 }
1383 break; /* nothing found : store previous solution */
1384 }
1385
Yann Collet4baee502015-11-09 03:19:33 +01001386 /* catch up */
Yann Collet628065c2015-11-06 18:44:54 +01001387 if (offset)
Yann Collet4baee502015-11-09 03:19:33 +01001388 {
1389 while ((start>anchor) && (start>ctx->base+offset) && (start[-1] == start[-1-offset]))
1390 { start--; matchLength++; }
Yann Collet743402c2015-11-20 12:03:53 +01001391 offset_2 = offset_1; offset_1 = offset;
Yann Collet4baee502015-11-09 03:19:33 +01001392 }
1393
1394 /* store sequence */
Yann Collet7a231792015-11-21 15:27:35 +01001395_storeSequence:
Yann Collet5106a762015-11-05 15:00:24 +01001396 {
1397 size_t litLength = start - anchor;
Yann Collet5106a762015-11-05 15:00:24 +01001398 ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, matchLength-MINMATCH);
Yann Collet4baee502015-11-09 03:19:33 +01001399 anchor = ip = start + matchLength;
Yann Collet5106a762015-11-05 15:00:24 +01001400 }
1401
Yann Collet743402c2015-11-20 12:03:53 +01001402 /* check immediate repcode */
1403 while ( (ip <= ilimit)
1404 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) )
1405 {
1406 /* store sequence */
1407 matchLength = ZSTD_count(ip+MINMATCH, ip+MINMATCH-offset_2, iend);
1408 offset = offset_2;
1409 offset_2 = offset_1;
1410 offset_1 = offset;
1411 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength);
1412 ip += matchLength+MINMATCH;
1413 anchor = ip;
1414            continue;   /* redundant at end of loop, yet seems faster when present */
1415 }
Yann Collet5106a762015-11-05 15:00:24 +01001416 }
1417
1418 /* Last Literals */
1419 {
1420 size_t lastLLSize = iend - anchor;
1421 memcpy(seqStorePtr->lit, anchor, lastLLSize);
1422 seqStorePtr->lit += lastLLSize;
1423 }
1424
1425 /* Final compression stage */
1426 return ZSTD_compressSequences((BYTE*)dst, maxDstSize,
1427 seqStorePtr, srcSize);
1428}
1429
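/* Strategy entry points : the four functions below all share ZSTD_compressBlock_lazy_generic(),
   differing only in their (searchMethod, deep) pair :
     btlazy2 = (1,2)  binary-tree search, depth 2
     lazy2   = (0,2)  hash-chain search,  depth 2
     lazy    = (0,1)  hash-chain search,  depth 1
     greedy  = (0,0)  hash-chain search,  no lazy evaluation */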
Yann Collet5be2dd22015-11-11 13:43:58 +01001430size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
Yann Collet5106a762015-11-05 15:00:24 +01001431{
Yann Collet7a231792015-11-21 15:27:35 +01001432 return ZSTD_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 1, 2);
Yann Collet5106a762015-11-05 15:00:24 +01001433}
1434
Yann Collet5be2dd22015-11-11 13:43:58 +01001435size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
Yann Collet5106a762015-11-05 15:00:24 +01001436{
Yann Collet7a231792015-11-21 15:27:35 +01001437 return ZSTD_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 0, 2);
Yann Collet5106a762015-11-05 15:00:24 +01001438}
1439
Yann Collet5be2dd22015-11-11 13:43:58 +01001440size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
Yann Collet5106a762015-11-05 15:00:24 +01001441{
Yann Collet7a231792015-11-21 15:27:35 +01001442 return ZSTD_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 0, 1);
Yann Collet5106a762015-11-05 15:00:24 +01001443}
1444
Yann Collet5be2dd22015-11-11 13:43:58 +01001445size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
Yann Colletf3eca252015-10-22 15:31:46 +01001446{
Yann Collet7a231792015-11-21 15:27:35 +01001447 return ZSTD_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 0, 0);
Yann Colletbe2010e2015-10-31 12:57:14 +01001448}
1449
1450
Yann Collet7a231792015-11-21 15:27:35 +01001451
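/* ZSTD_selectBlockCompressor() :
   maps a (strategy, extDict) pair to a block compressor. Only the fast strategy has a dedicated
   extDict variant here; the lazy-family match finders (see ZSTD_HcFindBestMatch() above) already
   handle the dictionary segment through dictBase/dictLimit, so the same functions are returned in
   both cases. */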
Yann Collet5be2dd22015-11-11 13:43:58 +01001452typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
Yann Collet59d70632015-11-04 12:05:27 +01001453
Yann Collet89db5e02015-11-13 11:27:46 +01001454static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
Yann Collet59d70632015-11-04 12:05:27 +01001455{
Yann Collet89db5e02015-11-13 11:27:46 +01001456 if (extDict)
Yann Collet59d70632015-11-04 12:05:27 +01001457 {
Yann Collet89db5e02015-11-13 11:27:46 +01001458 switch(strat)
1459 {
1460 default :
1461 case ZSTD_fast:
1462 return ZSTD_compressBlock_fast_extDict;
1463 case ZSTD_greedy:
1464 return ZSTD_compressBlock_greedy;
1465 case ZSTD_lazy:
1466 return ZSTD_compressBlock_lazy;
1467 case ZSTD_lazy2:
1468 return ZSTD_compressBlock_lazy2;
1469 case ZSTD_btlazy2:
1470 return ZSTD_compressBlock_btlazy2;
1471 }
1472 }
1473 else
1474 {
1475 switch(strat)
1476 {
1477 default :
1478 case ZSTD_fast:
1479 return ZSTD_compressBlock_fast;
1480 case ZSTD_greedy:
1481 return ZSTD_compressBlock_greedy;
1482 case ZSTD_lazy:
1483 return ZSTD_compressBlock_lazy;
1484 case ZSTD_lazy2:
1485 return ZSTD_compressBlock_lazy2;
1486 case ZSTD_btlazy2:
1487 return ZSTD_compressBlock_btlazy2;
1488 }
Yann Collet59d70632015-11-04 12:05:27 +01001489 }
1490}
1491
1492
Yann Collet5be2dd22015-11-11 13:43:58 +01001493size_t ZSTD_compressBlock(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
Yann Colletbe2010e2015-10-31 12:57:14 +01001494{
Yann Collet89db5e02015-11-13 11:27:46 +01001495 ZSTD_blockCompressor blockCompressor = ZSTD_selectBlockCompressor(ctx->params.strategy, ctx->lowLimit < ctx->dictLimit);
Yann Collet7dfd56b2015-11-19 17:46:29 +01001496 if (srcSize < MIN_CBLOCK_SIZE+3) return 0; /* don't even attempt compression below a certain srcSize */
Yann Collet59d70632015-11-04 12:05:27 +01001497 return blockCompressor(ctx, dst, maxDstSize, src, srcSize);
Yann Colletbe2010e2015-10-31 12:57:14 +01001498}
1499
1500
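/* ZSTD_compress_generic() :
   cuts the input into blocks of at most BLOCKSIZE bytes, compresses each one, and prefixes it with
   a 3-byte header : the block type sits in the 2 most-significant bits of the first byte, the
   compressed size in the remaining 22 bits, big-endian. Illustrative example (hypothetical size) :
   cSize == 0x012345 would be written as
       op[0] = 0x01 | (BYTE)(bt_compressed << 6);  op[1] = 0x23;  op[2] = 0x45;
   Incompressible blocks fall back to ZSTD_noCompressBlock(). Before each block, lowLimit is raised
   if needed so that no match can reference data further back than (1 << windowLog). */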
Yann Collet5be2dd22015-11-11 13:43:58 +01001501static size_t ZSTD_compress_generic (ZSTD_CCtx* ctxPtr,
Yann Colletf3eca252015-10-22 15:31:46 +01001502 void* dst, size_t maxDstSize,
1503 const void* src, size_t srcSize)
1504{
Yann Collet3e358272015-11-04 18:19:39 +01001505 size_t blockSize = BLOCKSIZE;
Yann Colletf3eca252015-10-22 15:31:46 +01001506 size_t remaining = srcSize;
1507 const BYTE* ip = (const BYTE*)src;
1508 BYTE* const ostart = (BYTE*)dst;
1509 BYTE* op = ostart;
Yann Collet89db5e02015-11-13 11:27:46 +01001510 const U32 maxDist = 1 << ctxPtr->params.windowLog;
1511 //const ZSTD_blockCompressor blockCompressor = ZSTD_selectBlockCompressor(ctxPtr->params.strategy, ctxPtr->lowLimit < ctxPtr->dictLimit);
Yann Collet9b11b462015-11-01 12:40:22 +01001512
Yann Collet3e358272015-11-04 18:19:39 +01001513 while (remaining)
Yann Colletf3eca252015-10-22 15:31:46 +01001514 {
Yann Collet3e358272015-11-04 18:19:39 +01001515 size_t cSize;
1516
Yann Collet3137d1a2015-11-04 23:36:36 +01001517 if (maxDstSize < 3 + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
Yann Collet3e358272015-11-04 18:19:39 +01001518 if (remaining < blockSize) blockSize = remaining;
Yann Collet89db5e02015-11-13 11:27:46 +01001519
1520 if ((U32)(ip+blockSize - (ctxPtr->base + ctxPtr->lowLimit)) > maxDist)
1521 /* respect windowLog contract */
1522 ctxPtr->lowLimit = (U32)(ip+blockSize - ctxPtr->base) - maxDist;
1523
1524 //cSize = blockCompressor(ctxPtr, op+3, maxDstSize-3, ip, blockSize);
1525 cSize = ZSTD_compressBlock(ctxPtr, op+3, maxDstSize-3, ip, blockSize);
Yann Collet3e358272015-11-04 18:19:39 +01001526 if (ZSTD_isError(cSize)) return cSize;
Yann Colletf3eca252015-10-22 15:31:46 +01001527
1528 if (cSize == 0)
1529 {
1530 cSize = ZSTD_noCompressBlock(op, maxDstSize, ip, blockSize); /* block is not compressible */
1531 }
1532 else
1533 {
1534 op[0] = (BYTE)(cSize>>16);
1535 op[1] = (BYTE)(cSize>>8);
1536 op[2] = (BYTE)cSize;
Yann Colletc95f8992015-11-19 17:28:35 +01001537 op[0] += (BYTE)(bt_compressed << 6); /* is a compressed block */
Yann Colletf3eca252015-10-22 15:31:46 +01001538 cSize += 3;
1539 }
1540
1541 remaining -= blockSize;
Yann Collet3e358272015-11-04 18:19:39 +01001542 maxDstSize -= cSize;
Yann Colletf3eca252015-10-22 15:31:46 +01001543 ip += blockSize;
1544 op += cSize;
Yann Colletf3eca252015-10-22 15:31:46 +01001545 }
1546
1547 return op-ostart;
1548}
1549
1550
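/* ZSTD_compressContinue() :
   streaming entry point. Before compressing, it
   - rescales all indexes (ZSTD_reduceIndex) once lowLimit grows past 1<<30, to avoid U32 overflow;
   - detects non-contiguous input and turns the previous segment into the dictionary segment
     (dictBase/dictLimit), rebasing `base` so that indexes keep increasing;
   - raises lowLimit when the new input overlaps the old dictionary segment, so the overlapped part
     can no longer be referenced.
   It then forwards to ZSTD_compress_generic(). */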
Yann Collet5be2dd22015-11-11 13:43:58 +01001551size_t ZSTD_compressContinue (ZSTD_CCtx* ctxPtr,
Yann Collet2acb5d32015-10-29 16:49:43 +01001552 void* dst, size_t dstSize,
1553 const void* src, size_t srcSize)
Yann Colletf3eca252015-10-22 15:31:46 +01001554{
Yann Collet2acb5d32015-10-29 16:49:43 +01001555 const BYTE* const ip = (const BYTE*) src;
Yann Colletf3eca252015-10-22 15:31:46 +01001556
Yann Collet89db5e02015-11-13 11:27:46 +01001557 /* preemptive overflow correction */
1558 if (ctxPtr->lowLimit > (1<<30) )
Yann Colletf3eca252015-10-22 15:31:46 +01001559 {
Yann Collet89db5e02015-11-13 11:27:46 +01001560 U32 correction = ctxPtr->lowLimit;
1561 ZSTD_reduceIndex(ctxPtr, correction);
1562 ctxPtr->base += correction;
1563 ctxPtr->dictBase += correction;
1564 ctxPtr->lowLimit -= correction;
1565 ctxPtr->dictLimit -= correction;
1566 if (ctxPtr->nextToUpdate < correction) ctxPtr->nextToUpdate = 0;
1567 else ctxPtr->nextToUpdate -= correction;
Yann Colletf3eca252015-10-22 15:31:46 +01001568 }
1569
Yann Collet89db5e02015-11-13 11:27:46 +01001570 /* Check if blocks follow each other */
1571 if (src != ctxPtr->nextSrc)
1572 {
1573 /* not contiguous */
1574 ctxPtr->lowLimit = ctxPtr->dictLimit;
1575 ctxPtr->dictLimit = (U32)(ctxPtr->nextSrc - ctxPtr->base);
1576 ctxPtr->dictBase = ctxPtr->base;
1577 ctxPtr->base += ip - ctxPtr->nextSrc;
1578 }
1579
1580 /* input-dictionary overlap */
1581 if ((ip+srcSize > ctxPtr->dictBase + ctxPtr->lowLimit) && (ip < ctxPtr->dictBase + ctxPtr->dictLimit))
1582 {
1583 ctxPtr->lowLimit = (U32)(ip + srcSize - ctxPtr->dictBase);
1584 if (ctxPtr->lowLimit > ctxPtr->dictLimit) ctxPtr->lowLimit = ctxPtr->dictLimit;
1585 }
1586
1587 ctxPtr->nextSrc = ip + srcSize;
1588
Yann Collet5be2dd22015-11-11 13:43:58 +01001589 return ZSTD_compress_generic (ctxPtr, dst, dstSize, src, srcSize);
Yann Colletf3eca252015-10-22 15:31:46 +01001590}
1591
1592
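/* ZSTD_compressBegin_advanced() :
   resets the compression context with the provided parameters and writes the 4-byte frame header
   (magic number) into dst. Returns the number of bytes written (4), or an error code. */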
Yann Collet5be2dd22015-11-11 13:43:58 +01001593size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* ctx,
Yann Collet89db5e02015-11-13 11:27:46 +01001594 void* dst, size_t maxDstSize,
1595 const ZSTD_parameters params,
1596 const U64 srcSizeHint)
Yann Colletf3eca252015-10-22 15:31:46 +01001597{
Yann Collet4114f952015-10-30 06:40:22 +01001598 size_t errorCode;
Yann Colletf3eca252015-10-22 15:31:46 +01001599 if (maxDstSize < 4) return ERROR(dstSize_tooSmall);
Yann Collet5be2dd22015-11-11 13:43:58 +01001600 errorCode = ZSTD_resetCCtx_advanced(ctx, params, srcSizeHint);
Yann Collet4114f952015-10-30 06:40:22 +01001601 if (ZSTD_isError(errorCode)) return errorCode;
Yann Collet89db5e02015-11-13 11:27:46 +01001602
Yann Collet2acb5d32015-10-29 16:49:43 +01001603 MEM_writeLE32(dst, ZSTD_magicNumber); /* Write Header */
Yann Colletf3eca252015-10-22 15:31:46 +01001604 return 4;
1605}
1606
Yann Collet083fcc82015-10-25 14:06:35 +01001607
Yann Collet5be2dd22015-11-11 13:43:58 +01001608size_t ZSTD_compressBegin(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, int compressionLevel, U64 srcSizeHint)
Yann Collet083fcc82015-10-25 14:06:35 +01001609{
Yann Collet89db5e02015-11-13 11:27:46 +01001610 int tableID = ((srcSizeHint-1) > 128 KB); /* intentional underflow for srcSizeHint == 0 */
Yann Collet2acb5d32015-10-29 16:49:43 +01001611 if (compressionLevel<=0) compressionLevel = 1;
Yann Collet5be2dd22015-11-11 13:43:58 +01001612 if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
1613 return ZSTD_compressBegin_advanced(ctx, dst, maxDstSize, ZSTD_defaultParameters[tableID][compressionLevel], srcSizeHint);
Yann Collet083fcc82015-10-25 14:06:35 +01001614}
1615
1616
Yann Collet5be2dd22015-11-11 13:43:58 +01001617size_t ZSTD_compressEnd(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize)
Yann Collet2acb5d32015-10-29 16:49:43 +01001618{
1619 BYTE* op = (BYTE*)dst;
1620
1621 /* Sanity check */
1622 (void)ctx;
1623 if (maxDstSize < 3) return ERROR(dstSize_tooSmall);
1624
1625 /* End of frame */
1626 op[0] = (BYTE)(bt_end << 6);
1627 op[1] = 0;
1628 op[2] = 0;
1629
1630 return 3;
1631}
1632
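/* Illustrative streaming sequence (a sketch, not part of the library source) :
   how ZSTD_compressBegin(), ZSTD_compressContinue() and ZSTD_compressEnd() above fit together.
   Names such as dst, dstCapacity, src, srcSize are hypothetical; error checks with ZSTD_isError()
   and worst-case output sizing are left to the caller.

       ZSTD_CCtx cctx;
       memset(&cctx, 0, sizeof(cctx));                              // zeroed stack context, as in ZSTD_compress() below
       size_t pos = ZSTD_compressBegin(&cctx, dst, dstCapacity, 1, srcSize);          // writes the 4-byte frame header
       pos += ZSTD_compressContinue(&cctx, dst+pos, dstCapacity-pos, src, srcSize);   // may be called repeatedly
       pos += ZSTD_compressEnd(&cctx, dst+pos, dstCapacity-pos);                      // 3-byte end-of-frame mark
       free(cctx.workSpace);                                        // heap content owned by the context
*/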
Yann Collet5be2dd22015-11-11 13:43:58 +01001633size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
Yann Collet083fcc82015-10-25 14:06:35 +01001634 void* dst, size_t maxDstSize,
1635 const void* src, size_t srcSize,
Yann Collet5be2dd22015-11-11 13:43:58 +01001636 ZSTD_parameters params)
Yann Colletf3eca252015-10-22 15:31:46 +01001637{
1638 BYTE* const ostart = (BYTE*)dst;
1639 BYTE* op = ostart;
Yann Collet4114f952015-10-30 06:40:22 +01001640 size_t oSize;
Yann Colletf3eca252015-10-22 15:31:46 +01001641
1642 /* Header */
Yann Collet5be2dd22015-11-11 13:43:58 +01001643 oSize = ZSTD_compressBegin_advanced(ctx, dst, maxDstSize, params, srcSize);
Yann Colletf3eca252015-10-22 15:31:46 +01001644 if(ZSTD_isError(oSize)) return oSize;
1645 op += oSize;
1646 maxDstSize -= oSize;
1647
1648 /* body (compression) */
Yann Collet3d9cf7a2015-10-29 17:15:14 +01001649 ctx->base = (const BYTE*)src;
Yann Collet5be2dd22015-11-11 13:43:58 +01001650 oSize = ZSTD_compress_generic (ctx, op, maxDstSize, src, srcSize);
Yann Colletf3eca252015-10-22 15:31:46 +01001651 if(ZSTD_isError(oSize)) return oSize;
1652 op += oSize;
1653 maxDstSize -= oSize;
1654
1655 /* Close frame */
Yann Collet5be2dd22015-11-11 13:43:58 +01001656 oSize = ZSTD_compressEnd(ctx, op, maxDstSize);
Yann Colletf3eca252015-10-22 15:31:46 +01001657 if(ZSTD_isError(oSize)) return oSize;
1658 op += oSize;
1659
1660 return (op - ostart);
1661}
1662
Yann Collet5be2dd22015-11-11 13:43:58 +01001663size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel)
Yann Collet083fcc82015-10-25 14:06:35 +01001664{
Yann Collet43e0cd52015-11-09 16:38:17 +01001665 const int tableID = (srcSize > 128 KB);
Yann Collet5be2dd22015-11-11 13:43:58 +01001666 if (compressionLevel < 1) compressionLevel = 1;
1667 if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
1668 return ZSTD_compress_advanced(ctx, dst, maxDstSize, src, srcSize, ZSTD_defaultParameters[tableID][compressionLevel]);
Yann Collet083fcc82015-10-25 14:06:35 +01001669}
1670
Yann Collet5be2dd22015-11-11 13:43:58 +01001671size_t ZSTD_compress(void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel)
Yann Colletf3eca252015-10-22 15:31:46 +01001672{
Yann Collet44fe9912015-10-29 22:02:40 +01001673 size_t result;
Yann Collet5be2dd22015-11-11 13:43:58 +01001674 ZSTD_CCtx ctxBody;
Yann Collet712def92015-10-29 18:41:45 +01001675 memset(&ctxBody, 0, sizeof(ctxBody));
Yann Collet5be2dd22015-11-11 13:43:58 +01001676 result = ZSTD_compressCCtx(&ctxBody, dst, maxDstSize, src, srcSize, compressionLevel);
Yann Collet89db5e02015-11-13 11:27:46 +01001677 free(ctxBody.workSpace); /* can't free ctxBody, since it's on stack; take care of heap content */
Yann Collet44fe9912015-10-29 22:02:40 +01001678 return result;
Yann Colletf3eca252015-10-22 15:31:46 +01001679}
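/* Illustrative one-shot usage (a sketch, not part of the library source) :
   buffer names are hypothetical; ZSTD_compressBound() from zstd.h is assumed available to size the
   destination buffer, and the return value should be checked with ZSTD_isError().

       size_t const dstCapacity = ZSTD_compressBound(srcSize);
       void* const dst = malloc(dstCapacity);
       size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 1);
       if (ZSTD_isError(cSize)) { ... }   // handle error
*/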