Added the ability to split input files into blocks for dictionary training,
using the command-line option -B#.
This matches the behavior of the benchmark module,
which can also split its input into arbitrary-size blocks using -B#.
diff --git a/programs/dibio.c b/programs/dibio.c
index 79f2729..d7b7601 100644
--- a/programs/dibio.c
+++ b/programs/dibio.c
@@ -53,13 +53,12 @@
* Console display
***************************************/
#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
-#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-static int g_displayLevel = 0; /* 0 : no display; 1: errors; 2: default; 4: full information */
+#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); }
-#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
- if ((DIB_clockSpan(g_time) > refreshRate) || (g_displayLevel>=4)) \
+#define DISPLAYUPDATE(l, ...) if (displayLevel>=l) { \
+ if ((DIB_clockSpan(g_time) > refreshRate) || (displayLevel>=4)) \
{ g_time = clock(); DISPLAY(__VA_ARGS__); \
- if (g_displayLevel>=4) fflush(stderr); } }
+ if (displayLevel>=4) fflush(stderr); } }
static const clock_t refreshRate = CLOCKS_PER_SEC * 2 / 10;
static clock_t g_time = 0;
@@ -76,9 +75,9 @@
#define EXM_THROW(error, ...) \
{ \
DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
- DISPLAYLEVEL(1, "Error %i : ", error); \
- DISPLAYLEVEL(1, __VA_ARGS__); \
- DISPLAYLEVEL(1, "\n"); \
+ DISPLAY("Error %i : ", error); \
+ DISPLAY(__VA_ARGS__); \
+ DISPLAY("\n"); \
exit(error); \
}
@@ -102,30 +101,42 @@
* @return : nb of files effectively loaded into `buffer`
* *bufferSizePtr is modified, it provides the amount data loaded within buffer */
static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr,
- size_t* fileSizes,
- const char** fileNamesTable, unsigned nbFiles)
+ size_t* chunkSizes,
+ const char** fileNamesTable, unsigned nbFiles, size_t targetChunkSize,
+ unsigned displayLevel)
{
char* const buff = (char*)buffer;
size_t pos = 0;
- unsigned n;
+ unsigned nbLoadedChunks = 0, fileIndex;
- for (n=0; n<nbFiles; n++) {
- const char* const fileName = fileNamesTable[n];
+ for (fileIndex=0; fileIndex<nbFiles; fileIndex++) {
+ const char* const fileName = fileNamesTable[fileIndex];
unsigned long long const fs64 = UTIL_getFileSize(fileName);
- size_t const fileSize = (size_t) MIN(fs64, SAMPLESIZE_MAX);
- if (fileSize > *bufferSizePtr-pos) break;
- { FILE* const f = fopen(fileName, "rb");
- if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno));
- DISPLAYUPDATE(2, "Loading %s... \r", fileName);
- { size_t const readSize = fread(buff+pos, 1, fileSize, f);
- if (readSize != fileSize) EXM_THROW(11, "Pb reading %s", fileName);
- pos += readSize; }
- fileSizes[n] = fileSize;
- fclose(f);
- } }
+ unsigned long long remainingToLoad = fs64;
+ U32 const nbChunks = targetChunkSize ? (U32)((fs64 + (targetChunkSize-1)) / targetChunkSize) : 1;
+ U64 const chunkSize = targetChunkSize ? MIN(targetChunkSize, fs64) : fs64;
+ size_t const maxChunkSize = MIN(chunkSize, SAMPLESIZE_MAX);
+ U32 cnb;
+ FILE* const f = fopen(fileName, "rb");
+ if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno));
+ DISPLAYUPDATE(2, "Loading %s... \r", fileName);
+ for (cnb=0; cnb<nbChunks; cnb++) {
+ size_t const toLoad = MIN(maxChunkSize, remainingToLoad);
+ if (toLoad > *bufferSizePtr-pos) break;
+ { size_t const readSize = fread(buff+pos, 1, toLoad, f);
+ if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName);
+ pos += readSize;
+ chunkSizes[nbLoadedChunks++] = toLoad;
+ if (remainingToLoad > targetChunkSize) remainingToLoad -= targetChunkSize; else remainingToLoad = 0; /* avoid unsigned underflow on last partial chunk */
+ if (toLoad < targetChunkSize) {
+ fseek(f, (long)(targetChunkSize - toLoad), SEEK_CUR); /* skip the rest of a capped chunk; fseek takes a long offset */
+ } } }
+ fclose(f);
+ }
DISPLAYLEVEL(2, "\r%79s\r", "");
*bufferSizePtr = pos;
+ DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10)); /* terminate statement; do not rely on macro's trailing brace */
+ return nbLoadedChunks;
}
#define DiB_rotl32(x,r) ((x << r) | (x >> (32 - r)))
@@ -207,18 +218,28 @@
}
-static int g_tooLargeSamples = 0;
-static U64 DiB_totalCappedFileSize(const char** fileNamesTable, unsigned nbFiles)
+typedef struct {
+ U64 totalSizeToLoad;
+ unsigned oneSampleTooLarge;
+ unsigned nbChunks;
+} fileStats;
+
+static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, unsigned displayLevel)
{
- U64 total = 0;
+ fileStats fs;
unsigned n;
+ memset(&fs, 0, sizeof(fs));
for (n=0; n<nbFiles; n++) {
U64 const fileSize = UTIL_getFileSize(fileNamesTable[n]);
- U64 const cappedFileSize = MIN(fileSize, SAMPLESIZE_MAX);
- total += cappedFileSize;
- g_tooLargeSamples |= (fileSize > 2*SAMPLESIZE_MAX);
+ U32 const nbChunks = (U32)(chunkSize ? (fileSize + (chunkSize-1)) / chunkSize : 1);
+ U64 const chunkToLoad = chunkSize ? MIN(chunkSize, fileSize) : fileSize;
+ size_t const cappedChunkSize = MIN(chunkToLoad, SAMPLESIZE_MAX);
+ fs.totalSizeToLoad += cappedChunkSize * nbChunks;
+ fs.oneSampleTooLarge |= (chunkToLoad > 2*SAMPLESIZE_MAX); /* test the actual sample size, not the -B parameter; chunkSize==0 (no split) must still warn on huge files */
+ fs.nbChunks += nbChunks;
}
- return total;
+ DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10));
+ return fs;
}
@@ -235,63 +256,66 @@
int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
- const char** fileNamesTable, unsigned nbFiles,
+ const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
int optimizeCover)
{
+ unsigned displayLevel = params ? params->zParams.notificationLevel :
+ coverParams ? coverParams->zParams.notificationLevel :
+ 0; /* should never happen */
void* const dictBuffer = malloc(maxDictSize);
- size_t* const fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
- unsigned long long const totalSizeToLoad = DiB_totalCappedFileSize(fileNamesTable, nbFiles);
+ fileStats const fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel);
+ size_t* const chunkSizes = (size_t*)malloc(fs.nbChunks * sizeof(size_t));
size_t const memMult = params ? MEMMULT : COVER_MEMMULT;
- size_t const maxMem = DiB_findMaxMem(totalSizeToLoad * memMult) / memMult;
- size_t benchedSize = (size_t) MIN ((unsigned long long)maxMem, totalSizeToLoad);
- void* const srcBuffer = malloc(benchedSize+NOISELENGTH);
+ size_t const maxMem = DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult;
+ size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad);
+ void* const srcBuffer = malloc(loadedSize+NOISELENGTH);
int result = 0;
/* Checks */
- if (params) g_displayLevel = params->zParams.notificationLevel;
- else if (coverParams) g_displayLevel = coverParams->zParams.notificationLevel;
- else EXM_THROW(13, "Neither dictionary algorithm selected"); /* should not happen */
- if ((!fileSizes) || (!srcBuffer) || (!dictBuffer))
+ if ((!chunkSizes) || (!srcBuffer) || (!dictBuffer))
EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */
+ if ((!params) && (!coverParams)) EXM_THROW(13, "Neither dictionary algorithm selected"); /* keep guard: avoids NULL deref on *coverParams below */
- if (g_tooLargeSamples) {
- DISPLAYLEVEL(2, "! Warning : some samples are very large \n");
- DISPLAYLEVEL(2, "! Note that dictionary is only useful for small files or beginning of large files. \n");
- DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each file are loaded \n", SAMPLESIZE_MAX);
+ if (fs.oneSampleTooLarge) {
+ DISPLAYLEVEL(2, "! Warning : some sample(s) are very large \n");
+ DISPLAYLEVEL(2, "! Note that dictionary is only useful for small samples. \n");
+ DISPLAYLEVEL(2, "! As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX);
}
- if ((nbFiles < 5) || (totalSizeToLoad < 9 * (unsigned long long)maxDictSize)) {
+ if (fs.nbChunks < 5) {
DISPLAYLEVEL(2, "! Warning : nb of samples too low for proper processing ! \n");
DISPLAYLEVEL(2, "! Please provide _one file per sample_. \n");
- DISPLAYLEVEL(2, "! Do not concatenate samples together into a single file, \n");
- DISPLAYLEVEL(2, "! as dictBuilder will be unable to find the beginning of each sample, \n");
- DISPLAYLEVEL(2, "! resulting in poor dictionary quality. \n");
+ EXM_THROW(14, "nb of samples too low"); /* we now clearly forbid this case */
+ }
+ if (fs.totalSizeToLoad < (unsigned long long)(8 * maxDictSize)) {
+ DISPLAYLEVEL(2, "! Warning : data size of samples too small for target dictionary size \n");
+ DISPLAYLEVEL(2, "! Samples should be about 100x larger than target dictionary size \n");
}
/* init */
- if (benchedSize < totalSizeToLoad)
- DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(benchedSize >> 20));
+ if (loadedSize < fs.totalSizeToLoad)
+ DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20));
/* Load input buffer */
DISPLAYLEVEL(3, "Shuffling input files\n");
DiB_shuffle(fileNamesTable, nbFiles);
- nbFiles = DiB_loadFiles(srcBuffer, &benchedSize, fileSizes, fileNamesTable, nbFiles);
+ nbFiles = DiB_loadFiles(srcBuffer, &loadedSize, chunkSizes, fileNamesTable, nbFiles, chunkSize, displayLevel);
{ size_t dictSize;
if (params) {
- DiB_fillNoise((char*)srcBuffer + benchedSize, NOISELENGTH); /* guard band, for end of buffer condition */
+ DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH); /* guard band, for end of buffer condition */
dictSize = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, maxDictSize,
- srcBuffer, fileSizes, nbFiles,
+ srcBuffer, chunkSizes, fs.nbChunks,
*params);
} else if (optimizeCover) {
dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize,
- srcBuffer, fileSizes, nbFiles,
+ srcBuffer, chunkSizes, fs.nbChunks,
coverParams);
if (!ZDICT_isError(dictSize)) {
DISPLAYLEVEL(2, "k=%u\nd=%u\nsteps=%u\n", coverParams->k, coverParams->d, coverParams->steps);
}
} else {
dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
- fileSizes, nbFiles, *coverParams);
+ chunkSizes, fs.nbChunks, *coverParams);
}
if (ZDICT_isError(dictSize)) {
DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */
diff --git a/programs/dibio.h b/programs/dibio.h
index ac24244..499e303 100644
--- a/programs/dibio.h
+++ b/programs/dibio.h
@@ -32,7 +32,7 @@
@return : 0 == ok. Any other : error.
*/
int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
- const char** fileNamesTable, unsigned nbFiles,
+ const char** fileNamesTable, unsigned nbFiles, size_t chunkSize,
ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
int optimizeCover);
diff --git a/programs/zstd.1.md b/programs/zstd.1.md
index 2fcedde..e446422 100644
--- a/programs/zstd.1.md
+++ b/programs/zstd.1.md
@@ -184,6 +184,8 @@
Dictionary saved into `file` (default name: dictionary).
* `--maxdict=#`:
Limit dictionary to specified size (default: 112640).
+* `-B#`:
+ Split input files into blocks of size # (default: no split)
* `--dictID=#`:
A dictionary ID is a locally unique ID that a decoder can use to verify it is
using the right dictionary.
@@ -373,7 +375,7 @@
default value will likely result in a decrease in compression ratio.
The default value is `wlog - ldmhlog`.
-
+
### -B#:
Select the size of each compression job.
This parameter is available only when multi-threading is enabled.
diff --git a/programs/zstdcli.c b/programs/zstdcli.c
index 607287c..78adcb6 100644
--- a/programs/zstdcli.c
+++ b/programs/zstdcli.c
@@ -759,13 +759,13 @@
int const optimize = !coverParams.k || !coverParams.d;
coverParams.nbThreads = nbThreads;
coverParams.zParams = zParams;
- operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, NULL, &coverParams, optimize);
+ operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, NULL, &coverParams, optimize);
} else {
ZDICT_legacy_params_t dictParams;
memset(&dictParams, 0, sizeof(dictParams));
dictParams.selectivityLevel = dictSelect;
dictParams.zParams = zParams;
- operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, &dictParams, NULL, 0);
+ operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, &dictParams, NULL, 0);
}
#endif
goto _end;