Merge pull request #1236 from GeorgeLu97/paramgrillconstraints
ParamgrillConstraints
diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c
index d5193d5..74f9dc2 100644
--- a/lib/compress/zstdmt_compress.c
+++ b/lib/compress/zstdmt_compress.c
@@ -1080,9 +1080,9 @@
{
ZSTD_frameProgression fps;
DEBUGLOG(6, "ZSTDMT_getFrameProgression");
+ fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
fps.consumed = mtctx->consumed;
fps.produced = mtctx->produced;
- fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
{ unsigned jobNb;
unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
@@ -1092,8 +1092,8 @@
ZSTD_pthread_mutex_lock(&mtctx->jobs[wJobID].job_mutex);
{ size_t const cResult = mtctx->jobs[wJobID].cSize;
size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
- fps.consumed += mtctx->jobs[wJobID].consumed;
fps.ingested += mtctx->jobs[wJobID].src.size;
+ fps.consumed += mtctx->jobs[wJobID].consumed;
fps.produced += produced;
}
ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
@@ -1545,6 +1545,8 @@
/*! ZSTDMT_flushProduced() :
+ * flush whatever data has been produced but not yet flushed in current job.
+ * move to next job if current one is fully flushed.
* `output` : `pos` will be updated with amount of data flushed .
* `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .
* @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
@@ -1593,6 +1595,7 @@
mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */
mtctx->jobs[wJobID].frameChecksumNeeded = 0;
}
+
if (cSize > 0) { /* compression is ongoing or completed */
size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
@@ -1606,7 +1609,7 @@
output->pos += toFlush;
mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */
- if ( (srcConsumed == srcSize) /* job completed */
+ if ( (srcConsumed == srcSize) /* job is completed */
&& (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */
DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
diff --git a/programs/README.md b/programs/README.md
index 2833875..22a0040 100644
--- a/programs/README.md
+++ b/programs/README.md
@@ -185,7 +185,7 @@
Compression Speed vs Ratio | Decompression Speed
---------------------------|---------------------
-![Compression Speed vs Ratio](../doc/images/ldmCspeed.png "Compression Speed vs Ratio") | ![Decompression Speed](../doc/images/ldmDspeed.png "Decompression Speed")
+![Compression Speed vs Ratio](https://raw.githubusercontent.com/facebook/zstd/v1.3.3/doc/images/ldmCspeed.png "Compression Speed vs Ratio") | ![Decompression Speed](https://raw.githubusercontent.com/facebook/zstd/v1.3.3/doc/images/ldmDspeed.png "Decompression Speed")
| Method | Compression ratio | Compression speed | Decompression speed |
|:-------|------------------:|-------------------------:|---------------------------:|
@@ -208,10 +208,24 @@
[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia
| Method | Compression ratio | Compression speed | Decompression speed |
-|:-------|------------------:|-------------------------:|---------------------------:|
-| `zstd -1` | `2.878` | `231.7 MB/s` | `594.4 MB/s` |
-| `zstd -1 --long` | `2.929` | `106.5 MB/s` | `517.9 MB/s` |
-| `zstd -5` | `3.274` | `77.1 MB/s` | `464.2 MB/s` |
-| `zstd -5 --long` | `3.319` | `51.7 MB/s` | `371.9 MB/s` |
-| `zstd -10` | `3.523` | `16.4 MB/s` | `489.2 MB/s` |
-| `zstd -10 --long`| `3.566` | `16.2 MB/s` | `415.7 MB/s` |
+|:-------|------------------:|------------------:|---------------------:|
+| `zstd -1` | `2.878` | `231.7 MB/s` | `594.4 MB/s` |
+| `zstd -1 --long` | `2.929` | `106.5 MB/s` | `517.9 MB/s` |
+| `zstd -5` | `3.274` | `77.1 MB/s` | `464.2 MB/s` |
+| `zstd -5 --long` | `3.319` | `51.7 MB/s` | `371.9 MB/s` |
+| `zstd -10` | `3.523` | `16.4 MB/s` | `489.2 MB/s` |
+| `zstd -10 --long`| `3.566` | `16.2 MB/s` | `415.7 MB/s` |
+
+
+#### zstdgrep
+
+`zstdgrep` is a utility that makes it possible to `grep` a `.zst` compressed file directly.
+It's used the same way as normal `grep`, for example :
+`zstdgrep pattern file.zst`
+
+`zstdgrep` is _not_ compatible with dictionary compression.
+
+To search into a file compressed with a dictionary,
+it's necessary to decompress it using `zstd` or `zstdcat`,
+and then pipe the result to `grep`. For example :
+`zstdcat -D dictionary -qc -- file.zst | grep pattern`
diff --git a/programs/fileio.c b/programs/fileio.c
index 85367fd..39b2c74 100644
--- a/programs/fileio.c
+++ b/programs/fileio.c
@@ -727,11 +727,6 @@
#endif
-/*! FIO_compressFilename_internal() :
- * same as FIO_compressFilename_extRess(), with `ress.desFile` already opened.
- * @return : 0 : compression completed correctly,
- * 1 : missing or pb opening srcFileName
- */
static unsigned long long
FIO_compressZstdFrame(const cRess_t* ressPtr,
const char* srcFileName, U64 fileSize,
@@ -763,7 +758,8 @@
directive = ZSTD_e_end;
result = 1;
- while (inBuff.pos != inBuff.size || (directive == ZSTD_e_end && result != 0)) {
+ while ((inBuff.pos != inBuff.size) /* input buffer must be entirely ingested */
+ || (directive == ZSTD_e_end && result != 0) ) {
ZSTD_outBuffer outBuff = { ress.dstBuffer, ress.dstBufferSize, 0 };
CHECK_V(result, ZSTD_compress_generic(ress.cctx, &outBuff, &inBuff, directive));
@@ -786,7 +782,8 @@
(U32)(zfp.consumed >> 20),
(U32)(zfp.produced >> 20),
cShare );
- } else { /* g_displayLevel == 2 */
+ } else {
+ /* g_displayLevel <= 2; only display notifications if == 2; */
DISPLAYLEVEL(2, "\rRead : %u ", (U32)(zfp.consumed >> 20));
if (fileSize != UTIL_FILESIZE_UNKNOWN)
DISPLAYLEVEL(2, "/ %u ", (U32)(fileSize >> 20));