Spelling (#1582)

* spelling: accidentally

* spelling: across

* spelling: additionally

* spelling: addresses

* spelling: appropriate

* spelling: assumed

* spelling: available

* spelling: builder

* spelling: capacity

* spelling: compiler

* spelling: compressibility

* spelling: compression

* spelling: compressor

* spelling: contract

* spelling: convenience

* spelling: decompress

* spelling: deflate

* spelling: description

* spelling: deterministically

* spelling: dictionary

* spelling: display

* spelling: eliminate

* spelling: exclude

* spelling: failure

* spelling: independence

* spelling: independent

* spelling: intentionally

* spelling: matching

* spelling: maximum

* spelling: meaning

* spelling: memory

* spelling: mishandled

* spelling: occasionally

* spelling: occurrence

* spelling: official

* spelling: offsets

* spelling: original

* spelling: output

* spelling: overflow

* spelling: overridden

* spelling: parameter

* spelling: performance

* spelling: preemptively

* spelling: probability

* spelling: receives

* spelling: recompression

* spelling: redundant

* spelling: resources

* spelling: return

* spelling: sanity

* spelling: segment

* spelling: series

* spelling: specified

* spelling: specify

* spelling: subtracted

* spelling: successful

* spelling: translation

* spelling: unrelated

* spelling: update

* spelling: useless

* spelling: variables

* spelling: variety

* spelling: verbatim

* spelling: verification

* spelling: visited

* spelling: warming

* spelling: with

* spelling: workers

diff --git a/CHANGELOG b/CHANGELOG
index 0c09b4a..cf82ccd 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -243,7 +243,7 @@
 Change Licensing, all project is now BSD, Copyright Facebook
 Small decompression speed improvement
 API : Streaming API supports legacy format
-API : ZDICT_getDictID(), ZSTD_sizeof_{CCtx, DCtx, CStream, DStream}(), ZSTD_setDStreamParamter()
+API : ZDICT_getDictID(), ZSTD_sizeof_{CCtx, DCtx, CStream, DStream}(), ZSTD_setDStreamParameter()
 CLI supports legacy formats v0.4+
 Fixed : compression fails on certain huge files, reported by Jesse McGrew
 Enhanced documentation, by Przemyslaw Skibinski
diff --git a/build/cmake/README.md b/build/cmake/README.md
index 0c71a54..681b14c 100644
--- a/build/cmake/README.md
+++ b/build/cmake/README.md
@@ -3,7 +3,7 @@
 Contributions to the cmake build configurations are welcome. Please
 use case sensitivity that matches modern (ie. cmake version 2.6 and above)
 conventions of using lower-case for commands, and upper-case for
-varibles.
+variables.
 
 # CMake Style Recommendations
 
diff --git a/contrib/docker/README.md b/contrib/docker/README.md
index 2991177..43f6d7a 100644
--- a/contrib/docker/README.md
+++ b/contrib/docker/README.md
@@ -5,7 +5,7 @@
 
 ## Installing docker
 
-The officiel docker install docs use a ppa with a modern version available:
+The official docker install docs use a ppa with a modern version available:
 https://docs.docker.com/install/linux/docker-ce/ubuntu/
 
 ## How to run
diff --git a/contrib/experimental_dict_builders/benchmarkDictBuilder/benchmark.c b/contrib/experimental_dict_builders/benchmarkDictBuilder/benchmark.c
index b193456..cd94379 100644
--- a/contrib/experimental_dict_builders/benchmarkDictBuilder/benchmark.c
+++ b/contrib/experimental_dict_builders/benchmarkDictBuilder/benchmark.c
@@ -127,7 +127,7 @@
 
 
 /** compressWithDict() :
- *  Compress samples from sample buffer given dicionary stored on dictionary buffer and compression level
+ *  Compress samples from sample buffer given dictionary stored on dictionary buffer and compression level
  *  @return compression ratio
  */
 double compressWithDict(sampleInfo *srcInfo, dictInfo* dInfo, int compressionLevel, int displayLevel) {
@@ -194,7 +194,7 @@
     totalCompressedSize += compressedSize;
   }
 
-  /* Sum orignal sizes */
+  /* Sum original sizes */
   for (i = 0; i<srcInfo->nbSamples; i++) {
     totalOriginalSize += srcInfo->samplesSizes[i];
   }
diff --git a/contrib/experimental_dict_builders/fastCover/fastCover.c b/contrib/experimental_dict_builders/fastCover/fastCover.c
index 02c155a..0a338bd 100644
--- a/contrib/experimental_dict_builders/fastCover/fastCover.c
+++ b/contrib/experimental_dict_builders/fastCover/fastCover.c
@@ -125,7 +125,7 @@
  *
  *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
  *
- * Once the dmer with hash value d is in the dictionay we set F(d) = F(d)/2.
+ * Once the dmer with hash value d is in the dictionary we set F(d) = F(d)/2.
  */
 static FASTCOVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
                                                   U32 *freqs, U32 begin,U32 end,
@@ -149,7 +149,7 @@
     while (activeSegment.end < end) {
       /* Get hash value of current dmer */
       const size_t index = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, parameters.f, ctx->d);
-      /* Add frequency of this index to score if this is the first occurence of index in active segment */
+      /* Add frequency of this index to score if this is the first occurrence of index in active segment */
       if (ctx->segmentFreqs[index] == 0) {
         activeSegment.score += freqs[index];
       }
diff --git a/contrib/largeNbDicts/largeNbDicts.c b/contrib/largeNbDicts/largeNbDicts.c
index 18953ec..dcc186b 100644
--- a/contrib/largeNbDicts/largeNbDicts.c
+++ b/contrib/largeNbDicts/largeNbDicts.c
@@ -424,7 +424,7 @@
 }
 
 
-/* mess with adresses, so that linear scanning dictionaries != linear address scanning */
+/* mess with addresses, so that linear scanning dictionaries != linear address scanning */
 void shuffleDictionaries(ddict_collection_t dicts)
 {
     size_t const nbDicts = dicts.nbDDict;
diff --git a/contrib/linux-kernel/0002-lib-Add-zstd-modules.patch b/contrib/linux-kernel/0002-lib-Add-zstd-modules.patch
index c3bbaed..0232d2d 100644
--- a/contrib/linux-kernel/0002-lib-Add-zstd-modules.patch
+++ b/contrib/linux-kernel/0002-lib-Add-zstd-modules.patch
@@ -4,7 +4,7 @@
 Subject: [PATCH v5 2/5] lib: Add zstd modules
 
 Add zstd compression and decompression kernel modules.
-zstd offers a wide varity of compression speed and quality trade-offs.
+zstd offers a wide variety of compression speed and quality trade-offs.
 It can compress at speeds approaching lz4, and quality approaching lzma.
 zstd decompressions at speeds more than twice as fast as zlib, and
 decompression speed remains roughly the same across all compression levels.
@@ -21,7 +21,7 @@
 I benchmarked zstd compression as a special character device. I ran zstd
 and zlib compression at several levels, as well as performing no
 compression, which measure the time spent copying the data to kernel space.
-Data is passed to the compresser 4096 B at a time. The benchmark file is
+Data is passed to the compressor 4096 B at a time. The benchmark file is
 located in the upstream zstd source repository under
 `contrib/linux-kernel/zstd_compress_test.c` [2].
 
@@ -86,7 +86,7 @@
 `contrib/linux-kernel/test/UserlandTest.cpp` [5] by mocking the kernel
 functions. Fuzz tested using libfuzzer [6] with the fuzz harnesses under
 `contrib/linux-kernel/test/{RoundTripCrash.c,DecompressCrash.c}` [7] [8]
-with ASAN, UBSAN, and MSAN. Additionaly, it was tested while testing the
+with ASAN, UBSAN, and MSAN. Additionally, it was tested while testing the
 BtrFS and SquashFS patches coming next.
 
 [1] https://clang.llvm.org/docs/ClangFormat.html
@@ -4200,14 +4200,14 @@
 +	BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
 +	U32 const fcsCode =
 +	    params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
-+	BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
++	BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
 +	size_t pos;
 +
 +	if (dstCapacity < ZSTD_frameHeaderSize_max)
 +		return ERROR(dstSize_tooSmall);
 +
 +	ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
-+	op[4] = frameHeaderDecriptionByte;
++	op[4] = frameHeaderDescriptionByte;
 +	pos = 5;
 +	if (!singleSegment)
 +		op[pos++] = windowLogByte;
@@ -8812,8 +8812,8 @@
 +		U32 position = 0;
 +		U32 symbol;
 +		for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
-+			int nbOccurences;
-+			for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
++			int nbOccurrences;
++			for (nbOccurrences = 0; nbOccurrences < normalizedCounter[symbol]; nbOccurrences++) {
 +				tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
 +				position = (position + step) & tableMask;
 +				while (position > highThreshold)
@@ -9944,7 +9944,7 @@
 +	HUF_repeat_none,  /**< Cannot use the previous table */
 +	HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
 +			     4}X_repeat */
-+	HUF_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
++	HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 +} HUF_repeat;
 +/** HUF_compress4X_repeat() :
 +*   Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
diff --git a/contrib/linux-kernel/0006-squashfs-tools-Add-zstd-support.patch b/contrib/linux-kernel/0006-squashfs-tools-Add-zstd-support.patch
index ca638f2..00d24e2 100644
--- a/contrib/linux-kernel/0006-squashfs-tools-Add-zstd-support.patch
+++ b/contrib/linux-kernel/0006-squashfs-tools-Add-zstd-support.patch
@@ -11,7 +11,7 @@
 ---
 v4 -> v5:
 - Fix patch documentation to reflect that Sean Purcell is the author
-- Don't strip trailing whitespace of unreleated code
+- Don't strip trailing whitespace of unrelated code
 - Make zstd_display_options() static
 
 v5 -> v6:
@@ -224,7 +224,7 @@
 + * set the default options, this is to ensure any user supplied
 + * -X options on the appending mksquashfs command line are over-ridden.
 + *
-+ * This function returns 0 on sucessful extraction of options, and -1 on error.
++ * This function returns 0 on successful extraction of options, and -1 on error.
 + */
 +static int zstd_extract_options(int block_size, void *buffer, int size)
 +{
diff --git a/contrib/linux-kernel/lib/zstd/compress.c b/contrib/linux-kernel/lib/zstd/compress.c
index ff18ae6..43535b8 100644
--- a/contrib/linux-kernel/lib/zstd/compress.c
+++ b/contrib/linux-kernel/lib/zstd/compress.c
@@ -2436,14 +2436,14 @@
 	BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
 	U32 const fcsCode =
 	    params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
-	BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
+	BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
 	size_t pos;
 
 	if (dstCapacity < ZSTD_frameHeaderSize_max)
 		return ERROR(dstSize_tooSmall);
 
 	ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
-	op[4] = frameHeaderDecriptionByte;
+	op[4] = frameHeaderDescriptionByte;
 	pos = 5;
 	if (!singleSegment)
 		op[pos++] = windowLogByte;
diff --git a/contrib/linux-kernel/lib/zstd/fse_compress.c b/contrib/linux-kernel/lib/zstd/fse_compress.c
index ef3d174..0fe468e 100644
--- a/contrib/linux-kernel/lib/zstd/fse_compress.c
+++ b/contrib/linux-kernel/lib/zstd/fse_compress.c
@@ -141,8 +141,8 @@
 		U32 position = 0;
 		U32 symbol;
 		for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
-			int nbOccurences;
-			for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
+			int nbOccurrences;
+			for (nbOccurrences = 0; nbOccurrences < normalizedCounter[symbol]; nbOccurrences++) {
 				tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
 				position = (position + step) & tableMask;
 				while (position > highThreshold)
diff --git a/contrib/linux-kernel/lib/zstd/huf.h b/contrib/linux-kernel/lib/zstd/huf.h
index 2143da2..923218d 100644
--- a/contrib/linux-kernel/lib/zstd/huf.h
+++ b/contrib/linux-kernel/lib/zstd/huf.h
@@ -134,7 +134,7 @@
 	HUF_repeat_none,  /**< Cannot use the previous table */
 	HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
 			     4}X_repeat */
-	HUF_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
+	HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 } HUF_repeat;
 /** HUF_compress4X_repeat() :
 *   Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
diff --git a/contrib/linux-kernel/test/include/linux/compiler.h b/contrib/linux-kernel/test/include/linux/compiler.h
index 7991b8b..4fb4f42 100644
--- a/contrib/linux-kernel/test/include/linux/compiler.h
+++ b/contrib/linux-kernel/test/include/linux/compiler.h
@@ -1,5 +1,5 @@
-#ifndef LINUX_COMIPLER_H_
-#define LINUX_COMIPLER_H_
+#ifndef LINUX_COMPILER_H_
+#define LINUX_COMPILER_H_
 
 #ifndef __always_inline
 #  define __always_inline inline
@@ -9,4 +9,4 @@
 #  define noinline __attribute__((__noinline__))
 #endif
 
-#endif // LINUX_COMIPLER_H_
+#endif // LINUX_COMPILER_H_
diff --git a/contrib/pzstd/Pzstd.cpp b/contrib/pzstd/Pzstd.cpp
index 6c580b3..652187c 100644
--- a/contrib/pzstd/Pzstd.cpp
+++ b/contrib/pzstd/Pzstd.cpp
@@ -55,7 +55,7 @@
                              SharedState& state) {
   auto inputSize = fileSizeOrZero(inputFile);
   // WorkQueue outlives ThreadPool so in the case of error we are certain
-  // we don't accidently try to call push() on it after it is destroyed
+  // we don't accidentally try to call push() on it after it is destroyed
   WorkQueue<std::shared_ptr<BufferWorkQueue>> outs{options.numThreads + 1};
   std::uint64_t bytesRead;
   std::uint64_t bytesWritten;
diff --git a/contrib/pzstd/utils/Range.h b/contrib/pzstd/utils/Range.h
index 7e2559c..fedb5d7 100644
--- a/contrib/pzstd/utils/Range.h
+++ b/contrib/pzstd/utils/Range.h
@@ -9,7 +9,7 @@
  
 /**
  * A subset of `folly/Range.h`.
- * All code copied verbatiam modulo formatting
+ * All code copied verbatim modulo formatting
  */
 #pragma once
 
diff --git a/contrib/pzstd/utils/ResourcePool.h b/contrib/pzstd/utils/ResourcePool.h
index a6ff5ff..8dfcdd7 100644
--- a/contrib/pzstd/utils/ResourcePool.h
+++ b/contrib/pzstd/utils/ResourcePool.h
@@ -54,7 +54,7 @@
 
   /**
    * @returns  A unique pointer to a resource.  The resource is null iff
-   *           there are no avaiable resources and `factory()` returns null.
+   *           there are no available resources and `factory()` returns null.
    */
   UniquePtr get() {
     std::lock_guard<std::mutex> lock(mutex_);
diff --git a/doc/README.md b/doc/README.md
index 1f01fa4..bb7a3e4 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -12,8 +12,8 @@
 Compliant decoders must adhere to this document,
 and compliant encoders must generate data that follows it.
 
-Should you look for ressources to develop your own port of Zstandard algorithm,
-you may find the following ressources useful :
+Should you look for resources to develop your own port of Zstandard algorithm,
+you may find the following resources useful :
 
 __`educational_decoder`__ : This directory contains an implementation of a Zstandard decoder,
 compliant with the Zstandard compression format.
diff --git a/doc/educational_decoder/zstd_decompress.c b/doc/educational_decoder/zstd_decompress.c
index bea0e0c..8e231bb 100644
--- a/doc/educational_decoder/zstd_decompress.c
+++ b/doc/educational_decoder/zstd_decompress.c
@@ -358,7 +358,7 @@
                          ostream_t *const out);
 
 // Given an offset code from a sequence command (either an actual offset value
-// or an index for previous offset), computes the correct offset and udpates
+// or an index for previous offset), computes the correct offset and updates
 // the offset history
 static size_t compute_offset(sequence_command_t seq, u64 *const offset_hist);
 
diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html
index cb101e1..f1628b5 100644
--- a/doc/zstd_manual.html
+++ b/doc/zstd_manual.html
@@ -384,7 +384,7 @@
   Note 3 : Whenever all input data is provided and consumed in a single round,
            for example with ZSTD_compress2(),
            or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
-           this value is automatically overriden by srcSize instead.
+           this value is automatically overridden by srcSize instead.
  
 </p></pre><BR>
 
@@ -571,8 +571,8 @@
 </b><p>  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
   - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
   - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
-  - outpot->pos must be <= dstCapacity, input->pos must be <= srcSize
-  - outpot->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
   - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
   - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available,
                                                   and then immediately returns, just indicating that there is some data remaining to be flushed.
diff --git a/examples/multiple_simple_compression.c b/examples/multiple_simple_compression.c
index 51c9ec7..a44ac8b 100644
--- a/examples/multiple_simple_compression.c
+++ b/examples/multiple_simple_compression.c
@@ -107,7 +107,7 @@
         compressFile_orDie(ress, inFilename, outFilename);
     }
 
-    /* free momery */
+    /* free memory */
     freeResources(ress,outFilename);
 
     printf("compressed %i files \n", argc-1);
diff --git a/lib/common/compiler.h b/lib/common/compiler.h
index 7f56128..0836e3e 100644
--- a/lib/common/compiler.h
+++ b/lib/common/compiler.h
@@ -40,7 +40,7 @@
 
 /**
  * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
- * parameters. They must be inlined for the compiler to elimininate the constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
  * branches.
  */
 #define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
diff --git a/lib/common/fse.h b/lib/common/fse.h
index f72c519..811c670 100644
--- a/lib/common/fse.h
+++ b/lib/common/fse.h
@@ -358,7 +358,7 @@
 typedef enum {
    FSE_repeat_none,  /**< Cannot use the previous table */
    FSE_repeat_check, /**< Can use the previous table but it must be checked */
-   FSE_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
+   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
  } FSE_repeat;
 
 /* *****************************************
diff --git a/lib/common/threading.c b/lib/common/threading.c
index 8be8c8d..f3d4fa8 100644
--- a/lib/common/threading.c
+++ b/lib/common/threading.c
@@ -14,8 +14,8 @@
  * This file will hold wrapper for systems, which do not support pthreads
  */
 
-/* create fake symbol to avoid empty trnaslation unit warning */
-int g_ZSTD_threading_useles_symbol;
+/* create fake symbol to avoid empty translation unit warning */
+int g_ZSTD_threading_useless_symbol;
 
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
 
diff --git a/lib/common/xxhash.c b/lib/common/xxhash.c
index 532b816..30599aa 100644
--- a/lib/common/xxhash.c
+++ b/lib/common/xxhash.c
@@ -66,10 +66,10 @@
 /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
 
 /*!XXH_FORCE_NATIVE_FORMAT :
- * By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
+ * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
  * Results are therefore identical for little-endian and big-endian CPU.
  * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
- * Should endian-independance be of no importance for your application, you may set the #define below to 1,
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
  * to improve speed for Big-endian CPU.
  * This option has no impact on Little_Endian CPU.
  */
diff --git a/lib/compress/fse_compress.c b/lib/compress/fse_compress.c
index 60f357b..68b47e1 100644
--- a/lib/compress/fse_compress.c
+++ b/lib/compress/fse_compress.c
@@ -129,9 +129,9 @@
     {   U32 position = 0;
         U32 symbol;
         for (symbol=0; symbol<=maxSymbolValue; symbol++) {
-            int nbOccurences;
+            int nbOccurrences;
             int const freq = normalizedCounter[symbol];
-            for (nbOccurences=0; nbOccurences<freq; nbOccurences++) {
+            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                 tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                 position = (position + step) & tableMask;
                 while (position > highThreshold)
diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
index 4a9f6b7..2e163c8 100644
--- a/lib/compress/zstd_compress.c
+++ b/lib/compress/zstd_compress.c
@@ -2046,7 +2046,7 @@
  * If x == 0: Return 0
  * Else: Return floor(-log2(x / 256) * 256)
  */
-static unsigned const kInverseProbabiltyLog256[256] = {
+static unsigned const kInverseProbabilityLog256[256] = {
     0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
     1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
     874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
@@ -2085,7 +2085,7 @@
         if (count[s] != 0 && norm == 0)
             norm = 1;
         assert(count[s] < total);
-        cost += count[s] * kInverseProbabiltyLog256[norm];
+        cost += count[s] * kInverseProbabilityLog256[norm];
     }
     return cost >> 8;
 }
@@ -2108,7 +2108,7 @@
         unsigned const norm256 = normAcc << shift;
         assert(norm256 > 0);
         assert(norm256 < 256);
-        cost += count[s] * kInverseProbabiltyLog256[norm256];
+        cost += count[s] * kInverseProbabilityLog256[norm256];
     }
     return cost >> 8;
 }
@@ -2611,7 +2611,7 @@
         FORWARD_IF_ERROR(bitstreamSize);
         op += bitstreamSize;
         /* zstd versions <= 1.3.4 mistakenly report corruption when
-         * FSE_readNCount() recieves a buffer < 4 bytes.
+         * FSE_readNCount() receives a buffer < 4 bytes.
          * Fixed by https://github.com/facebook/zstd/pull/1146.
          * This can happen when the last set_compressed table present is 2
          * bytes and the bitstream is only one byte.
@@ -2914,7 +2914,7 @@
     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
     U32   const fcsCode = params.fParams.contentSizeFlag ?
                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
-    BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
     size_t pos=0;
 
     assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
@@ -2926,7 +2926,7 @@
         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
         pos = 4;
     }
-    op[pos++] = frameHeaderDecriptionByte;
+    op[pos++] = frameHeaderDescriptionByte;
     if (!singleSegment) op[pos++] = windowLogByte;
     switch(dictIDSizeCode)
     {
@@ -2950,7 +2950,7 @@
 /* ZSTD_writeLastEmptyBlock() :
  * output an empty Block with end-of-frame mark to complete a frame
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  */
 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
 {
diff --git a/lib/compress/zstd_compress_internal.h b/lib/compress/zstd_compress_internal.h
index 78b5355..cc3cbb9 100644
--- a/lib/compress/zstd_compress_internal.h
+++ b/lib/compress/zstd_compress_internal.h
@@ -36,9 +36,9 @@
 #define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
                                        It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                        It's not a big deal though : candidate will just be sorted again.
-                                       Additionnally, candidate position 1 will be lost.
+                                       Additionally, candidate position 1 will be lost.
                                        But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
-                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy
+                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy
                                        Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
 
 
@@ -842,7 +842,7 @@
 /* ZSTD_writeLastEmptyBlock() :
  * output an empty Block with end-of-frame mark to complete a frame
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  */
 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
 
diff --git a/lib/compress/zstd_lazy.h b/lib/compress/zstd_lazy.h
index ef85a6d..bb17630 100644
--- a/lib/compress/zstd_lazy.h
+++ b/lib/compress/zstd_lazy.h
@@ -19,7 +19,7 @@
 
 U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
 
-void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
 
 size_t ZSTD_compressBlock_btlazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
diff --git a/lib/compress/zstd_ldm.c b/lib/compress/zstd_ldm.c
index 58eb2ff..784d20f 100644
--- a/lib/compress/zstd_ldm.c
+++ b/lib/compress/zstd_ldm.c
@@ -429,7 +429,7 @@
      */
     assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
     /* The input could be very large (in zstdmt), so it must be broken up into
-     * chunks to enforce the maximmum distance and handle overflow correction.
+     * chunks to enforce the maximum distance and handle overflow correction.
      */
     assert(sequences->pos <= sequences->size);
     assert(sequences->size <= sequences->capacity);
diff --git a/lib/compress/zstd_opt.c b/lib/compress/zstd_opt.c
index cf2f70b..efb69d3 100644
--- a/lib/compress/zstd_opt.c
+++ b/lib/compress/zstd_opt.c
@@ -885,7 +885,7 @@
             /* large match -> immediate encoding */
             {   U32 const maxML = matches[nbMatches-1].len;
                 U32 const maxOffset = matches[nbMatches-1].off;
-                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new serie",
+                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
                             nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
 
                 if (maxML > sufficient_len) {
@@ -1133,7 +1133,7 @@
 /* ZSTD_initStats_ultra():
  * make a first compression pass, just to seed stats with more accurate starting values.
  * only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its constract must be respected.
+ * this function cannot error, hence its contract must be respected.
  */
 static void
 ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c
index 7e2c789..4b29fb3 100644
--- a/lib/compress/zstdmt_compress.c
+++ b/lib/compress/zstdmt_compress.c
@@ -456,7 +456,7 @@
      * Must be acquired after the main mutex when acquiring both.
      */
     ZSTD_pthread_mutex_t ldmWindowMutex;
-    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is udpated */
+    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */
     ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
 } serialState_t;
 
@@ -647,7 +647,7 @@
     buffer_t dstBuff = job->dstBuff;
     size_t lastCBlockSize = 0;
 
-    /* ressources */
+    /* resources */
     if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
     if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
         dstBuff = ZSTDMT_getBuffer(job->bufPool);
@@ -1527,7 +1527,7 @@
 /* ZSTDMT_writeLastEmptyBlock()
  * Write a single empty block with an end-of-frame to finish a frame.
  * Job must be created from streaming variant.
- * This function is always successfull if expected conditions are fulfilled.
+ * This function is always successful if expected conditions are fulfilled.
  */
 static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
 {
diff --git a/lib/compress/zstdmt_compress.h b/lib/compress/zstdmt_compress.h
index bae830e..8d274ce 100644
--- a/lib/compress/zstdmt_compress.h
+++ b/lib/compress/zstdmt_compress.h
@@ -103,7 +103,7 @@
  * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
 typedef enum {
     ZSTDMT_p_jobSize,     /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
-    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
     ZSTDMT_p_rsyncable    /* Enables rsyncable mode. */
 } ZSTDMT_parameter;
 
diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c
index 4f29227..675596f 100644
--- a/lib/decompress/zstd_decompress.c
+++ b/lib/decompress/zstd_decompress.c
@@ -1187,7 +1187,7 @@
 }
 
 /*! ZSTD_getDictID_fromFrame() :
- *  Provides the dictID required to decompresse frame stored within `src`.
+ *  Provides the dictID required to decompress frame stored within `src`.
  *  If @return == 0, the dictID could not be decoded.
  *  This could for one of the following reasons :
  *  - The frame does not require a dictionary (most common case).
diff --git a/lib/decompress/zstd_decompress_block.c b/lib/decompress/zstd_decompress_block.c
index 4418c51..a2a7eed 100644
--- a/lib/decompress/zstd_decompress_block.c
+++ b/lib/decompress/zstd_decompress_block.c
@@ -801,7 +801,7 @@
 /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
  * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
  * bits before reloading. This value is the maximum number of bytes we read
- * after reloading when we are decoding long offets.
+ * after reloading when we are decoding long offsets.
  */
 #define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
     (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
@@ -1180,7 +1180,7 @@
 /* ZSTD_decompressSequencesLong() :
  * decompression function triggered when a minimum share of offsets is considered "long",
  * aka out of cache.
- * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes mearning "farther than memory cache distance".
+ * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
  * This function will try to mitigate main memory latency through the use of prefetching */
 static size_t
 ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c
index ed5a02f..21464ad 100644
--- a/lib/dictBuilder/cover.c
+++ b/lib/dictBuilder/cover.c
@@ -391,7 +391,7 @@
  *
  *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
  *
- * Once the dmer d is in the dictionay we set F(d) = 0.
+ * Once the dmer d is in the dictionary we set F(d) = 0.
  */
 static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                            COVER_map_t *activeDmers, U32 begin,
@@ -435,7 +435,7 @@
       U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
       activeSegment.begin += 1;
       *delDmerOcc -= 1;
-      /* If this is the last occurence of the dmer, subtract its score */
+      /* If this is the last occurrence of the dmer, subtract its score */
       if (*delDmerOcc == 0) {
         COVER_map_remove(activeDmers, delDmer);
         activeSegment.score -= freqs[delDmer];
diff --git a/lib/dictBuilder/cover.h b/lib/dictBuilder/cover.h
index 27e6fb7..efb4680 100644
--- a/lib/dictBuilder/cover.h
+++ b/lib/dictBuilder/cover.h
@@ -51,9 +51,9 @@
  * We will make sure that each epoch gets at least 10 * k bytes.
  *
  * The COVER algorithms divide the data up into epochs of equal size and
- * select one segemnt from each epoch.
+ * select one segment from each epoch.
  *
- * @param maxDictSize The maximum allowed dictioary size.
+ * @param maxDictSize The maximum allowed dictionary size.
  * @param nbDmers     The number of dmers we are training on.
  * @param k           The parameter k (segment size).
  * @param passes      The target number of passes over the dmer corpus.
diff --git a/lib/dictBuilder/fastcover.c b/lib/dictBuilder/fastcover.c
index 6cf3702..5b6b941 100644
--- a/lib/dictBuilder/fastcover.c
+++ b/lib/dictBuilder/fastcover.c
@@ -132,7 +132,7 @@
  *
  *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
  *
- * Once the dmer with hash value d is in the dictionay we set F(d) = 0.
+ * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
  */
 static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
                                               U32 *freqs, U32 begin, U32 end,
@@ -161,7 +161,7 @@
     /* Get hash value of current dmer */
     const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
 
-    /* Add frequency of this index to score if this is the first occurence of index in active segment */
+    /* Add frequency of this index to score if this is the first occurrence of index in active segment */
     if (segmentFreqs[idx] == 0) {
       activeSegment.score += freqs[idx];
     }
diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c
index bb0f4b5..cad2b99 100644
--- a/lib/legacy/zstd_v01.c
+++ b/lib/legacy/zstd_v01.c
@@ -1759,7 +1759,7 @@
                                 BYTE* const base, BYTE* const oend)
 {
     static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* substracted */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
     const BYTE* const ostart = op;
     const size_t litLength = sequence.litLength;
     BYTE* const endMatch = op + litLength + sequence.matchLength;    /* risk : address space overflow (32-bits) */
diff --git a/lib/legacy/zstd_v02.c b/lib/legacy/zstd_v02.c
index 5948359..561bc41 100644
--- a/lib/legacy/zstd_v02.c
+++ b/lib/legacy/zstd_v02.c
@@ -3098,7 +3098,7 @@
                                 BYTE* const base, BYTE* const oend)
 {
     static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* substracted */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
     const BYTE* const ostart = op;
     BYTE* const oLitEnd = op + sequence.litLength;
     BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
diff --git a/lib/legacy/zstd_v03.c b/lib/legacy/zstd_v03.c
index b6c60d2..a1bf0fa 100644
--- a/lib/legacy/zstd_v03.c
+++ b/lib/legacy/zstd_v03.c
@@ -2739,7 +2739,7 @@
                                 BYTE* const base, BYTE* const oend)
 {
     static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* substracted */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
     const BYTE* const ostart = op;
     BYTE* const oLitEnd = op + sequence.litLength;
     BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
diff --git a/lib/legacy/zstd_v04.c b/lib/legacy/zstd_v04.c
index 65dc64d..4342330 100644
--- a/lib/legacy/zstd_v04.c
+++ b/lib/legacy/zstd_v04.c
@@ -2862,7 +2862,7 @@
                                 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
     static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
     BYTE* const oLitEnd = op + sequence.litLength;
     const size_t sequenceLength = sequence.litLength + sequence.matchLength;
     BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c
index 1c39f2f..caaf15f 100644
--- a/lib/legacy/zstd_v05.c
+++ b/lib/legacy/zstd_v05.c
@@ -3219,7 +3219,7 @@
                                 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
     static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
     BYTE* const oLitEnd = op + sequence.litLength;
     const size_t sequenceLength = sequence.litLength + sequence.matchLength;
     BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c
index 65975ac..a695cbb 100644
--- a/lib/legacy/zstd_v06.c
+++ b/lib/legacy/zstd_v06.c
@@ -3408,7 +3408,7 @@
     if (sequence.offset < 8) {
         /* close range match, overlap */
         static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
         int const sub2 = dec64table[sequence.offset];
         op[0] = match[0];
         op[1] = match[1];
diff --git a/lib/legacy/zstd_v07.c b/lib/legacy/zstd_v07.c
index 443524b..6b94889 100644
--- a/lib/legacy/zstd_v07.c
+++ b/lib/legacy/zstd_v07.c
@@ -3633,7 +3633,7 @@
     if (sequence.offset < 8) {
         /* close range match, overlap */
         static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
         int const sub2 = dec64table[sequence.offset];
         op[0] = match[0];
         op[1] = match[1];
diff --git a/lib/zstd.h b/lib/zstd.h
index cc87628..53470c1 100644
--- a/lib/zstd.h
+++ b/lib/zstd.h
@@ -432,7 +432,7 @@
  *  Note 3 : Whenever all input data is provided and consumed in a single round,
  *           for example with ZSTD_compress2(),
  *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
- *           this value is automatically overriden by srcSize instead.
+ *           this value is automatically overridden by srcSize instead.
  */
 ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
 
@@ -635,8 +635,8 @@
  *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
  *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
  *  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
- *  - outpot->pos must be <= dstCapacity, input->pos must be <= srcSize
- *  - outpot->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
  *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
  *  - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available,
  *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
@@ -662,7 +662,7 @@
 
 /*******************************************************************************
  * This is a legacy streaming API, and can be replaced by ZSTD_CCtx_reset() and
- * ZSTD_compressStream2(). It is redundent, but is still fully supported.
+ * ZSTD_compressStream2(). It is redundant, but is still fully supported.
  * Advanced parameters and dictionary compression can only be used through the
  * new API.
  ******************************************************************************/
@@ -1023,7 +1023,7 @@
 #define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
                                            * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
                                            * to preserve host's memory from unreasonable requirements.
-                                           * This limit can be overriden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
                                            * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
 
 
diff --git a/programs/benchzstd.c b/programs/benchzstd.c
index 94ec5f2..263dc08 100644
--- a/programs/benchzstd.c
+++ b/programs/benchzstd.c
@@ -383,7 +383,7 @@
         }
     }
 
-    /* warmimg up `compressedBuffer` */
+    /* warming up `compressedBuffer` */
     if (adv->mode == BMK_decodeOnly) {
         memcpy(compressedBuffer, srcBuffer, loadedCompressedSize);
     } else {
diff --git a/programs/benchzstd.h b/programs/benchzstd.h
index 376a80a..2c76277 100644
--- a/programs/benchzstd.h
+++ b/programs/benchzstd.h
@@ -170,7 +170,7 @@
  *  comprParams - basic compression parameters
  *  dictBuffer - a dictionary if used, null otherwise
  *  dictBufferSize - size of dictBuffer, 0 otherwise
- *  diplayLevel - see BMK_benchFiles
+ *  displayLevel - see BMK_benchFiles
  *  displayName - name used by display
  * @return:
  *      a variant, which expresses either an error, or a valid result.
diff --git a/programs/fileio.c b/programs/fileio.c
index 30514d4..7ada592 100644
--- a/programs/fileio.c
+++ b/programs/fileio.c
@@ -1060,7 +1060,7 @@
                         /* test if compression is blocked
                          * either because output is slow and all buffers are full
                          * or because input is slow and no job can start while waiting for at least one buffer to be filled.
-                         * note : excluse starting part, since currentJobID > 1 */
+                         * note : exclude starting part, since currentJobID > 1 */
                         if ( (zfp.consumed == previous_zfp_update.consumed)   /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/
                           && (zfp.nbActiveWorkers == 0)                       /* confirmed : no compression ongoing */
                           ) {
diff --git a/programs/zstdcli.c b/programs/zstdcli.c
index 904bcdf..fbb1c04 100644
--- a/programs/zstdcli.c
+++ b/programs/zstdcli.c
@@ -347,7 +347,7 @@
 
 /**
  * parseLegacyParameters() :
- * reads legacy dictioanry builter parameters from *stringPtr (e.g. "--train-legacy=selectivity=8") into *selectivity
+ * reads legacy dictionary builder parameters from *stringPtr (e.g. "--train-legacy=selectivity=8") into *selectivity
  * @return 1 means that legacy dictionary builder parameters were correct
  * @return 0 in case of malformed parameters
  */
diff --git a/tests/README.md b/tests/README.md
index 7c6fb0d..f345011 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -72,7 +72,7 @@
 
 This tool will generate .zst files with checksums,
 as well as optionally output the corresponding correct uncompressed data for
-extra verfication.
+extra verification.
 
 Example:
 ```
@@ -123,7 +123,7 @@
                     Higher values will make optimizer run longer, more chances to find better solution.
     memLog    : Limits the log of the size of each memotable (1 per strategy). Will use hash tables when state space is larger than max size.
                     Setting memLog = 0 turns off memoization
- --display=   : specifiy which parameters are included in the output
+ --display=   : specify which parameters are included in the output
                     can use all --zstd parameter names and 'cParams' as a shorthand for all parameters used in ZSTD_compressionParameters
                     (Default: display all params available)
  -P#          : generated sample compressibility (when no file is provided)
diff --git a/tests/decodecorpus.c b/tests/decodecorpus.c
index d8b3324..9910d3c 100644
--- a/tests/decodecorpus.c
+++ b/tests/decodecorpus.c
@@ -514,7 +514,7 @@
         if ((RAND(seed) & 3) || !frame->stats.hufInit) {
             do {
                 if (RAND(seed) & 3) {
-                    /* add 10 to ensure some compressability */
+                    /* add 10 to ensure some compressibility */
                     double const weight = ((RAND(seed) % 90) + 10) / 100.0;
 
                     DISPLAYLEVEL(5, "    distribution weight: %d%%\n",
diff --git a/tests/fuzz/Makefile b/tests/fuzz/Makefile
index 12ec952..31b151b 100644
--- a/tests/fuzz/Makefile
+++ b/tests/fuzz/Makefile
@@ -104,7 +104,7 @@
 	$(AR) $(FUZZ_ARFLAGS) $@ regression_driver.o
 
 # Install libfuzzer (not usable for MSAN testing)
-# Provided for convienence. To use this library run make libFuzzer and
+# Provided for convenience. To use this library run make libFuzzer and
 # set LDFLAGS=-L.
 .PHONY: libFuzzer
 libFuzzer:
diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md
index f184be6..9e0bb25 100644
--- a/tests/fuzz/README.md
+++ b/tests/fuzz/README.md
@@ -37,8 +37,8 @@
 `--lib-fuzzing-engine`, the default is `libregression.a`.
 It has flags that can easily set up sanitizers `--enable-{a,ub,m}san`, and
 coverage instrumentation `--enable-coverage`.
-It sets sane defaults which can be overriden with flags `--debug`,
-`--enable-ubsan-pointer-overlow`, etc.
+It sets sane defaults which can be overridden with flags `--debug`,
+`--enable-ubsan-pointer-overflow`, etc.
 Run `./fuzz.py build -h` for help.
 
 ### Running Fuzzers
diff --git a/tests/fuzz/fuzz.py b/tests/fuzz/fuzz.py
index ee27015..cd2a5b4 100755
--- a/tests/fuzz/fuzz.py
+++ b/tests/fuzz/fuzz.py
@@ -339,13 +339,13 @@
     args = parse_env_flags(args, ' '.join(
         [args.cppflags, args.cflags, args.cxxflags, args.ldflags]))
 
-    # Check option sanitiy
+    # Check option sanity
     if args.msan and (args.asan or args.ubsan):
         raise RuntimeError('MSAN may not be used with any other sanitizers')
     if args.msan_track_origins and not args.msan:
         raise RuntimeError('--enable-msan-track-origins requires MSAN')
     if args.ubsan_pointer_overflow and not args.ubsan:
-        raise RuntimeError('--enable-ubsan-pointer-overlow requires UBSAN')
+        raise RuntimeError('--enable-ubsan-pointer-overflow requires UBSAN')
     if args.sanitize_recover and not args.sanitize:
         raise RuntimeError('--enable-sanitize-recover but no sanitizers used')
 
@@ -623,7 +623,7 @@
 
 def gen_parser(args):
     description = """
-    Generate a seed corpus appropiate for TARGET with data generated with
+    Generate a seed corpus appropriate for TARGET with data generated with
     decodecorpus.
     The fuzz inputs are prepended with a seed before the zstd data, so the
     output of decodecorpus shouldn't be used directly.
diff --git a/tests/fuzz/fuzz_helpers.h b/tests/fuzz/fuzz_helpers.h
index 468c39f..0cf79d0 100644
--- a/tests/fuzz/fuzz_helpers.h
+++ b/tests/fuzz/fuzz_helpers.h
@@ -55,7 +55,7 @@
 #endif
 
 /**
- * Determininistically constructs a seed based on the fuzz input.
+ * Deterministically constructs a seed based on the fuzz input.
  * Consumes up to the first FUZZ_RNG_SEED_SIZE bytes of the input.
  */
 FUZZ_STATIC uint32_t FUZZ_seed(uint8_t const **src, size_t* size) {
diff --git a/tests/fuzz/zstd_helpers.c b/tests/fuzz/zstd_helpers.c
index 0e64400..9dff289 100644
--- a/tests/fuzz/zstd_helpers.c
+++ b/tests/fuzz/zstd_helpers.c
@@ -75,7 +75,7 @@
     setRand(cctx, ZSTD_c_contentSizeFlag, 0, 1, state);
     setRand(cctx, ZSTD_c_checksumFlag, 0, 1, state);
     setRand(cctx, ZSTD_c_dictIDFlag, 0, 1, state);
-    /* Select long distance matchig parameters */
+    /* Select long distance matching parameters */
     setRand(cctx, ZSTD_c_enableLongDistanceMatching, 0, 1, state);
     setRand(cctx, ZSTD_c_ldmHashLog, ZSTD_HASHLOG_MIN, 16, state);
     setRand(cctx, ZSTD_c_ldmMinMatch, ZSTD_LDM_MINMATCH_MIN,
diff --git a/tests/fuzzer.c b/tests/fuzzer.c
index d5f872d..1a31c78 100644
--- a/tests/fuzzer.c
+++ b/tests/fuzzer.c
@@ -909,7 +909,7 @@
         CHECK_EQ(value, 5);
         CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_jobSize, &value) );
         CHECK_EQ(value, 2 MB);
-        /* Set the number of worksers and check the overlap log and job size. */
+        /* Set the number of workers and check the overlap log and job size. */
         CHECK( ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, 2) );
         CHECK( ZSTD_CCtxParams_getParameter(params, ZSTD_c_overlapLog, &value) );
         CHECK_EQ(value, 5);
diff --git a/tests/paramgrill.c b/tests/paramgrill.c
index fb3c776..75c179a 100644
--- a/tests/paramgrill.c
+++ b/tests/paramgrill.c
@@ -1235,7 +1235,7 @@
     return 0;
 }
 
-/* allocates buffer's arguments. returns success / failuere */
+/* allocates buffer's arguments. returns success / failure */
 static int createBuffers(buffers_t* buff, const char* const * const fileNamesTable,
                           size_t nbFiles) {
     size_t pos = 0;
@@ -1508,7 +1508,7 @@
 }
 
 /* Sets pc to random unmeasured set of parameters */
-/* specifiy strategy */
+/* specify strategy */
 static void randomConstrainedParams(paramValues_t* pc, const memoTable_t* memoTableArray, const ZSTD_strategy st)
 {
     size_t j;
@@ -1573,7 +1573,7 @@
     display_params_tested(*comprParams);
     memset(&bResult, 0, sizeof(bResult));
 
-    /* warmimg up memory */
+    /* warming up memory */
     for (i = 0; i < buf.nbBlocks; i++) {
         if (mode != BMK_decodeOnly) {
             RDG_genBuffer(dstPtrs[i], dstCapacities[i], 0.10, 0.50, 1);
@@ -1992,7 +1992,7 @@
 
 /* BMK_generate_cLevelTable() :
  * test a large number of configurations
- * and distribute them accross compression levels according to speed conditions.
+ * and distribute them across compression levels according to speed conditions.
  * display and save all intermediate results into rfName = "grillResults.txt".
  * the function automatically stops after g_timeLimit_s.
  * this function cannot error, it directly exit() in case of problem.
@@ -2270,7 +2270,7 @@
 
 /* Optimizes for a fixed strategy */
 
-/* flexible parameters: iterations of failed climbing (or if we do non-random, maybe this is when everything is close to visitied)
+/* flexible parameters: iterations of failed climbing (or if we do non-random, maybe this is when everything is close to visited)
    weight more on visit for bad results, less on good results/more on later results / ones with more failures.
    allocate memoTable here.
  */
diff --git a/tests/regression/test.c b/tests/regression/test.c
index 9e7b83c..812893b 100644
--- a/tests/regression/test.c
+++ b/tests/regression/test.c
@@ -153,7 +153,7 @@
     }
 }
 
-/** Parse the arguments. Teturn 0 on success. Print help on failure. */
+/** Parse the arguments. Return 0 on success. Print help on failure. */
 static int parse_args(int argc, char** argv) {
     int option_index = 0;
     int c;
diff --git a/tests/zstreamtest.c b/tests/zstreamtest.c
index f07477a..55c14ad 100644
--- a/tests/zstreamtest.c
+++ b/tests/zstreamtest.c
@@ -771,7 +771,7 @@
     }
     DISPLAYLEVEL(3, "OK \n");
 
-    DISPLAYLEVEL(3, "test%3i : ZSTD_resetDStream() wtih dictionary : ", testNb++);
+    DISPLAYLEVEL(3, "test%3i : ZSTD_resetDStream() with dictionary : ", testNb++);
     {
         ZSTD_DCtx* dctx = ZSTD_createDCtx();
         /* We should succeed to decompress with the dictionary. */
@@ -1051,7 +1051,7 @@
         inBuff.size = srcSize; assert(srcSize < COMPRESSIBLE_NOISE_LENGTH);
         inBuff.pos = 0;
     }
-    {   ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, 4 KB, dictionary.filled);   /* intentionnally lies on estimatedSrcSize, to push cdict into targeting a small window size */
+    {   ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, 4 KB, dictionary.filled);   /* intentionally lies on estimatedSrcSize, to push cdict into targeting a small window size */
         ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictionary.start, dictionary.filled, ZSTD_dlm_byRef, ZSTD_dct_fullDict, cParams, ZSTD_defaultCMem);
         DISPLAYLEVEL(5, "cParams.windowLog = %u : ", cParams.windowLog);
         CHECK_Z( ZSTD_CCtx_refCDict(zc, cdict) );
@@ -2069,7 +2069,7 @@
                     CHECK_Z( ZSTD_CCtx_setPledgedSrcSize(zc, pledgedSrcSize) );
                 }
 
-                /* multi-threading parameters. Only adjust ocassionally for small tests. */
+                /* multi-threading parameters. Only adjust occasionally for small tests. */
                 if (bigTests || (FUZ_rand(&lseed) & 0xF) == 0xF) {
                     U32 const nbThreadsCandidate = (FUZ_rand(&lseed) & 4) + 1;
                     U32 const nbThreadsAdjusted = (windowLogMalus < nbThreadsCandidate) ? nbThreadsCandidate - windowLogMalus : 1;
diff --git a/zlibWrapper/README.md b/zlibWrapper/README.md
index 164b69a..e61767c 100644
--- a/zlibWrapper/README.md
+++ b/zlibWrapper/README.md
@@ -71,7 +71,7 @@
 The script used for compilation can be found at [zlibWrapper/Makefile](Makefile).
 
 
-#### The measurement of performace of Zstandard wrapper for zlib
+#### The measurement of performance of Zstandard wrapper for zlib
 
 The zstd distribution contains a tool called `zwrapbench` which can measure speed and ratio of zlib, zstd, and the wrapper.
 The benchmark is conducted using given filenames or synthetic data if filenames are not provided.
@@ -96,8 +96,8 @@
 #### Reusing contexts
 
 The ordinary zlib compression of two files/streams allocates two contexts:
-- for the 1st file calls `deflateInit`, `deflate`, `...`, `deflate`, `defalateEnd`
-- for the 2nd file calls `deflateInit`, `deflate`, `...`, `deflate`, `defalateEnd`
+- for the 1st file calls `deflateInit`, `deflate`, `...`, `deflate`, `deflateEnd`
+- for the 2nd file calls `deflateInit`, `deflate`, `...`, `deflate`, `deflateEnd`
 
 The speed of compression can be improved with reusing a single context with following steps:
 - initialize the context with `deflateInit`
diff --git a/zlibWrapper/examples/fitblk.c b/zlibWrapper/examples/fitblk.c
index ee413c3..6418ca3 100644
--- a/zlibWrapper/examples/fitblk.c
+++ b/zlibWrapper/examples/fitblk.c
@@ -21,7 +21,7 @@
    data in order to determine how much of that input will compress to
    nearly the requested output block size.  The first pass generates
    enough deflate blocks to produce output to fill the requested
-   output size plus a specfied excess amount (see the EXCESS define
+   output size plus a specified excess amount (see the EXCESS define
    below).  The last deflate block may go quite a bit past that, but
    is discarded.  The second pass decompresses and recompresses just
    the compressed data that fit in the requested plus excess sized
@@ -217,7 +217,7 @@
     if (ret == Z_MEM_ERROR)
         quit("out of memory");
 
-    /* set up for next reocmpression */
+    /* set up for next recompression */
     ret = inflateReset(&inf);
     assert(ret != Z_STREAM_ERROR);
     ret = deflateReset(&def);
diff --git a/zlibWrapper/examples/fitblk_original.c b/zlibWrapper/examples/fitblk_original.c
index c61de5c..20f351b 100644
--- a/zlibWrapper/examples/fitblk_original.c
+++ b/zlibWrapper/examples/fitblk_original.c
@@ -17,7 +17,7 @@
    data in order to determine how much of that input will compress to
    nearly the requested output block size.  The first pass generates
    enough deflate blocks to produce output to fill the requested
-   output size plus a specfied excess amount (see the EXCESS define
+   output size plus a specified excess amount (see the EXCESS define
    below).  The last deflate block may go quite a bit past that, but
    is discarded.  The second pass decompresses and recompresses just
    the compressed data that fit in the requested plus excess sized
@@ -198,7 +198,7 @@
     if (ret == Z_MEM_ERROR)
         quit("out of memory");
 
-    /* set up for next reocmpression */
+    /* set up for next recompression */
     ret = inflateReset(&inf);
     assert(ret != Z_STREAM_ERROR);
     ret = deflateReset(&def);
diff --git a/zlibWrapper/examples/zwrapbench.c b/zlibWrapper/examples/zwrapbench.c
index 99f9e11..61031b9 100644
--- a/zlibWrapper/examples/zwrapbench.c
+++ b/zlibWrapper/examples/zwrapbench.c
@@ -193,7 +193,7 @@
                 remaining -= thisBlockSize;
     }   }   }
 
-    /* warmimg up memory */
+    /* warming up memory */
     RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1);
 
     /* Bench */