[3.10] Fix typos in the Tools directory (GH-28769) (GH-28801)

Like GH-28744 but for the Tools directory.

Automerge-Triggered-By: GH:pablogsal
(cherry picked from commit 682aecfdeba481c876bfc9f3796c635bd5b5df50)

Co-authored-by: Christian Clauss <cclauss@me.com>
diff --git a/Tools/c-analyzer/c_analyzer/__main__.py b/Tools/c-analyzer/c_analyzer/__main__.py
index 24fc6cd..5d89b29 100644
--- a/Tools/c-analyzer/c_analyzer/__main__.py
+++ b/Tools/c-analyzer/c_analyzer/__main__.py
@@ -482,7 +482,7 @@ def cmd_data(datacmd, filenames, known=None, *,
         cmd_analyze,
     ),
     'data': (
-        'check/manage local data (e.g. knwon types, ignored vars, caches)',
+        'check/manage local data (e.g. known types, ignored vars, caches)',
         [_cli_data],
         cmd_data,
     ),
diff --git a/Tools/c-analyzer/c_analyzer/info.py b/Tools/c-analyzer/c_analyzer/info.py
index b75918e..27c3a5a 100644
--- a/Tools/c-analyzer/c_analyzer/info.py
+++ b/Tools/c-analyzer/c_analyzer/info.py
@@ -230,11 +230,11 @@ def fix_filename(self, relroot=fsutil.USE_CWD, **kwargs):
         return self
 
     def as_rowdata(self, columns=None):
-        # XXX finsih!
+        # XXX finish!
         return self.item.as_rowdata(columns)
 
     def render_rowdata(self, columns=None):
-        # XXX finsih!
+        # XXX finish!
         return self.item.render_rowdata(columns)
 
     def render(self, fmt='line', *, itemonly=False):
diff --git a/Tools/c-analyzer/c_common/logging.py b/Tools/c-analyzer/c_common/logging.py
index 12398f7..10af852 100644
--- a/Tools/c-analyzer/c_common/logging.py
+++ b/Tools/c-analyzer/c_common/logging.py
@@ -41,7 +41,7 @@ def configure_logger(logger, verbosity=VERBOSITY, *,
 def hide_emit_errors():
     """Ignore errors while emitting log entries.
 
-    Rather than printing a message desribing the error, we show nothing.
+    Rather than printing a message describing the error, we show nothing.
     """
     # For now we simply ignore all exceptions.  If we wanted to ignore
     # specific ones (e.g. BrokenPipeError) then we would need to use
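
The docstring fixed in this hunk describes swallowing handler errors outright, and the trailing comment hints at narrowing that to specific exceptions. One way to get the blanket behaviour is the documented logging.raiseExceptions flag; the sketch below is hypothetical (silence_emit_errors is not the real hide_emit_errors helper) and only illustrates that mechanism:

    import logging
    from contextlib import contextmanager

    @contextmanager
    def silence_emit_errors():
        # Hypothetical helper: while active, exceptions raised inside
        # Handler.emit() are silently ignored by Handler.handleError()
        # because the module-level logging.raiseExceptions flag is False.
        saved = logging.raiseExceptions
        logging.raiseExceptions = False
        try:
            yield
        finally:
            logging.raiseExceptions = saved
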
diff --git a/Tools/c-analyzer/c_common/strutil.py b/Tools/c-analyzer/c_common/strutil.py
index e7535d4..07193c0 100644
--- a/Tools/c-analyzer/c_common/strutil.py
+++ b/Tools/c-analyzer/c_common/strutil.py
@@ -26,7 +26,7 @@ def parse_entries(entries, *, ignoresep=None):
                     # We read the entire file here to ensure the file
                     # gets closed sooner rather than later.  Note that
                     # the file would stay open if this iterator is never
-                    # exchausted.
+                    # exhausted.
                     lines = infile.read().splitlines()
                 for line in _iter_significant_lines(lines):
                     yield line, filename
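
The comment corrected in this hunk explains why parse_entries slurps each file instead of yielding lazily from the open handle. A small illustration of that trade-off, with hypothetical names standing in for the real code:

    def lazy_lines(filename):
        # The file stays open for as long as this generator is alive; if
        # the caller abandons the iterator, closing waits for garbage
        # collection of the suspended generator.
        with open(filename) as infile:
            for line in infile:
                yield line

    def eager_lines(filename):
        # Reading everything up front lets the with-block close the file
        # immediately, even if the caller never exhausts the iterator.
        with open(filename) as infile:
            lines = infile.read().splitlines()
        yield from lines
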
diff --git a/Tools/c-analyzer/c_parser/info.py b/Tools/c-analyzer/c_parser/info.py
index 98ff511..697b1f2 100644
--- a/Tools/c-analyzer/c_parser/info.py
+++ b/Tools/c-analyzer/c_parser/info.py
@@ -1029,7 +1029,7 @@ def _resolve_data(cls, data):
 
     @classmethod
     def _raw_data(self, data):
-        # XXX finsh!
+        # XXX finish!
         return data
 
     @classmethod
@@ -1255,7 +1255,7 @@ def _resolve_data(cls, data):
 
     @classmethod
     def _raw_data(self, data):
-        # XXX finsih!
+        # XXX finish!
         return data
 
     @classmethod
@@ -1296,12 +1296,12 @@ class Statement(HighlevelParsedItem):
 
     @classmethod
     def _resolve_data(cls, data):
-        # XXX finsih!
+        # XXX finish!
         return data, None
 
     @classmethod
     def _raw_data(self, data):
-        # XXX finsih!
+        # XXX finish!
         return data
 
     @classmethod
diff --git a/Tools/c-analyzer/c_parser/parser/__init__.py b/Tools/c-analyzer/c_parser/parser/__init__.py
index 3905609..df70aae 100644
--- a/Tools/c-analyzer/c_parser/parser/__init__.py
+++ b/Tools/c-analyzer/c_parser/parser/__init__.py
@@ -7,7 +7,7 @@
 
 Furthermore, the grammar rules for the C syntax (particularly as
 described in the K&R book) actually describe a superset, of which the
-full C langage is a proper subset.  Here are some of the extra
+full C language is a proper subset.  Here are some of the extra
 conditions that must be applied when parsing C code:
 
 * ...
@@ -90,7 +90,7 @@
 * no "inline" type declarations (struct, union, enum) in function
   parameters ~(including function pointers)~
 * no "inline" type decls in function return types
-* no superflous parentheses in declarators
+* no superfluous parentheses in declarators
 * var decls in for loops are always "simple" (e.g. no inline types)
 * only inline struct/union/enum decls may be anonymouns (without a name)
 * no function pointers in function pointer parameters
diff --git a/Tools/c-analyzer/c_parser/preprocessor/__init__.py b/Tools/c-analyzer/c_parser/preprocessor/__init__.py
index 8da4d8c..e38176f 100644
--- a/Tools/c-analyzer/c_parser/preprocessor/__init__.py
+++ b/Tools/c-analyzer/c_parser/preprocessor/__init__.py
@@ -19,7 +19,7 @@
 logger = logging.getLogger(__name__)
 
 
-# Supprted "source":
+# Supported "source":
 #  * filename (string)
 #  * lines (iterable)
 #  * text (string)
@@ -156,7 +156,7 @@ def handling_errors(ignore_exc=None, *, log_err=None):
 # tools
 
 _COMPILERS = {
-    # matching disutils.ccompiler.compiler_class:
+    # matching distutils.ccompiler.compiler_class:
     'unix': _gcc.preprocess,
     'msvc': None,
     'cygwin': None,
diff --git a/Tools/c-analyzer/cpython/__main__.py b/Tools/c-analyzer/cpython/__main__.py
index a11b687..06ec871 100644
--- a/Tools/c-analyzer/cpython/__main__.py
+++ b/Tools/c-analyzer/cpython/__main__.py
@@ -342,7 +342,7 @@ def cmd_capi(filenames=None, *,
         cmd_parse,
     ),
     'data': (
-        'check/manage local data (e.g. knwon types, ignored vars, caches)',
+        'check/manage local data (e.g. known types, ignored vars, caches)',
         [_cli_data],
         cmd_data,
     ),
diff --git a/Tools/c-analyzer/cpython/ignored.tsv b/Tools/c-analyzer/cpython/ignored.tsv
index e5d9378..91867ca 100644
--- a/Tools/c-analyzer/cpython/ignored.tsv
+++ b/Tools/c-analyzer/cpython/ignored.tsv
@@ -2128,7 +2128,7 @@
 #-----------------------
 # runtime state
 
-# (look at the bottome of the file)
+# (look at the bottom of the file)
 
 #-----------------------
 # modules
diff --git a/Tools/peg_generator/pegen/c_generator.py b/Tools/peg_generator/pegen/c_generator.py
index c1ab5e0..7941978 100644
--- a/Tools/peg_generator/pegen/c_generator.py
+++ b/Tools/peg_generator/pegen/c_generator.py
@@ -723,7 +723,7 @@ def handle_alt_normal(self, node: Alt, is_gather: bool, rulename: Optional[str])
             self.print(
                 f'D(fprintf(stderr, "%*c+ {rulename}[%d-%d]: %s succeeded!\\n", p->level, \' \', _mark, p->mark, "{node_str}"));'
             )
-            # Prepare to emmit the rule action and do so
+            # Prepare to emit the rule action and do so
             if node.action and "EXTRA" in node.action:
                 self._set_up_token_end_metadata_extraction()
             if self.skip_actions:
diff --git a/Tools/peg_generator/pegen/first_sets.py b/Tools/peg_generator/pegen/first_sets.py
index 71be5a2..ce80bb5 100755
--- a/Tools/peg_generator/pegen/first_sets.py
+++ b/Tools/peg_generator/pegen/first_sets.py
@@ -59,7 +59,7 @@ def visit_Alt(self, item: Alt) -> Set[str]:
                 result -= to_remove
 
             # If the set of new terminals can start with the empty string,
-            # it means that the item is completelly nullable and we should
+            # it means that the item is completely nullable and we should
             # also considering at least the next item in case the current
             # one fails to parse.
 
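
The comment touched here concerns FIRST-set computation: a nullable item cannot decide the matter on its own, so the scan must also look at the item that follows it. A generic sketch of that rule, not pegen's actual visitor:

    EMPTY = ""  # stands in for the empty string marking a nullable item

    def first_of_sequence(items, first_sets):
        # first_sets maps each grammar item to the set of terminals it can
        # start with, possibly including EMPTY when the item is nullable.
        result = set()
        for item in items:
            terminals = first_sets[item]
            result |= terminals - {EMPTY}
            if EMPTY not in terminals:
                # This item always consumes input, so later items cannot
                # contribute to the FIRST set of the sequence.
                break
        else:
            # Every item was nullable, so the whole sequence is too.
            result.add(EMPTY)
        return result
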
diff --git a/Tools/peg_generator/scripts/download_pypi_packages.py b/Tools/peg_generator/scripts/download_pypi_packages.py
index 9874202..ca12160 100755
--- a/Tools/peg_generator/scripts/download_pypi_packages.py
+++ b/Tools/peg_generator/scripts/download_pypi_packages.py
@@ -72,7 +72,7 @@ def main() -> None:
 
         package_json = load_json(package_name)
         try:
-            print(f"Dowloading and compressing package {package_name} ... ", end="")
+            print(f"Downloading and compressing package {package_name} ... ", end="")
             download_package_code(package_name, package_json)
             print("Done")
         except (IndexError, KeyError):
diff --git a/Tools/pynche/ColorDB.py b/Tools/pynche/ColorDB.py
index eb76d40..c013a60 100644
--- a/Tools/pynche/ColorDB.py
+++ b/Tools/pynche/ColorDB.py
@@ -9,7 +9,7 @@
 trouble reading the file, None is returned.  You can pass get_colordb() an
 optional filetype argument.
 
-Supporte file types are:
+Supported file types are:
 
     X_RGB_TXT -- X Consortium rgb.txt format files.  Three columns of numbers
                  from 0 .. 255 separated by whitespace.  Arbitrary trailing
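
The docstring being fixed describes the X rgb.txt layout: three 0..255 columns followed by arbitrary trailing text (in rgb.txt, the color name). A rough sketch of reading one such line; ColorDB's real parser may differ in detail:

    import re

    _LINE_RE = re.compile(r"\s*(\d+)\s+(\d+)\s+(\d+)\s+(.*)")

    def parse_rgb_line(line):
        # Returns ((r, g, b), trailing_text), or None for lines that do
        # not match the three-numbers-plus-text shape.
        m = _LINE_RE.match(line)
        if m is None:
            return None
        r, g, b = (int(m.group(i)) for i in (1, 2, 3))
        return (r, g, b), m.group(4).strip()

    # e.g. parse_rgb_line("255 250 250\t\tsnow") -> ((255, 250, 250), "snow")
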
diff --git a/Tools/scripts/dutree.doc b/Tools/scripts/dutree.doc
index 97bd2e2..490126b 100644
--- a/Tools/scripts/dutree.doc
+++ b/Tools/scripts/dutree.doc
@@ -15,7 +15,7 @@
 :And Perl is definitely awkward with data types.  I haven't yet found a
 :pleasant way of shoving non-trivial data types into Perl's grammar.
 
-Yes, it's pretty aweful at that, alright.  Sometimes I write perl programs
+Yes, it's pretty awful at that, alright.  Sometimes I write perl programs
 that need them, and sometimes it just takes a little creativity.  But
 sometimes it's not worth it.  I actually wrote a C program the other day
 (gasp) because I didn't want to deal with a game matrix with six links per node.
diff --git a/Tools/scripts/stable_abi.py b/Tools/scripts/stable_abi.py
index b7fd2c8..6d70340 100755
--- a/Tools/scripts/stable_abi.py
+++ b/Tools/scripts/stable_abi.py
@@ -67,7 +67,7 @@ class Manifest:
     def add(self, item):
         if item.name in self.contents:
             # We assume that stable ABI items do not share names,
-            # even if they're diferent kinds (e.g. function vs. macro).
+            # even if they're different kinds (e.g. function vs. macro).
             raise ValueError(f'duplicate ABI item {item.name}')
         self.contents[item.name] = item
 
@@ -295,7 +295,7 @@ def do_unixy_check(manifest, args):
     present_macros = gcc_get_limited_api_macros(['Include/Python.h'])
     feature_defines = manifest.feature_defines & present_macros
 
-    # Check that we have all neded macros
+    # Check that we have all needed macros
     expected_macros = set(
         item.name for item in manifest.select({'macro'})
     )
@@ -412,7 +412,7 @@ def binutils_check_library(manifest, library, expected_symbols, dynamic):
 def gcc_get_limited_api_macros(headers):
     """Get all limited API macros from headers.
 
-    Runs the preprocesor over all the header files in "Include" setting
+    Runs the preprocessor over all the header files in "Include" setting
     "-DPy_LIMITED_API" to the correct value for the running version of the
     interpreter and extracting all macro definitions (via adding -dM to the
     compiler arguments).
@@ -449,7 +449,7 @@ def gcc_get_limited_api_macros(headers):
 def gcc_get_limited_api_definitions(headers):
     """Get all limited API definitions from headers.
 
-    Run the preprocesor over all the header files in "Include" setting
+    Run the preprocessor over all the header files in "Include" setting
     "-DPy_LIMITED_API" to the correct value for the running version of the
     interpreter.
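
Both docstrings corrected in this file describe the same technique: run the preprocessor with -dM so it prints every macro definition in effect, then scrape the names. A simplified sketch of that idea (the real stable_abi.py passes many more include paths and flags, and the Py_LIMITED_API value below is only illustrative):

    import re
    import subprocess

    def dump_macro_names(header, limited_api="0x030A0000"):
        # gcc -E preprocesses the header; -dM makes it emit each
        # "#define NAME value" that is defined, instead of normal output.
        proc = subprocess.run(
            ["gcc", "-E", "-dM", f"-DPy_LIMITED_API={limited_api}", header],
            capture_output=True, text=True, check=True,
        )
        return set(re.findall(r"#define (\w+)", proc.stdout))
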