#3774: Check for errors around the use of PyTokenizer_FindEncoding().

reviewed by Brett Cannon.
diff --git a/Misc/NEWS b/Misc/NEWS
index 0e51c80..889cead 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,9 @@
 Core and Builtins
 -----------------
 
+- Issue 3774: Added a few more checks in PyTokenizer_FindEncoding to handle
+  error conditions.
+
 - Issue 3594: Fix Parser/tokenizer.c:fp_setreadl() to open the file being
   tokenized by either a file path or file pointer for the benefit of
   PyTokenizer_FindEncoding().
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index e4cf8e4..a040696 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1610,7 +1610,10 @@
 	fclose(fp);
 	if (tok->encoding) {
             encoding = (char *)PyMem_MALLOC(strlen(tok->encoding) + 1);
-            strcpy(encoding, tok->encoding);
+            if (encoding)
+                strcpy(encoding, tok->encoding);
+            else
+                PyErr_NoMemory();
         }
 	PyTokenizer_Free(tok);
 	return encoding;
diff --git a/Python/import.c b/Python/import.c
index d87d751..9c077fe 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -2830,6 +2830,8 @@
 			   memory. */
 			found_encoding = PyTokenizer_FindEncoding(fd);
 			lseek(fd, 0, 0); /* Reset position */
+			if (found_encoding == NULL && PyErr_Occurred())
+				return NULL;
 			encoding = (found_encoding != NULL) ? found_encoding :
 				   (char*)PyUnicode_GetDefaultEncoding();
 		}