PyTokenizer_FindEncoding() always failed because it set the tokenizer state
with only a file pointer when it called fp_setreadl(), which expected a file
path. Changed fp_setreadl() to accept either a file path or a file descriptor
(derived from the file pointer) to fix the issue.

Closes issue 3594.
Reviewed by Antoine Pitrou and Benjamin Peterson.
diff --git a/Lib/test/test_imp.py b/Lib/test/test_imp.py
index e2b9e9a..3a3059e 100644
--- a/Lib/test/test_imp.py
+++ b/Lib/test/test_imp.py
@@ -1,4 +1,5 @@
 import imp
+import sys
 import unittest
 from test import support
 
@@ -59,6 +60,21 @@
                          '"""Tokenization help for Python programs.\n')
         fp.close()
 
+    def test_issue3594(self):
+        temp_mod_name = 'test_imp_helper'
+        sys.path.insert(0, '.')
+        try:
+            with open(temp_mod_name + '.py', 'w') as file:
+                file.write("# coding: cp1252\nu = 'test.test_imp'\n")
+            file, filename, info = imp.find_module(temp_mod_name)
+            file.close()
+            self.assertEqual(file.encoding, 'cp1252')
+        finally:
+            del sys.path[0]
+            support.unlink(temp_mod_name + '.py')
+            support.unlink(temp_mod_name + '.pyc')
+            support.unlink(temp_mod_name + '.pyo')
+
     def test_reload(self):
         import marshal
         imp.reload(marshal)