Automatically detect the CUDA configuration during ./configure on the first attempt. If detection fails, prompt for more detail.
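
The change boils down to a validate-then-prompt retry loop: try to auto-detect
the CUDA configuration first, and only fall back to interactive questions (and
re-validation) when detection fails. A minimal, self-contained sketch of that
control flow; detect_config, prompt_for_details and MAX_ATTEMPTS are
hypothetical stand-ins for validate_cuda_config(), the set_tf_*() prompts and
_DEFAULT_PROMPT_ASK_ATTEMPTS in configure.py, not the actual implementation:

    MAX_ATTEMPTS = 10  # stand-in for _DEFAULT_PROMPT_ASK_ATTEMPTS

    def detect_config(env):
      """Stand-in for validate_cuda_config(): True if auto-detection worked."""
      return bool(env.get('CUDA_TOOLKIT_PATH'))

    def prompt_for_details(env):
      """Stand-in for the set_tf_cuda_version()/set_tf_cuda_paths() prompts."""
      env['CUDA_TOOLKIT_PATH'] = input('CUDA toolkit path: ')

    def configure_cuda(env):
      saved = dict(env)
      for _ in range(MAX_ATTEMPTS):
        if detect_config(env):
          return env             # first attempt usually succeeds, no questions asked
        env = dict(saved)        # roll back any partially applied answers
        prompt_for_details(env)  # ask for more detail, then re-validate
      raise RuntimeError(
          'Invalid CUDA settings were provided %d times in a row.' % MAX_ATTEMPTS)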

PiperOrigin-RevId: 243794789
diff --git a/configure.py b/configure.py
index c95b57f..fe0e6d7 100644
--- a/configure.py
+++ b/configure.py
@@ -33,13 +33,8 @@
   from distutils.spawn import find_executable as which
 # pylint: enable=g-import-not-at-top
 
-_DEFAULT_CUDA_VERSION = '10.0'
-_DEFAULT_CUDNN_VERSION = '7'
 _DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
-_DEFAULT_CUDA_PATH = '/usr/local/cuda'
-_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
-_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
-                          'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
+
 _TF_OPENCL_VERSION = '1.2'
 _DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
 _DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
@@ -69,11 +64,6 @@
     'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
 ]
 
-if platform.machine() == 'ppc64le':
-  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
-else:
-  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
-
 
 class UserInputError(Exception):
   pass
@@ -866,36 +856,30 @@
   """Set TF_CUDA_PATHS."""
   ask_cuda_paths = (
       'Please specify the comma-separated list of base paths to look for CUDA '
-      'libraries and headers. [Leave empty to use the default]: '
-  )
+      'libraries and headers. [Leave empty to use the default]: ')
   tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
-                                                  ask_cuda_paths,
-                                                  '')
+                                                  ask_cuda_paths, '')
   if tf_cuda_paths:
     environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
 
 
 def set_tf_cuda_version(environ_cp):
   """Set TF_CUDA_VERSION."""
-  ask_cuda_version = (
-      'Please specify the CUDA SDK version you want to use. '
-      '[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
+  ask_cuda_version = ('Please specify the CUDA SDK version you want to use. '
+                      '[Leave empty to accept any version]: ')
   tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
                                                     'TF_CUDA_VERSION',
-                                                    ask_cuda_version,
-                                                    _DEFAULT_CUDA_VERSION)
+                                                    ask_cuda_version, '')
   environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
 
 
 def set_tf_cudnn_version(environ_cp):
   """Set TF_CUDNN_VERSION."""
-  ask_cudnn_version = (
-      'Please specify the cuDNN version you want to use. '
-      '[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
+  ask_cudnn_version = ('Please specify the cuDNN version you want to use. '
+                       '[Leave empty to accept any version]: ')
   tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
                                                      'TF_CUDNN_VERSION',
-                                                     ask_cudnn_version,
-                                                     _DEFAULT_CUDNN_VERSION)
+                                                     ask_cudnn_version, '')
   environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
 
 
@@ -933,14 +917,12 @@
   if not is_linux():
     raise ValueError('Currently TensorRT is only supported on Linux platform.')
 
-  # Backwards compatibility: early-out when TF_NEED_TENSORRT=0.
-  if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
-                     False))) != '1':
+  if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
     return
 
   ask_tensorrt_version = (
       'Please specify the TensorRT version you want to use. '
-      '[Leave empty to not use a specific version]: ')
+      '[Leave empty to accept any version]: ')
   tf_tensorrt_version = get_from_env_or_user_or_default(environ_cp,
                                                         'TF_TENSORRT_VERSION',
                                                         ask_tensorrt_version,
@@ -953,9 +935,12 @@
   if not is_linux():
     raise ValueError('Currently NCCL is only supported on Linux platform.')
 
+  if 'TF_NCCL_VERSION' in environ_cp:
+    return
+
   ask_nccl_version = (
       'Please specify the locally installed NCCL version you want to use. '
-      '[Leave empty to use https://github.com/nvidia/nccl]: ')
+      '[Leave empty to use https://github.com/nvidia/nccl]: ')
   tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
                                                     'TF_NCCL_VERSION',
                                                     ask_nccl_version, '')
@@ -1316,6 +1301,66 @@
     symlink_force(filepath, new_filepath)
 
 
+def validate_cuda_config(environ_cp):
+  """Run find_cuda_config.py and return cuda_toolkit_path, or None."""
+
+  def maybe_encode_env(env):
+    """Encodes unicode in env to str on Windows python 2.x."""
+    if not is_windows() or sys.version_info[0] != 2:
+      return env
+    for k, v in env.items():
+      if isinstance(k, unicode):
+        k = k.encode('ascii')
+      if isinstance(v, unicode):
+        v = v.encode('ascii')
+      env[k] = v
+    return env
+
+  cuda_libraries = ['cuda', 'cudnn']
+  if is_linux():
+    if 'TF_TENSORRT_VERSION' in environ_cp:  # if env variable exists
+      cuda_libraries.append('tensorrt')
+    if environ_cp.get('TF_NCCL_VERSION', None):  # if env variable not empty
+      cuda_libraries.append('nccl')
+
+  proc = subprocess.Popen(
+      [environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
+      cuda_libraries,
+      stdout=subprocess.PIPE,
+      env=maybe_encode_env(environ_cp))
+
+  if proc.wait():
+    # Errors from find_cuda_config.py were sent to stderr.
+    print('\n\nAsking for detailed CUDA configuration...\n')
+    return False
+
+  config = dict(
+      tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
+
+  print('Found CUDA %s in:' % config['cuda_version'])
+  print('    %s' % config['cuda_library_dir'])
+  print('    %s' % config['cuda_include_dir'])
+
+  print('Found cuDNN %s in:' % config['cudnn_version'])
+  print('    %s' % config['cudnn_library_dir'])
+  print('    %s' % config['cudnn_include_dir'])
+
+  if 'tensorrt_version' in config:
+    print('Found TensorRT %s in:' % config['tensorrt_version'])
+    print('    %s' % config['tensorrt_library_dir'])
+    print('    %s' % config['tensorrt_include_dir'])
+
+  if config.get('nccl_version', None):
+    print('Found NCCL %s in:' % config['nccl_version'])
+    print('    %s' % config['nccl_library_dir'])
+    print('    %s' % config['nccl_include_dir'])
+
+  print('\n')
+
+  environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
+  return True
+
+
 def main():
   global _TF_WORKSPACE_ROOT
   global _TF_BAZELRC
@@ -1392,70 +1437,40 @@
   set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
   if (environ_cp.get('TF_NEED_CUDA') == '1' and
       'TF_CUDA_CONFIG_REPO' not in environ_cp):
+
+    set_action_env_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT', False)
+
+    environ_save = dict(environ_cp)
     for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
+
+      if validate_cuda_config(environ_cp):
+        cuda_env_names = [
+            'TF_CUDA_VERSION', 'TF_CUBLAS_VERSION', 'TF_CUDNN_VERSION',
+            'TF_TENSORRT_VERSION', 'TF_NCCL_VERSION', 'TF_CUDA_PATHS',
+            'CUDA_TOOLKIT_PATH'
+        ]
+        for name in cuda_env_names:
+          if name in environ_cp:
+            write_action_env_to_bazelrc(name, environ_cp[name])
+        break
+
+      # Restore settings changed below if CUDA config could not be validated.
+      environ_cp = dict(environ_save)
+
       set_tf_cuda_version(environ_cp)
       set_tf_cudnn_version(environ_cp)
-      cuda_libraries = ['cuda', 'cudnn']
       if is_linux():
         set_tf_tensorrt_version(environ_cp)
-        if 'TF_TENSORRT_VERSION' in environ_cp:  # if env variable exists
-          cuda_libraries.append('tensorrt')
         set_tf_nccl_version(environ_cp)
-        if environ_cp['TF_NCCL_VERSION']:  # if env variable not empty
-          cuda_libraries.append('nccl')
 
-      def maybe_encode_env(env):
-        """Encodes unicode in env to str on Windows python 2.x."""
-        if not is_windows() or sys.version_info[0] != 2:
-          return env
-        for k, v in env.items():
-          if isinstance(k, unicode):
-            k = k.encode('ascii')
-          if isinstance(v, unicode):
-            v = v.encode('ascii')
-          env[k] = v
-        return env
+      set_tf_cuda_paths(environ_cp)
 
-      # Verify CUDA configuration by calling find_cuda_config.py.
-      proc = subprocess.Popen(
-          [
-              environ_cp['PYTHON_BIN_PATH'],
-              'third_party/gpus/find_cuda_config.py'
-          ] + cuda_libraries,
-          stdout=subprocess.PIPE,
-          env=maybe_encode_env(environ_cp))
-
-      cuda_env_variable_names = [
-          'TF_CUDA_VERSION', 'TF_CUBLAS_VERSION', 'TF_CUDNN_VERSION',
-          'TF_TENSORRT_VERSION', 'TF_NCCL_VERSION', 'TF_CUDA_PATHS'
-      ]
-
-      if proc.wait():
-        print('\nCould not find all requested CUDA libraries or headers.\n')
-        for name in cuda_env_variable_names:
-          if name in environ_cp:
-            del environ_cp[name]
-        set_tf_cuda_paths(environ_cp)
-        continue
-
-      for line in proc.stdout:
-        match = re.match('cuda_toolkit_path: (.*)', line.decode('ascii'))
-        if match:
-          cuda_toolkit_path = match.group(1)
-
-      for name in cuda_env_variable_names:
-        if name in environ_cp:
-          write_action_env_to_bazelrc(name, environ_cp[name])
-
-      break
     else:
       raise UserInputError(
          'Invalid CUDA settings were provided %d '
           'times in a row. Assuming to be a scripting mistake.' %
           _DEFAULT_PROMPT_ASK_ATTEMPTS)
 
-    environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
-
     set_tf_cuda_compute_capabilities(environ_cp)
     if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
         'LD_LIBRARY_PATH') != '1':
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index 3e31143..c66eac0 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -666,13 +666,9 @@
     ] + cuda_libraries)
     if exec_result.return_code:
         auto_configure_fail("Failed to run find_cuda_config.py: %s" % exec_result.stderr)
-    config = {}
-    for line in exec_result.stdout.splitlines():
-        elements = line.split(": ")
-        if len(elements) != 2:
-            auto_configure_fail("Unexpected output from find_cuda_config.py")
-        config[elements[0]] = elements[1]
-    return config
+
+    # Parse the dict from stdout.
+    return dict([tuple(x.split(": ")) for x in exec_result.stdout.splitlines()])
 
 def _get_cuda_config(repository_ctx):
     """Detects and returns information about the CUDA installation on the system.
diff --git a/third_party/gpus/find_cuda_config.py b/third_party/gpus/find_cuda_config.py
index 87be0b2..c8977a0 100644
--- a/third_party/gpus/find_cuda_config.py
+++ b/third_party/gpus/find_cuda_config.py
@@ -182,7 +182,7 @@
 
 def _not_found_error(paths, filepattern):
   return ConfigError(
-      "Could not find any %s in:%s" %
+      "Could not find any %s in:%s\n" %
       (filepattern, "".join(["\n        %s" % path for path in sorted(paths)])))