Wrap some --action_envs (CUDA, TensorRT, clang, SYCL) into --configs: .tf_configure.bazelrc now only sets the --configs, and .bazelrc maps those back to the --action_envs.
PiperOrigin-RevId: 249618180
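
For illustration, a rough sketch of the resulting split, assuming a configure
run that enables ROCm (contents are illustrative, not the exact output of
configure.py):

    # .tf_configure.bazelrc (written by configure.py) now only enables configs:
    build --config=rocm

    # .bazelrc (checked in) maps the config back to its action_env and defines:
    build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain
    build:rocm --define=using_rocm=true --define=using_rocm_hipcc=true
    build:rocm --action_env TF_NEED_ROCM=1
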
diff --git a/.bazelrc b/.bazelrc
index d4d7ad6..6b892ea 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -39,32 +39,46 @@
build:download_clang --crosstool_top=@local_config_download_clang//:toolchain
build:download_clang --define=using_clang=true
+build:download_clang --action_env TF_DOWNLOAD_CLANG=1
# Instruct clang to use LLD for linking.
# This only works with GPU builds currently, since Bazel sets -B/usr/bin in
# auto-generated CPU crosstool, forcing /usr/bin/ld.lld to be preferred over
# the downloaded one.
build:download_clang_use_lld --linkopt='-fuse-ld=lld'
-build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
-build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true
+# This config refers to building with CUDA available. It does not necessarily
+# mean that we build CUDA op kernels.
+build:using_cuda --define=using_cuda=true
+build:using_cuda --action_env TF_NEED_CUDA=1
+build:using_cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
+
+# This config refers to building CUDA op kernels with nvcc.
+build:cuda --config=using_cuda
+build:cuda --define=using_cuda_nvcc=true
+
+# This config refers to building CUDA op kernels with clang.
+build:cuda_clang --config=using_cuda
+build:cuda_clang --define=using_cuda_clang=true
+build:cuda_clang --define=using_clang=true
+
+build:tensorrt --action_env TF_NEED_TENSORRT=1
build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain
build:rocm --define=using_rocm=true --define=using_rocm_hipcc=true
-
-build:cuda_clang --crosstool_top=@local_config_cuda//crosstool:toolchain
-build:cuda_clang --define=using_cuda=true --define=using_cuda_clang=true --define=using_clang=true
+build:rocm --action_env TF_NEED_ROCM=1
build:sycl --crosstool_top=@local_config_sycl//crosstool:toolchain
-build:sycl --define=using_sycl=true --define=using_trisycl=false
+build:sycl --define=using_sycl=true
+build:sycl --action_env TF_NEED_OPENCL_SYCL=1
-build:sycl_nodouble --crosstool_top=@local_config_sycl//crosstool:toolchain
-build:sycl_nodouble --define=using_sycl=true --cxxopt -DTENSORFLOW_SYCL_NO_DOUBLE
+build:sycl_nodouble --config=sycl
+build:sycl_nodouble --cxxopt -DTENSORFLOW_SYCL_NO_DOUBLE
-build:sycl_asan --crosstool_top=@local_config_sycl//crosstool:toolchain
-build:sycl_asan --define=using_sycl=true --define=using_trisycl=false --copt -fno-omit-frame-pointer --copt -fsanitize-coverage=3 --copt -DGPR_NO_DIRECT_SYSCALLS --linkopt -fPIC --linkopt -fsanitize=address
+build:sycl_asan --config=sycl
+build:sycl_asan --copt -fno-omit-frame-pointer --copt -fsanitize-coverage=3 --copt -DGPR_NO_DIRECT_SYSCALLS --linkopt -fPIC --linkopt -fsanitize=address
-build:sycl_trisycl --crosstool_top=@local_config_sycl//crosstool:toolchain
-build:sycl_trisycl --define=using_sycl=true --define=using_trisycl=true
+build:sycl_trisycl --config=sycl
+build:sycl_trisycl --define=using_trisycl=true
# Options extracted from configure script
build:gdr --define=with_gdr_support=true
@@ -97,8 +111,7 @@
# Build TF with C++ 17 features.
build:c++17 --cxxopt=-std=c++1z
build:c++17 --cxxopt=-stdlib=libc++
-build:c++1z --cxxopt=-std=c++1z
-build:c++1z --cxxopt=-stdlib=libc++
+build:c++1z --config=c++17
# Default paths for TF_SYSTEM_LIBS
build --define=PREFIX=/usr
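
As a rough illustration of the new layering (target pattern and expansion are
illustrative only), a single --config=cuda on the command line now pulls in the
shared using_cuda flags via build:cuda --config=using_cuda:

    bazel build --config=cuda //tensorflow/...
    # roughly equivalent to passing:
    #   --define=using_cuda=true --define=using_cuda_nvcc=true
    #   --action_env TF_NEED_CUDA=1
    #   --crosstool_top=@local_config_cuda//crosstool:toolchain
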
diff --git a/configure.py b/configure.py
index 43af22d..a6dfb4c 100644
--- a/configure.py
+++ b/configure.py
@@ -403,7 +403,8 @@
enabled_by_default,
question=None,
yes_reply=None,
- no_reply=None):
+ no_reply=None,
+ bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
@@ -418,12 +419,16 @@
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
+ bazel_config_name: optional string. If set, write "build --config=<name>" to .bazelrc instead of an action_env when the variable is enabled.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
- write_action_env_to_bazelrc(var_name, var)
+ if not bazel_config_name:
+ write_action_env_to_bazelrc(var_name, var)
+ elif var:
+ write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
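
In effect, a call like the ROCm one in a later hunk now appends a --config line
rather than an action_env line to the user bazelrc (a sketch of the behavior,
assuming the usual write_to_bazelrc / .tf_configure.bazelrc plumbing):

    set_action_env_var(
        environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
    # if the user answers yes: writes "build --config=rocm"
    # (previously: "build --action_env TF_NEED_ROCM=1")
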
@@ -543,7 +548,8 @@
False,
question=question,
yes_reply=yes_reply,
- no_reply=no_reply)
+ no_reply=no_reply,
+ bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
@@ -558,7 +564,8 @@
False,
question=question,
yes_reply=yes_reply,
- no_reply=no_reply)
+ no_reply=no_reply,
+ bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
@@ -782,8 +789,8 @@
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
- 'errors.\n' % (android_ndk_home_path, ndk_version,
- _SUPPORTED_ANDROID_NDK_VERSIONS))
+ 'errors.\n' %
+ (android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
@@ -952,6 +959,7 @@
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
+
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
@@ -1419,7 +1427,12 @@
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
xla_enabled_by_default, 'xla')
- set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
+ set_action_env_var(
+ environ_cp,
+ 'TF_NEED_OPENCL_SYCL',
+ 'OpenCL SYCL',
+ False,
+ bazel_config_name='sycl')
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
@@ -1429,30 +1442,44 @@
else:
set_trisycl_include_dir(environ_cp)
- set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
+ set_action_env_var(
+ environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
- set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
+ environ_cp['TF_NEED_CUDA'] = str(
+ int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
- set_action_env_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT', False)
+ set_action_env_var(
+ environ_cp,
+ 'TF_NEED_TENSORRT',
+ 'TensorRT',
+ False,
+ bazel_config_name='tensorrt')
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
- 'TF_CUDA_VERSION', 'TF_CUBLAS_VERSION', 'TF_CUDNN_VERSION',
- 'TF_TENSORRT_VERSION', 'TF_NCCL_VERSION', 'TF_CUDA_PATHS',
+ 'TF_CUDA_VERSION',
+ 'TF_CUBLAS_VERSION',
+ 'TF_CUDNN_VERSION',
+ 'TF_TENSORRT_VERSION',
+ 'TF_NCCL_VERSION',
+ 'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
- 'CUDA_TOOLKIT_PATH', 'CUDNN_INSTALL_PATH', 'NCCL_INSTALL_PATH',
- 'NCCL_HDR_PATH', 'TENSORRT_INSTALL_PATH'
+ 'CUDA_TOOLKIT_PATH',
+ 'CUDNN_INSTALL_PATH',
+ 'NCCL_INSTALL_PATH',
+ 'NCCL_HDR_PATH',
+ 'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
@@ -1503,8 +1530,6 @@
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
- if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
- write_to_bazelrc('build --config=download_clang')
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.