#!/bin/bash
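#
# Interactive configuration script for the TensorFlow build. It locates the
# Python binary, asks whether to build with GPU support, records the CUDA and
# CUDNN install paths in third_party/gpus/cuda/cuda.config, and can optionally
# patch in a custom list of Cuda compute capabilities.
#
# Every prompt below can be pre-answered by exporting the corresponding
# environment variable before running this script: PYTHON_BIN_PATH,
# TF_NEED_CUDA, CUDA_TOOLKIT_PATH, CUDNN_INSTALL_PATH and (together with
# TF_UNOFFICIAL_SETTING=1) TF_CUDA_COMPUTE_CAPABILITIES.
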
## Set up python-related environment settings
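# Each prompt loop below tracks whether the answer came from the interactive
# prompt ("fromuser") or from a preset environment variable: an invalid
# interactive answer triggers a re-prompt, while an invalid preset value
# aborts with an error.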
while true; do
  fromuser=""
  if [ -z "$PYTHON_BIN_PATH" ]; then
    default_python_bin_path=$(which python)
    read -p "Please specify the location of python. [Default is $default_python_bin_path]: " PYTHON_BIN_PATH
    fromuser="1"
    if [ -z "$PYTHON_BIN_PATH" ]; then
      PYTHON_BIN_PATH=$default_python_bin_path
    fi
  fi
  if [ -e "$PYTHON_BIN_PATH" ]; then
    break
  fi
  echo "Invalid python path. ${PYTHON_BIN_PATH} cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  PYTHON_BIN_PATH=""
  # Retry
done
# Invoke python_config and set up symlinks to python includes
(./util/python/python_config.sh --setup "$PYTHON_BIN_PATH";) || exit -1
## Set up Cuda-related environment settings
while [ "$TF_NEED_CUDA" == "" ]; do
read -p "Do you wish to build TensorFlow with GPU support? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=1;;
[Nn]* ) echo "No GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
"" ) echo "No GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
if [ "$TF_NEED_CUDA" == "0" ]; then
echo "Configuration finished"
exit
fi
# Find out where the CUDA toolkit is installed
while true; do
  fromuser=""
  if [ -z "$CUDA_TOOLKIT_PATH" ]; then
    default_cuda_path=/usr/local/cuda
    read -p "Please specify the location where CUDA 7.0 toolkit is installed. Refer to README.md for more details. [Default is $default_cuda_path]: " CUDA_TOOLKIT_PATH
    fromuser="1"
    if [ -z "$CUDA_TOOLKIT_PATH" ]; then
      CUDA_TOOLKIT_PATH=$default_cuda_path
    fi
  fi
  if [ -e "$CUDA_TOOLKIT_PATH/lib64/libcudart.so.7.0" ]; then
    break
  fi
  echo "Invalid path to CUDA 7.0 toolkit. ${CUDA_TOOLKIT_PATH}/lib64/libcudart.so.7.0 cannot be found"
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  CUDA_TOOLKIT_PATH=""
  # Retry
done
# Find out where the CUDNN library is installed
while true; do
  fromuser=""
  if [ -z "$CUDNN_INSTALL_PATH" ]; then
    default_cudnn_path=${CUDA_TOOLKIT_PATH}
    read -p "Please specify the location where CUDNN 6.5 V2 library is installed. Refer to README.md for more details. [Default is $default_cudnn_path]: " CUDNN_INSTALL_PATH
    fromuser="1"
    if [ -z "$CUDNN_INSTALL_PATH" ]; then
      CUDNN_INSTALL_PATH=$default_cudnn_path
    fi
    # The result returned from "read" is used unexpanded. That makes "~" unusable.
    # Going through one more level of expansion to handle that.
    CUDNN_INSTALL_PATH=$(bash -c "readlink -f $CUDNN_INSTALL_PATH")
  fi
  if [ -e "$CUDNN_INSTALL_PATH/libcudnn.so.6.5" -o -e "$CUDNN_INSTALL_PATH/lib64/libcudnn.so.6.5" ]; then
    break
  fi
  echo "Invalid path to the CUDNN 6.5 V2 library. Neither of the following two files can be found:"
  echo "$CUDNN_INSTALL_PATH/lib64/libcudnn.so.6.5"
  echo "$CUDNN_INSTALL_PATH/libcudnn.so.6.5"
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  CUDNN_INSTALL_PATH=""
  # Retry
done
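
# Persist the chosen paths for the rest of the GPU build setup; cuda_config.sh,
# invoked at the end of this script, runs from the same directory and is
# expected to pick this file up.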
cat > third_party/gpus/cuda/cuda.config <<EOF
# CUDA_TOOLKIT_PATH refers to the CUDA toolkit. TensorFlow requires Cuda 7.0
# at the moment.
CUDA_TOOLKIT_PATH="$CUDA_TOOLKIT_PATH"
# CUDNN_INSTALL_PATH refers to the CUDNN toolkit. The cudnn header and library
# files can be either in this directory, or under include/ and lib64/
# directories separately.
CUDNN_INSTALL_PATH="$CUDNN_INSTALL_PATH"
EOF
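
# UnofficialSetting lets the user build for a custom list of Cuda compute
# capabilities. It patches the hard-coded supported_cuda_compute_capabilities
# lists in the crosstool wrapper and in gpu_device.cc, which is why it only
# runs when TF_UNOFFICIAL_SETTING=1 is set explicitly.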
function UnofficialSetting() {
  echo -e "\nWARNING: You are configuring unofficial settings in TensorFlow. Because some external libraries are not backward compatible, these settings are largely untested and unsupported. \n" 1>&2
  # Configure the compute capabilities that TensorFlow builds for.
  # Since the Cuda toolkit is not backward-compatible, this is not guaranteed to work.
  while true; do
    fromuser=""
    if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
cat << EOF
Please specify a list of comma-separated Cuda compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
Please note that each additional compute capability significantly increases your build time and binary size.
EOF
      read -p "[Default is: \"3.5,5.2\"]: " TF_CUDA_COMPUTE_CAPABILITIES
      fromuser=1
    fi
    # Check whether all capabilities from the input are valid.
    COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES//,/ }
    ALL_VALID=1
    for CAPABILITY in $COMPUTE_CAPABILITIES; do
      if [[ ! "$CAPABILITY" =~ [0-9]+\.[0-9]+ ]]; then
        echo "Invalid compute capability: " $CAPABILITY
        ALL_VALID=0
        break
      fi
    done
    if [ "$ALL_VALID" == "0" ]; then
      if [ -z "$fromuser" ]; then
        exit 1
      fi
    else
      break
    fi
    TF_CUDA_COMPUTE_CAPABILITIES=""
  done
  if [ ! -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
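    # Note: the marker string below is split into two literals, presumably so
    # that the concatenated phrase never appears verbatim in this script.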
    export WARNING="Unofficial setting. DO NOT"" SUBMIT!!!"
    function CudaGenCodeOpts() {
      OUTPUT=""
      for CAPABILITY in $@; do
        OUTPUT=${OUTPUT}" \"${CAPABILITY}\", "
      done
      echo $OUTPUT
    }
    export CUDA_GEN_CODES_OPTS=$(CudaGenCodeOpts ${TF_CUDA_COMPUTE_CAPABILITIES//,/ })
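    # Rewrite the supported_cuda_compute_capabilities list in the crosstool
    # wrapper script so it contains only the user-specified capabilities, and
    # insert a Python-style comment line carrying the $WARNING marker above it.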
    perl -pi -0 -e 's,\n( *)([^\n]*supported_cuda_compute_capabilities\s*=\s*\[).*?(\]),\n\1# $ENV{WARNING}\n\1\2$ENV{CUDA_GEN_CODES_OPTS}\3,s' third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc
    function CudaVersionOpts() {
      OUTPUT=""
      for CAPABILITY in $@; do
        OUTPUT=$OUTPUT"CudaVersion(\"${CAPABILITY}\"), "
      done
      echo $OUTPUT
    }
    export CUDA_VERSION_OPTS=$(CudaVersionOpts ${TF_CUDA_COMPUTE_CAPABILITIES//,/ })
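    # Apply the same substitution to the CudaVersion list in gpu_device.cc,
    # this time inserting the $WARNING marker as a C++-style comment.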
    perl -pi -0 -e 's,\n( *)([^\n]*supported_cuda_compute_capabilities\s*=\s*\{).*?(\}),\n\1// $ENV{WARNING}\n\1\2$ENV{CUDA_VERSION_OPTS}\3,s' tensorflow/core/common_runtime/gpu/gpu_device.cc
  fi
}
# Only run the unofficial settings when users explicitly choose to.
if [ "$TF_UNOFFICIAL_SETTING" == "1" ]; then
UnofficialSetting
fi
# Invoke the cuda_config.sh and set up the TensorFlow's canonical view of the Cuda libraries
(cd third_party/gpus/cuda; ./cuda_config.sh;) || exit -1
echo "Configuration finished"