#!/usr/bin/env bash
set -ex

export CMAKE_LIBRARY_PATH=$PREFIX/lib:$PREFIX/include:$CMAKE_LIBRARY_PATH
export CMAKE_PREFIX_PATH=$PREFIX
export TH_BINARY_BUILD=1 # links CPU BLAS libraries thrice in a row (was needed for some MKL static linkage)
export PYTORCH_BUILD_VERSION=$PKG_VERSION
export PYTORCH_BUILD_NUMBER=$PKG_BUILDNUM

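# For reference: $PREFIX, $PKG_VERSION, $PKG_BUILDNUM (and later $SP_DIR) are
# injected by conda-build from meta.yaml. As a purely hypothetical example,
# PKG_VERSION=1.0.0 and PKG_BUILDNUM=1 would make the exports above set
# PYTORCH_BUILD_VERSION=1.0.0 and PYTORCH_BUILD_NUMBER=1.
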
# Why do we disable ninja when it is included in the meta.yaml? Using ninja in
# the conda builds leads to a system python2.7 library being called, which
# causes ascii decode errors when building third_party/onnx. Is the ninja in
# this conda env being picked up? We still need ninja in the meta.yaml for
# cpp_tests, I believe. TODO: figure out what's going on here and fix it. It
# would be nice to use ninja for the conda binary builds as well.
export USE_NINJA=OFF

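# A possible way to check which ninja would be picked up when debugging the
# issue above (left commented out; purely diagnostic, not part of the build):
#   command -v ninja
#   ninja --version
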
# The macOS build is simple, and is not built for CUDA
if [[ "$OSTYPE" == "darwin"* ]]; then
    MACOSX_DEPLOYMENT_TARGET=10.9 \
    CXX=clang++ \
    CC=clang \
    python setup.py install
    exit 0
fi
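# A hypothetical sanity check for the macOS build (example path and invocation
# only, not run here): the deployment target baked into the built extension
# could be inspected with otool, e.g.
#   otool -l $SP_DIR/torch/_C*.so | grep -A3 LC_VERSION_MIN_MACOSX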


if [[ -z "$NO_CUDA" || "$NO_CUDA" == 0 ]]; then
    build_with_cuda=1
fi
if [[ -n "$build_with_cuda" ]]; then
    export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX"
    if [[ $CUDA_VERSION == 8.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1"
    elif [[ $CUDA_VERSION == 9.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;7.0"
    elif [[ $CUDA_VERSION == 9.2* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0"
    elif [[ $CUDA_VERSION == 10.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0;7.5"
    fi
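    # For example, with CUDA_VERSION=9.2 the list above expands to
    # "3.5;5.0+PTX;6.0;6.1;7.0".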
    export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
    export NCCL_ROOT_DIR=/usr/local/cuda
    export USE_STATIC_CUDNN=1 # links cudnn statically (driven by tools/setup_helpers/cudnn.py)
    export USE_STATIC_NCCL=1 # links nccl statically (driven by tools/setup_helpers/nccl.py, some of the NCCL cmake files such as FindNCCL.cmake and gloo/FindNCCL.cmake)
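    # One way to verify the static linking after the build (hypothetical check,
    # not run here; libcaffe2_gpu.so is the library these flags affect):
    #   ldd $SP_DIR/torch/lib/libcaffe2_gpu.so | grep -E 'cudnn|nccl'
    # should print nothing if cudnn and nccl were linked statically.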

    # not needed if using conda's cudatoolkit package. Uncomment to statically link a new CUDA version that's not available in conda yet
    # export ATEN_STATIC_CUDA=1 # links ATen / libcaffe2_gpu.so with static CUDA libs, also sets up special cufft linkage
    # export USE_CUDA_STATIC_LINK=1 # links libcaffe2_gpu.so with static CUDA libs. Likely both these flags can be de-duplicated
fi

fname_with_sha256() {
    HASH=$(sha256sum $1 | cut -c1-8)
    DIRNAME=$(dirname $1)
    BASENAME=$(basename $1)
    if [[ $BASENAME == "libnvrtc-builtins.so" ]]; then
        echo $1
    else
        INITNAME=$(echo $BASENAME | cut -f1 -d".")
        ENDNAME=$(echo $BASENAME | cut -f 2- -d".")
        echo "$DIRNAME/$INITNAME-$HASH.$ENDNAME"
    fi
}
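# Usage example (the hash shown is hypothetical):
#   fname_with_sha256 /usr/local/cuda/lib64/libnvrtc.so.9.2
#   -> /usr/local/cuda/lib64/libnvrtc-1a2b3c4d.so.9.2
# libnvrtc-builtins.so is intentionally returned unchanged (see the special
# case above).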

DEPS_LIST=()
# not needed if using conda's cudatoolkit package. Uncomment to statically link a new CUDA version that's not available in conda yet
# if [[ -n "$build_with_cuda" ]]; then
#     cuda_majmin="$(echo $CUDA_VERSION | cut -f1,2 -d'.')"
#     DEPS_LIST+=("/usr/local/cuda/lib64/libcudart.so.$cuda_majmin")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc.so.$cuda_majmin")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc-builtins.so")
# fi


# install
python setup.py install

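# After this step the package lives in conda's site-packages directory, which
# conda-build exposes as $SP_DIR; the torch package is therefore at
# $SP_DIR/torch, the path the patching steps below operate on.
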
# copy over the needed dependent .so files and tag them with their hash
patched=()
for filepath in "${DEPS_LIST[@]}"; do
    filename=$(basename $filepath)
    destpath=$SP_DIR/torch/lib/$filename
    cp $filepath $destpath

    patchedpath=$(fname_with_sha256 $destpath)
    patchedname=$(basename $patchedpath)
    if [[ "$destpath" != "$patchedpath" ]]; then
        mv $destpath $patchedpath
    fi

    patched+=("$patchedname")
    echo "Copied $filepath to $patchedpath"
done

# run patchelf to fix the so names to the hashed names
for ((i=0;i<${#DEPS_LIST[@]};++i)); do
    find $SP_DIR/torch -name '*.so*' | while read sofile; do
        origname="$(basename ${DEPS_LIST[i]})"
        patchedname=${patched[i]}
        set +e
        patchelf --print-needed $sofile | grep $origname > /dev/null 2>&1
        ERRCODE=$?
        set -e
        if [ "$ERRCODE" -eq "0" ]; then
            echo "patching $sofile entry $origname to $patchedname"
            patchelf --replace-needed $origname $patchedname $sofile
        fi
    done
done
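# For example (hypothetical names): if a torch .so has a DT_NEEDED entry
# "libnvrtc.so.9.2" and that library was renamed to "libnvrtc-1a2b3c4d.so.9.2"
# above, the loop rewrites the entry so the dynamic loader picks up the hashed
# copy shipped in torch/lib.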

# set RPATH of _C.so and similar to $ORIGIN, $ORIGIN/lib and conda/lib
find $SP_DIR/torch -maxdepth 1 -type f -name "*.so*" | while read sofile; do
    echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..'
    patchelf --set-rpath '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..' $sofile
    patchelf --print-rpath $sofile
done
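# Note: assuming the standard conda layout ($PREFIX/lib/pythonX.Y/site-packages),
# $ORIGIN/../../.. resolves from $SP_DIR/torch to $PREFIX/lib, which is the
# "conda/lib" referred to above; likewise $ORIGIN/../../../.. below resolves to
# $PREFIX/lib from torch/lib.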

# set RPATH of lib/ files to $ORIGIN and conda/lib
find $SP_DIR/torch/lib -maxdepth 1 -type f -name "*.so*" | while read sofile; do
    echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/../../../..'
    patchelf --set-rpath '$ORIGIN:$ORIGIN/../../../..' $sofile
    patchelf --print-rpath $sofile
done