
Commit f330f3d

Nightly: do the test install with dependencies properly and skip CUDA tests on CPU-only boxes
1 parent e2e4542 commit f330f3d

File tree

2 files changed: +21 −25 lines


conda/build_pytorch.sh (+6 −1)

@@ -388,7 +388,12 @@ for py_ver in "${DESIRED_PYTHON[@]}"; do
 
     # Install the built package and run tests, unless it's for mac cross compiled arm64
     if [[ -z "$CROSS_COMPILE_ARM64" ]]; then
-        conda install -y "$built_package"
+        # Install the package as if from local repo instead of tar.bz2 directly in order
+        # to trigger runtime dependency installation. See https://github.com/conda/conda/issues/1884
+        # Notes:
+        #   - pytorch-nightly is included to install torchtriton
+        #   - nvidia is included for cuda builds, there's no harm in listing the channel for cpu builds
+        conda install -y -c "file://$(realpath $output_folder)" pytorch==$PYTORCH_BUILD_VERSION -c pytorch -c numba/label/dev -c pytorch-nightly -c nvidia
 
         echo "$(date) :: Running tests"
         pushd "$pytorch_rootdir"
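
Why this works: conda only resolves a package's run-time dependencies when it is installed from a channel; installing a .tar.bz2 file directly bypasses the solver (the conda issue linked in the comment). A minimal standalone sketch of the same pattern, assuming a hypothetical build output directory ./conda-bld that conda build has already indexed and a hypothetical nightly version string:

#!/usr/bin/env bash
# Sketch only (not part of the commit): install a locally built package through a
# file:// channel so that conda also resolves and installs its declared run-time deps.
set -euo pipefail

output_folder="./conda-bld"            # hypothetical: conda build's indexed output dir
build_version="2.0.0.dev20230101"      # hypothetical nightly version string

# "conda install ./pytorch-*.tar.bz2" would skip dependency resolution entirely;
# pointing -c at the folder as a local channel does not.
conda install -y \
  -c "file://$(realpath "$output_folder")" \
  "pytorch==${build_version}" \
  -c pytorch -c pytorch-nightly -c nvidia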

run_tests.sh (+15 −24)

@@ -72,21 +72,6 @@ fi
 
 # Environment initialization
 if [[ "$package_type" == conda || "$(uname)" == Darwin ]]; then
-  # Why are there two different ways to install dependencies after installing an offline package?
-  # The "cpu" conda package for pytorch doesn't actually depend on "cpuonly" which means that
-  # when we attempt to update dependencies using "conda update --all" it will attempt to install
-  # whatever "cudatoolkit" your current computer relies on (which is sometimes none). When conda
-  # tries to install this cudatoolkit that correlates with your current hardware it will also
-  # overwrite the currently installed "local" pytorch package meaning you aren't actually testing
-  # the right package.
-  # TODO (maybe): Make the "cpu" package of pytorch depend on "cpuonly"
-  if [[ "$cuda_ver" = 'cpu' ]]; then
-    # Installing cpuonly will also install dependencies as well
-    retry conda install -y -c pytorch cpuonly
-  else
-    # Install dependencies from installing the pytorch conda package offline
-    retry conda update -yq --all -c defaults -c pytorch -c numba/label/dev
-  fi
   # Install the testing dependencies
   retry conda install -yq future hypothesis ${NUMPY_PACKAGE} ${PROTOBUF_PACKAGE} pytest setuptools six typing_extensions pyyaml
 else

@@ -140,15 +125,21 @@ python -c "import torch; exit(0 if torch.__version__ == '$expected_version' else
 
 # Test that CUDA builds are setup correctly
 if [[ "$cuda_ver" != 'cpu' ]]; then
-  # Test CUDA archs
-  echo "Checking that CUDA archs are setup correctly"
-  timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
-
-  # These have to run after CUDA is initialized
-  echo "Checking that magma is available"
-  python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
-  echo "Checking that CuDNN is available"
-  python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
+  cuda_installed=1
+  nvidia-smi || cuda_installed=0
+  if [[ "$cuda_installed" == 0 ]]; then
+    echo "Skip CUDA tests for machines without a Nvidia GPU card"
+  else
+    # Test CUDA archs
+    echo "Checking that CUDA archs are setup correctly"
+    timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
+
+    # These have to run after CUDA is initialized
+    echo "Checking that magma is available"
+    python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
+    echo "Checking that CuDNN is available"
+    python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
+  fi
 fi
 
 # Check that OpenBlas is not linked to on Macs
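
The guard added to run_tests.sh is a small "probe for a GPU, then branch" pattern that can be reused on its own. A sketch under the same assumption as the diff (nvidia-smi exits non-zero, or is absent, on a machine without a usable NVIDIA GPU); the python one-liner is an illustrative placeholder, not the commit's full test list:

#!/usr/bin/env bash
# Sketch only (not part of the commit): skip GPU-only checks on CPU-only machines.

cuda_installed=1
nvidia-smi >/dev/null 2>&1 || cuda_installed=0   # probe fails (or is missing) without an NVIDIA GPU

if [[ "$cuda_installed" == 0 ]]; then
  echo "Skipping CUDA tests: no NVIDIA GPU detected"
else
  echo "Checking that a tensor can be moved to the GPU"
  timeout 20 python -c 'import torch; torch.randn([3, 5]).cuda()'
fi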
