|
72 | 72 |
|
73 | 73 | # Environment initialization
|
74 | 74 | if [[ "$package_type" == conda || "$(uname)" == Darwin ]]; then
|
75 |
| - # Why are there two different ways to install dependencies after installing an offline package? |
76 |
| - # The "cpu" conda package for pytorch doesn't actually depend on "cpuonly" which means that |
77 |
| - # when we attempt to update dependencies using "conda update --all" it will attempt to install |
78 |
| - # whatever "cudatoolkit" your current computer relies on (which is sometimes none). When conda |
79 |
| - # tries to install this cudatoolkit that correlates with your current hardware it will also |
80 |
| - # overwrite the currently installed "local" pytorch package meaning you aren't actually testing |
81 |
| - # the right package. |
82 |
| - # TODO (maybe): Make the "cpu" package of pytorch depend on "cpuonly" |
83 |
| - if [[ "$cuda_ver" = 'cpu' ]]; then |
84 |
| - # Installing cpuonly will also install its dependencies |
85 |
| - retry conda install -y -c pytorch cpuonly |
86 |
| - else |
87 |
| - # Install dependencies from installing the pytorch conda package offline |
88 |
| - retry conda update -yq --all -c defaults -c pytorch -c numba/label/dev |
89 |
| - fi |
90 | 75 | # Install the testing dependencies
|
91 | 76 | retry conda install -yq future hypothesis ${NUMPY_PACKAGE} ${PROTOBUF_PACKAGE} pytest setuptools six typing_extensions pyyaml
|
92 | 77 | else
|
@@ -140,15 +125,21 @@ python -c "import torch; exit(0 if torch.__version__ == '$expected_version' else
|
140 | 125 |
|
141 | 126 | # Test that CUDA builds are setup correctly
|
# Test that CUDA builds are setup correctly.
# Only meaningful for CUDA packages; CPU-only builds have nothing to verify here.
if [[ "$cuda_ver" != 'cpu' ]]; then
  # Probe for a usable NVIDIA driver/GPU first; CI machines without a card
  # must not fail the build over tests they cannot run.
  if nvidia-smi; then
    # Test CUDA archs: a kernel launch catches arch-mismatch packaging errors.
    echo "Checking that CUDA archs are setup correctly"
    timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'

    # The following probes are only valid once CUDA has been initialized.
    echo "Checking that magma is available"
    python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
    echo "Checking that CuDNN is available"
    python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
  else
    echo "Skip CUDA tests for machines without a Nvidia GPU card"
  fi
fi
|
153 | 144 |
|
154 | 145 | # Check that OpenBlas is not linked to on Macs
|
|
0 commit comments