Skip to content

Commit 6c371b8

Browse files
committed
Add smoke test for binary package
1 parent 219fab7 commit 6c371b8

File tree

6 files changed

+237
-40
lines changed

6 files changed

+237
-40
lines changed

check_binary.sh

Lines changed: 7 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -256,8 +256,8 @@ build_and_run_example_cpp () {
256256
if [ -f "${install_root}/lib/libtorch_cuda.so" ] || [ -f "${install_root}/lib/libtorch_cuda.dylib" ]; then
257257
TORCH_CUDA_LINK_FLAGS="-ltorch_cuda"
258258
fi
259-
g++ example-app.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++14 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o example-app
260-
./example-app
259+
g++ /builder/test_example_code/$1.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++14 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o $1
260+
./$1
261261
}
262262

263263
build_example_cpp_with_incorrect_abi () {
@@ -287,7 +287,7 @@ build_example_cpp_with_incorrect_abi () {
287287
if [ -f "${install_root}/lib/libtorch_cuda.so" ] || [ -f "${install_root}/lib/libtorch_cuda.dylib" ]; then
288288
TORCH_CUDA_LINK_FLAGS="-ltorch_cuda"
289289
fi
290-
g++ example-app.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++14 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o example-app
290+
g++ /builder/test_example_code/$1.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++14 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o $1
291291
ERRCODE=$?
292292
set -e
293293
if [ "$ERRCODE" -eq "0" ]; then
@@ -302,19 +302,11 @@ build_example_cpp_with_incorrect_abi () {
302302
# Check simple Python/C++ calls
303303
###############################################################################
304304
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
305-
cat >example-app.cpp <<EOL
306-
#include <torch/torch.h>
307-
308-
int main(int argc, const char* argv[]) {
309-
TORCH_WARN("Simple test passed!");
310-
return 0;
311-
}
312-
EOL
313-
build_and_run_example_cpp
305+
build_and_run_example_cpp simple-torch-test
314306
# `_GLIBCXX_USE_CXX11_ABI` is always ignored by gcc in devtoolset7, so we test
315307
# the expected failure case for Ubuntu 16.04 + gcc 5.4 only.
316308
if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then
317-
build_example_cpp_with_incorrect_abi
309+
build_example_cpp_with_incorrect_abi simple-torch-test
318310
fi
319311
else
320312
python -c 'import torch'
@@ -328,15 +320,7 @@ fi
328320

329321
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
330322
echo "Checking that MKL is available"
331-
cat >example-app.cpp <<EOL
332-
#include <torch/torch.h>
333-
334-
int main(int argc, const char* argv[]) {
335-
TORCH_CHECK(torch::hasMKL(), "MKL is not available");
336-
return 0;
337-
}
338-
EOL
339-
build_and_run_example_cpp
323+
build_and_run_example_cpp check-torch-mkl
340324
else
341325
if [[ "$(uname)" != 'Darwin' || "$PACKAGE_TYPE" != *wheel ]]; then
342326
echo "Checking that MKL is available"
@@ -360,24 +344,7 @@ fi
360344
# Test that CUDA builds are setup correctly
361345
if [[ "$DESIRED_CUDA" != 'cpu' ]]; then
362346
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
363-
cat >example-app.cpp <<EOL
364-
#include <torch/torch.h>
365-
366-
int main(int argc, const char* argv[]) {
367-
std::cout << "Checking that CUDA archs are setup correctly" << std::endl;
368-
TORCH_CHECK(torch::rand({3, 5}, torch::Device(torch::kCUDA)).defined(), "CUDA archs are not setup correctly");
369-
370-
// These have to run after CUDA is initialized
371-
372-
std::cout << "Checking that magma is available" << std::endl;
373-
TORCH_CHECK(torch::hasMAGMA(), "MAGMA is not available");
374-
375-
std::cout << "Checking that CuDNN is available" << std::endl;
376-
TORCH_CHECK(torch::cuda::cudnn_is_available(), "CuDNN is not available");
377-
return 0;
378-
}
379-
EOL
380-
build_and_run_example_cpp
347+
build_and_run_example_cpp check-torch-cuda
381348
else
382349
echo "Checking that CUDA archs are setup correctly"
383350
timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
// Smoke test for CUDA-enabled libtorch binaries: verifies that a CUDA tensor
// can be created (i.e. the GPU archs were compiled in) and that the MAGMA and
// CuDNN components are available. Exits non-zero (via TORCH_CHECK throwing)
// on any failure.
#include <torch/torch.h>

#include <iostream>  // explicit include for std::cout/std::endl (was relied on transitively via torch.h)

int main(int argc, const char* argv[]) {
  std::cout << "Checking that CUDA archs are setup correctly" << std::endl;
  // Allocating a CUDA tensor fails at runtime if the binary was not built
  // with kernels for this GPU's architecture.
  TORCH_CHECK(torch::rand({ 3, 5 }, torch::Device(torch::kCUDA)).defined(), "CUDA archs are not setup correctly");

  // These have to run after CUDA is initialized

  std::cout << "Checking that magma is available" << std::endl;
  TORCH_CHECK(torch::hasMAGMA(), "MAGMA is not available");

  std::cout << "Checking that CuDNN is available" << std::endl;
  TORCH_CHECK(torch::cuda::cudnn_is_available(), "CuDNN is not available");
  return 0;
}

test_example_code/check-torch-mkl.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
// Smoke test: aborts with an error (TORCH_CHECK throws) if the libtorch
// binary was built without MKL support.
#include <torch/torch.h>

// Command-line arguments are unused, so the parameters are left unnamed.
int main(int, const char*[]) {
  TORCH_CHECK(torch::hasMKL(), "MKL is not available");
  return 0;
}
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
// Minimal smoke test: proves the binary links against libtorch, loads, and
// runs by emitting a single warning message.
#include <torch/torch.h>

// Command-line arguments are unused, so the parameters are left unnamed.
int main(int, const char*[]) {
  TORCH_WARN("Simple test passed!");
  return 0;
}

windows/internal/driver_update.bat

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
:: Download and silently install the NVIDIA Tesla driver needed by the CUDA
:: smoke tests, then remove the installer. Exits non-zero if the download or
:: the install step fails.
set "DRIVER_INSTALLER=442.50-tesla-desktop-winserver-2019-2016-international.exe"
set "DRIVER_DOWNLOAD_LINK=https://s3.amazonaws.com/ossci-windows/%DRIVER_INSTALLER%"

curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output %DRIVER_INSTALLER%
if errorlevel 1 exit /b 1

:: -s = silent install, -noreboot = suppress the automatic reboot.
start /wait %DRIVER_INSTALLER% -s -noreboot
if errorlevel 1 exit /b 1

:: Best-effort cleanup; "ver > NUL" resets ERRORLEVEL if del fails.
del %DRIVER_INSTALLER% || ver > NUL

windows/internal/smoke_test.bat

Lines changed: 194 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,194 @@
:: Smoke test for Windows binary packages (wheel / conda / libtorch).
:: Installs the freshly built package into a clean environment and runs a
:: small set of import / MKL / CUDA checks against it. Exits non-zero on the
:: first failure.

set SRC_DIR=%~dp0
pushd %SRC_DIR%\..

:: CUDA 10.2 test machines need a driver update before the GPU is usable.
if "%CUDA_VERSION%" == "102" call internal\driver_update.bat
if errorlevel 1 exit /b 1

set "ORIG_PATH=%PATH%"

:: Detect whether an NVIDIA GPU is present; CUDA tests are skipped otherwise.
setlocal EnableDelayedExpansion
set NVIDIA_GPU_EXISTS=0
for /F "delims=" %%i in ('wmic path win32_VideoController get name') do (
    set GPUS=%%i
    if not "x!GPUS:NVIDIA=!" == "x!GPUS!" (
        SET NVIDIA_GPU_EXISTS=1
        goto gpu_check_end
    )
)
:gpu_check_end
endlocal & set NVIDIA_GPU_EXISTS=%NVIDIA_GPU_EXISTS%

if "%PACKAGE_TYPE%" == "wheel" goto wheel
if "%PACKAGE_TYPE%" == "conda" goto conda
if "%PACKAGE_TYPE%" == "libtorch" goto libtorch

echo "unknown package type"
exit /b 1

:wheel
echo "install wheel package"

:: Install a standalone CPython matching DESIRED_PYTHON.
set PYTHON_INSTALLER_URL=
if "%DESIRED_PYTHON%" == "3.8" set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.8.2/python-3.8.2-amd64.exe"
if "%DESIRED_PYTHON%" == "3.7" set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.7.7/python-3.7.7-amd64.exe"
if "%DESIRED_PYTHON%" == "3.6" set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.6.8/python-3.6.8-amd64.exe"
if "%PYTHON_INSTALLER_URL%" == "" (
    echo Python %DESIRED_PYTHON% not supported yet
    exit /b 1
)

del python-amd64.exe
curl --retry 3 -kL "%PYTHON_INSTALLER_URL%" --output python-amd64.exe
if errorlevel 1 exit /b 1

start /wait "" python-amd64.exe /quiet InstallAllUsers=1 PrependPath=1 Include_test=0 TargetDir=%CD%\Python%PYTHON_VERSION%
if errorlevel 1 exit /b 1

set "PATH=%CD%\Python%PYTHON_VERSION%\Scripts;%CD%\Python%PYTHON_VERSION%;%PATH%"

pip install -q future numpy protobuf six "mkl>=2019"
if errorlevel 1 exit /b 1

for /F "delims=" %%i in ('where /R "%PYTORCH_FINAL_PACKAGE_DIR:/=\%" *.whl') do pip install "%%i"
if errorlevel 1 exit /b 1

goto smoke_test

:conda
echo "install conda package"

:: Install Miniconda3
set "CONDA_HOME=%CD%\conda"
set "tmp_conda=%CONDA_HOME%"
set "miniconda_exe=%CD%\miniconda.exe"

rmdir /s /q conda
del miniconda.exe
curl -k https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe -o "%miniconda_exe%"
if errorlevel 1 exit /b 1
call ..\conda\install_conda.bat
if ERRORLEVEL 1 exit /b 1

set "PATH=%CONDA_HOME%;%CONDA_HOME%\scripts;%CONDA_HOME%\Library\bin;%PATH%"

conda create -qyn testenv python=%DESIRED_PYTHON%
if errorlevel 1 exit /b 1

call %CONDA_HOME%\condabin\activate.bat testenv
if errorlevel 1 exit /b 1

call conda install -yq future numpy protobuf six
if ERRORLEVEL 1 exit /b 1

for /F "delims=" %%i in ('where /R "%PYTORCH_FINAL_PACKAGE_DIR:/=\%" *.tar.bz2') do call conda install -y "%%i" --offline
if ERRORLEVEL 1 exit /b 1

if "%CUDA_VERSION%" == "cpu" goto install_cpu_torch

:: Derive the "X.Y" toolkit string from e.g. CUDA_VERSION=102 -> 10.2.
set /a CUDA_VER=%CUDA_VERSION%
set CUDA_VER_MAJOR=%CUDA_VERSION:~0,-1%
set CUDA_VER_MINOR=%CUDA_VERSION:~-1,1%
set CUDA_VERSION_STR=%CUDA_VER_MAJOR%.%CUDA_VER_MINOR%

if "%CUDA_VERSION_STR%" == "9.2" (
    call conda install -y "cudatoolkit=%CUDA_VERSION_STR%" -c pytorch -c defaults -c numba/label/dev
) else (
    call conda install -y "cudatoolkit=%CUDA_VERSION_STR%" -c pytorch
)
if ERRORLEVEL 1 exit /b 1

goto smoke_test

:install_cpu_torch
call conda install -y cpuonly -c pytorch
if ERRORLEVEL 1 exit /b 1

:smoke_test
python -c "import torch"
if ERRORLEVEL 1 exit /b 1

python -c "from caffe2.python import core"
if ERRORLEVEL 1 exit /b 1

echo Checking that MKL is available
python -c "import torch; exit(0 if torch.backends.mkl.is_available() else 1)"
if ERRORLEVEL 1 exit /b 1

if "%NVIDIA_GPU_EXISTS%" == "0" (
    echo "Skip CUDA tests for machines without a Nvidia GPU card"
    goto end
)

echo Checking that CUDA archs are setup correctly
python -c "import torch; torch.randn([3,5]).cuda()"
if ERRORLEVEL 1 exit /b 1

echo Checking that magma is available
python -c "import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)"
if ERRORLEVEL 1 exit /b 1

echo Checking that CuDNN is available
python -c "import torch; exit(0 if torch.backends.cudnn.is_available() else 1)"
if ERRORLEVEL 1 exit /b 1

goto end

:libtorch
echo "install and test libtorch"

powershell internal\vs_install.ps1
if ERRORLEVEL 1 exit /b 1

for /F "delims=" %%i in ('where /R "%PYTORCH_FINAL_PACKAGE_DIR:/=\%" *-latest.zip') do 7z x "%%i" -otmp
if ERRORLEVEL 1 exit /b 1

pushd tmp\libtorch

:: Locate a VS2017 (15.x) toolchain via vswhere.
for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [15^,16^) -property installationPath`) do (
    if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
        set "VS15INSTALLDIR=%%i"
        set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat"
        goto vswhere
    )
)

:vswhere
IF "%VS15VCVARSALL%"=="" (
    echo Visual Studio 2017 C++ BuildTools is required to compile PyTorch test on Windows
    exit /b 1
)
call "%VS15VCVARSALL%" x64

set install_root=%CD%
set INCLUDE=%INCLUDE%;%install_root%\include;%install_root%\include\torch\csrc\api\include
set LIB=%LIB%;%install_root%\lib
set PATH=%PATH%;%install_root%\lib

cl %BUILDER_ROOT%\test_example_code\simple-torch-test.cpp c10.lib torch_cpu.lib /EHsc
if ERRORLEVEL 1 exit /b 1

.\simple-torch-test.exe
if ERRORLEVEL 1 exit /b 1

cl %BUILDER_ROOT%\test_example_code\check-torch-mkl.cpp c10.lib torch_cpu.lib /EHsc
if ERRORLEVEL 1 exit /b 1

.\check-torch-mkl.exe
if ERRORLEVEL 1 exit /b 1

if "%NVIDIA_GPU_EXISTS%" == "0" (
    echo "Skip CUDA tests for machines without a Nvidia GPU card"
    goto end
)

:: /INCLUDE forces the linker to keep torch_cuda's registration symbol so the
:: CUDA backend is actually linked in.
cl %BUILDER_ROOT%\test_example_code\check-torch-cuda.cpp torch_cpu.lib c10.lib torch_cuda.lib /EHsc /link /INCLUDE:?warp_size@cuda@at@@YAHXZ
if ERRORLEVEL 1 exit /b 1
.\check-torch-cuda.exe
if ERRORLEVEL 1 exit /b 1

popd

echo Cleaning temp files
rd /s /q "tmp" || ver > nul

:end
set "PATH=%ORIG_PATH%"
popd

0 commit comments

Comments
 (0)