Skip to content

Commit 50d345a

Browse files
committed
1.1.0 recipe
1 parent f601c06 commit 50d345a

File tree

4 files changed

+304
-0
lines changed

4 files changed

+304
-0
lines changed

conda/pytorch-1.1.0/bld.bat

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
@echo off
rem Conda build script for PyTorch 1.1.0 on Windows.
rem conda-build supplies PKG_VERSION, PKG_BUILDNUM, SRC_DIR, SP_DIR; the
rem recipe's script_env passes through CUDA_VERSION, CUDNN_VERSION, NO_CUDA,
rem USE_SCCACHE.

set TH_BINARY_BUILD=1
set PYTORCH_BUILD_VERSION=%PKG_VERSION%
set PYTORCH_BUILD_NUMBER=%PKG_BUILDNUM%

rem CUDA_VERSION is digits only (e.g. "100"); desired_cuda adds the dot ("10.0").
if "%NO_CUDA%" == "" (
    set build_with_cuda=1
    set desired_cuda=%CUDA_VERSION:~0,-1%.%CUDA_VERSION:~-1,1%
) else (
    set build_with_cuda=
    set USE_CUDA=0
)

if "%build_with_cuda%" == "" goto cuda_flags_end

set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%desired_cuda%
set CUDA_BIN_PATH=%CUDA_PATH%\bin
rem Base arch list; extend per CUDA toolkit version.
set TORCH_CUDA_ARCH_LIST=3.5;5.0+PTX
if "%desired_cuda%" == "8.0" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;6.1
if "%desired_cuda%" == "9.0" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;7.0
if "%desired_cuda%" == "9.2" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;6.1;7.0
if "%desired_cuda%" == "10.0" set TORCH_CUDA_ARCH_LIST=%TORCH_CUDA_ARCH_LIST%;6.0;6.1;7.0;7.5
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all

:cuda_flags_end

set DISTUTILS_USE_SDK=1

rem Fetch MKL headers/import libs for BLAS/LAPACK; fail fast if the download
rem or extraction breaks instead of producing a BLAS-less build.
curl https://s3.amazonaws.com/ossci-windows/mkl_2018.2.185.7z -k -O
if errorlevel 1 exit /b 1
7z x -aoa mkl_2018.2.185.7z -omkl
if errorlevel 1 exit /b 1
set CMAKE_INCLUDE_PATH=%SRC_DIR%\mkl\include
set LIB=%SRC_DIR%\mkl\lib;%LIB%

rem sccache doubles as the nvcc wrapper: a copy named nvcc.exe is put first
rem on PATH so CUDA compilations get cached too.
IF "%USE_SCCACHE%" == "1" (
    mkdir %SRC_DIR%\tmp_bin
    curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %SRC_DIR%\tmp_bin\sccache.exe
    copy %SRC_DIR%\tmp_bin\sccache.exe %SRC_DIR%\tmp_bin\nvcc.exe
    set "PATH=%SRC_DIR%\tmp_bin;%PATH%"
)

IF "%build_with_cuda%" == "" goto cuda_end

IF "%desired_cuda%" == "8.0" (
    set MAGMA_VERSION=2.4.0
) ELSE (
    set MAGMA_VERSION=2.5.0
)

curl https://s3.amazonaws.com/ossci-windows/magma_%MAGMA_VERSION%_cuda%CUDA_VERSION%_release.7z -k -O
if errorlevel 1 exit /b 1
7z x -aoa magma_%MAGMA_VERSION%_cuda%CUDA_VERSION%_release.7z -omagma_cuda%CUDA_VERSION%_release
if errorlevel 1 exit /b 1
set MAGMA_HOME=%cd%\magma_cuda%CUDA_VERSION%_release

IF "%USE_SCCACHE%" == "1" (
    set CUDA_NVCC_EXECUTABLE=%SRC_DIR%\tmp_bin\nvcc
)

set "PATH=%CUDA_BIN_PATH%;%PATH%"

if "%CUDA_VERSION%" == "80" (
    rem Only if you use Ninja with CUDA 8.
    rem NOTE: use "rem" here, not "::" - label-style comments inside a
    rem parenthesized block can break cmd.exe's block parsing.
    set "CUDAHOSTCXX=%VS140COMNTOOLS%\..\..\VC\bin\amd64\cl.exe"
)

:cuda_end

set CMAKE_GENERATOR=Ninja

IF NOT "%USE_SCCACHE%" == "1" goto sccache_end

sccache --stop-server
sccache --start-server
sccache --zero-stats

set CC=sccache cl
set CXX=sccache cl

:sccache_end

python setup.py install
if errorlevel 1 exit /b 1

rem Stop the cache daemons; "|| ver > nul" swallows the error when they are
rem already gone (taskkill fails on a missing process).
IF "%USE_SCCACHE%" == "1" (
    taskkill /im sccache.exe /f /t || ver > nul
    taskkill /im nvcc.exe /f /t || ver > nul
)

if NOT "%build_with_cuda%" == "" (
    copy "%CUDA_BIN_PATH%\cudnn64_%CUDNN_VERSION%.dll*" %SP_DIR%\torch\lib
)

exit /b 0

conda/pytorch-1.1.0/build.sh

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
#!/usr/bin/env bash
# Conda build script for pytorch 1.1.0 on macOS and Linux (Windows is handled
# by bld.bat).  conda-build supplies PREFIX, PKG_VERSION, PKG_BUILDNUM; the
# recipe's script_env passes through NO_CUDA and CUDA_VERSION.
set -ex

export CMAKE_LIBRARY_PATH=$PREFIX/lib:$PREFIX/include:$CMAKE_LIBRARY_PATH
export CMAKE_PREFIX_PATH=$PREFIX
export TH_BINARY_BUILD=1 # links CPU BLAS libraries thrice in a row (was needed for some MKL static linkage)
export PYTORCH_BUILD_VERSION=$PKG_VERSION
export PYTORCH_BUILD_NUMBER=$PKG_BUILDNUM

# Why do we disable Ninja when ninja is included in the meta.yaml? Well, using
# ninja in the conda builds leads to a system python2.7 library being called
# which leads to ascii decode errors when building third_party/onnx. Is the
# ninja in this conda env being picked up? We still need ninja in the meta.yaml
# for cpp_tests I believe though. TODO figure out what's going on here and fix
# it. It would be nice to use ninja in the builds of the conda binaries as well
export USE_NINJA=OFF

# MacOS build is simple, and will not be for CUDA
if [[ "$OSTYPE" == "darwin"* ]]; then
    MACOSX_DEPLOYMENT_TARGET=10.9 \
    CXX=clang++ \
    CC=clang \
    python setup.py install
    exit 0
fi


# Build with CUDA unless NO_CUDA is set (to anything other than 0).
if [[ -z "$NO_CUDA" || "$NO_CUDA" == 0 ]]; then
    build_with_cuda=1
fi
if [[ -n "$build_with_cuda" ]]; then
    # Base arch list; extended below per CUDA toolkit version.
    export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX"
    if [[ $CUDA_VERSION == 8.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1"
    elif [[ $CUDA_VERSION == 9.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;7.0"
    elif [[ $CUDA_VERSION == 9.2* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0"
    elif [[ $CUDA_VERSION == 10.0* ]]; then
        export TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0;7.5"
    fi
    export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
    export NCCL_ROOT_DIR=/usr/local/cuda
    export USE_STATIC_CUDNN=1 # links cudnn statically (driven by tools/setup_helpers/cudnn.py)
    export USE_STATIC_NCCL=1 # links nccl statically (driven by tools/setup_helpers/nccl.py, some of the NCCL cmake files such as FindNCCL.cmake and gloo/FindNCCL.cmake)

    # not needed if using conda's cudatoolkit package. Uncomment to statically link a new CUDA version that's not available in conda yet
    # export ATEN_STATIC_CUDA=1 # links ATen / libcaffe2_gpu.so with static CUDA libs, also sets up special cufft linkage
    # export USE_CUDA_STATIC_LINK=1 # links libcaffe2_gpu.so with static CUDA libs. Likely both these flags can be de-duplicated
fi
52+
# Content-address a shared library name: insert the first 8 hex chars of the
# file's SHA-256 between the stem and its extension(s), e.g.
#   /a/libfoo.so.1  ->  /a/libfoo-<hash8>.so.1
# libnvrtc-builtins.so is returned unchanged (presumably it must keep its
# exact original name for the CUDA runtime to find it -- TODO confirm).
# Arguments: $1 - path to an existing file
# Outputs:   the (possibly) hashed path on stdout
fname_with_sha256() {
    local path=$1
    local hash dir base stem ext
    # Quote all expansions so paths with spaces survive (the original left
    # every expansion unquoted).
    hash=$(sha256sum "$path" | cut -c1-8)
    dir=$(dirname "$path")
    base=$(basename "$path")
    if [[ "$base" == "libnvrtc-builtins.so" ]]; then
        echo "$path"
    else
        # Parameter expansion replaces the two `cut` subshells; a dot-less
        # basename degrades the same way cut did (stem == ext == basename).
        stem=${base%%.*}
        ext=${base#*.}
        echo "$dir/$stem-$hash.$ext"
    fi
}
64+
65+
# Extra shared libraries to bundle into the package (empty unless the
# commented block below is enabled for a statically-linked CUDA build).
DEPS_LIST=()
# not needed if using conda's cudatoolkit package. Uncomment to statically link a new CUDA version that's not available in conda yet
# if [[ -n "$build_with_cuda" ]]; then
#     cuda_majmin="$(echo $CUDA_VERSION | cut -f1,2 -d'.')"
#     DEPS_LIST+=("/usr/local/cuda/lib64/libcudart.so.$cuda_majmin")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc.so.$cuda_majmin")
#     DEPS_LIST+=("/usr/local/cuda/lib64/libnvrtc-builtins.so")
# fi


# install
python setup.py install

# copy over needed dependent .so files over and tag them with their hash
patched=()
for filepath in "${DEPS_LIST[@]}"; do
    filename=$(basename "$filepath")
    destpath=$SP_DIR/torch/lib/$filename
    cp "$filepath" "$destpath"

    patchedpath=$(fname_with_sha256 "$destpath")
    patchedname=$(basename "$patchedpath")
    if [[ "$destpath" != "$patchedpath" ]]; then
        mv "$destpath" "$patchedpath"
    fi

    patched+=("$patchedname")
    echo "Copied $filepath to $patchedpath"
done

# run patchelf to fix the so names to the hashed names
for ((i=0;i<${#DEPS_LIST[@]};++i)); do
    find "$SP_DIR/torch" -name '*.so*' | while IFS= read -r sofile; do
        origname="$(basename "${DEPS_LIST[i]}")"
        patchedname=${patched[i]}
        # grep -q replaces the misordered "2>&1 >/dev/null" (which silenced
        # stdout but leaked stderr); set +e/-e guards the expected non-match.
        set +e
        patchelf --print-needed "$sofile" | grep -q "$origname"
        ERRCODE=$?
        set -e
        if [ "$ERRCODE" -eq "0" ]; then
            echo "patching $sofile entry $origname to $patchedname"
            patchelf --replace-needed "$origname" "$patchedname" "$sofile"
        fi
    done
done

# set RPATH of _C.so and similar to $ORIGIN, $ORIGIN/lib and conda/lib
# (-maxdepth moved before -name: find warns when a global option follows tests)
find "$SP_DIR/torch" -maxdepth 1 -name "*.so*" -type f | while IFS= read -r sofile; do
    echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..'
    patchelf --set-rpath '$ORIGIN:$ORIGIN/lib:$ORIGIN/../../..' "$sofile"
    patchelf --print-rpath "$sofile"
done

# set RPATH of lib/ files to $ORIGIN and conda/lib
find "$SP_DIR/torch/lib" -maxdepth 1 -name "*.so*" -type f | while IFS= read -r sofile; do
    # The log line previously claimed an $ORIGIN/lib component that the
    # --set-rpath call below never set; it now matches the actual rpath.
    echo "Setting rpath of $sofile to " '$ORIGIN:$ORIGIN/../../../..'
    patchelf --set-rpath '$ORIGIN:$ORIGIN/../../../..' "$sofile"
    patchelf --print-rpath "$sofile"
done
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
# Variant/configuration matrix for the pytorch-1.1.0 conda recipe.
# Trailing "# [win]" / "# [x86_64]" markers are conda-build selectors,
# not plain comments: they restrict the entry to that platform.
blas_impl:
  - mkl # [x86_64]
c_compiler:
  - vs2017 # [win]
cxx_compiler:
  - vs2017 # [win]
# Python versions to produce package variants for.
python:
  - 3.5
  - 3.6
# This differs from target_platform in that it determines what subdir the compiler
# will target, not what subdir the compiler package will be itself.
# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32
# code on win-64 miniconda.
cross_compiler_target_platform:
  - win-64 # [win]
target_platform:
  - win-64 # [win]
# Visual C++ runtime version paired with the vs2017 compilers above.
vc:
  - 14
# Zip vc with the compiler keys so they vary together instead of forming a
# full cross-product of variants.
zip_keys:
  - # [win]
    - vc # [win]
    - c_compiler # [win]
    - cxx_compiler # [win]

conda/pytorch-1.1.0/meta.yaml

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
# conda recipe for pytorch 1.1.0.  The Jinja2 environ.get(...) lookups are
# filled in from environment variables set by the driver that invokes
# conda-build (see also script_env below, which forwards variables into
# build.sh / bld.bat).
package:
  name: pytorch{{ environ.get('PYTORCH_PACKAGE_SUFFIX') }}
  version: "{{ environ.get('PYTORCH_BUILD_VERSION') }}"

source:
  # Build from a local checkout rather than a url/tarball.
  path: "{{ environ.get('PYTORCH_GITHUB_ROOT_DIR') }}"

requirements:
  build:
    - cmake
    - {{ compiler('c') }} # [win]

  host:
    - python
    - numpy 1.11.*
    - setuptools
    - pyyaml
    - cffi
    - mkl >=2018
    - mkl-include
    - typing
    - ninja
    {{ environ.get('MAGMA_PACKAGE') }}

  run:
    - python
    - numpy >=1.11
    - mkl >=2018
    - cffi
    - ninja
    {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}

build:
  number: {{ environ.get('PYTORCH_BUILD_NUMBER') }}
  detect_binary_files_with_prefix: False
  string: "{{ environ.get('PYTORCH_BUILD_STRING') }}"
  # Environment variables forwarded into the build scripts.
  script_env:
    - CUDA_VERSION
    - CUDNN_VERSION
    - CONDA_CUDATOOLKIT_CONSTRAINT
    - NO_CUDA
    - CMAKE_ARGS
    - EXTRA_CAFFE2_CMAKE_FLAGS
    - DEVELOPER_DIR
    - DEBUG
    - NO_FBGEMM
    - USE_SCCACHE # [win]

test:
  imports:
    - torch
  source_files:
    - test
  commands:
    # Unix test run is best-effort: "|| true" keeps packaging going even if
    # some tests fail; the Windows run is strict.
    - OMP_NUM_THREADS=4 python ./test/run_test.py || true # [not win]
    - python ./test/run_test.py # [win]


about:
  home: http://pytorch.org/
  license: BSD 3-Clause
  license_family: BSD
  license_file: LICENSE
  summary: PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.

0 commit comments

Comments
 (0)