Skip to content

Commit 3be6024

Browse files
authored
Merge pull request #76 from iotamudelta/master
Merge from upstream
2 parents fdb4a36 + 21db32f commit 3be6024

File tree

220 files changed

+3242
-2698
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

220 files changed

+3242
-2698
lines changed

.jenkins/caffe2/build.sh

Lines changed: 36 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -2,46 +2,51 @@
22

33
set -ex
44

5+
pip install --user --no-cache-dir hypothesis==3.59.0
6+
7+
58
# The INSTALL_PREFIX here must match up with test.sh
69
INSTALL_PREFIX="/usr/local/caffe2"
710
LOCAL_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
811
ROOT_DIR=$(cd "$LOCAL_DIR"/../.. && pwd)
912
CMAKE_ARGS=()
13+
SCCACHE="$(which sccache)"
14+
15+
if [ "$(which gcc)" != "/root/sccache/gcc" ]; then
16+
# Setup SCCACHE
17+
###############################################################################
18+
# Setup sccache if SCCACHE_BUCKET is set
19+
if [ -n "${SCCACHE_BUCKET}" ]; then
20+
mkdir -p ./sccache
21+
22+
SCCACHE="$(which sccache)"
23+
if [ -z "${SCCACHE}" ]; then
24+
echo "Unable to find sccache..."
25+
exit 1
26+
fi
1027

28+
# Setup wrapper scripts
29+
for compiler in cc c++ gcc g++ x86_64-linux-gnu-gcc; do
30+
(
31+
echo "#!/bin/sh"
32+
echo "exec $SCCACHE $(which $compiler) \"\$@\""
33+
) > "./sccache/$compiler"
34+
chmod +x "./sccache/$compiler"
35+
done
36+
37+
if [[ "${BUILD_ENVIRONMENT}" == *-cuda* ]]; then
38+
(
39+
echo "#!/bin/sh"
40+
echo "exec $SCCACHE $(which nvcc) \"\$@\""
41+
) > "./sccache/nvcc"
42+
chmod +x "./sccache/nvcc"
43+
fi
1144

12-
# Setup SCCACHE
13-
###############################################################################
14-
# Setup sccache if SCCACHE_BUCKET is set
15-
if [ -n "${SCCACHE_BUCKET}" ]; then
16-
mkdir -p ./sccache
17-
18-
SCCACHE="$(which sccache)"
19-
if [ -z "${SCCACHE}" ]; then
20-
echo "Unable to find sccache..."
21-
exit 1
22-
fi
23-
24-
# Setup wrapper scripts
25-
for compiler in cc c++ gcc g++ x86_64-linux-gnu-gcc; do
26-
(
27-
echo "#!/bin/sh"
28-
echo "exec $SCCACHE $(which $compiler) \"\$@\""
29-
) > "./sccache/$compiler"
30-
chmod +x "./sccache/$compiler"
31-
done
45+
export CACHE_WRAPPER_DIR="$PWD/sccache"
3246

33-
if [[ "${BUILD_ENVIRONMENT}" == *-cuda* ]]; then
34-
(
35-
echo "#!/bin/sh"
36-
echo "exec $SCCACHE $(which nvcc) \"\$@\""
37-
) > "./sccache/nvcc"
38-
chmod +x "./sccache/nvcc"
47+
# CMake must find these wrapper scripts
48+
export PATH="$CACHE_WRAPPER_DIR:$PATH"
3949
fi
40-
41-
export CACHE_WRAPPER_DIR="$PWD/sccache"
42-
43-
# CMake must find these wrapper scripts
44-
export PATH="$CACHE_WRAPPER_DIR:$PATH"
4550
fi
4651

4752
# Setup ccache if configured to use it (and not sccache)

.jenkins/caffe2/test.sh

Lines changed: 40 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -90,26 +90,48 @@ if [[ $BUILD_ENVIRONMENT == conda* ]]; then
9090
conda_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/checkpoint_test.py")
9191
fi
9292
93+
rocm_ignore_test=()
94+
if [[ $BUILD_ENVIRONMENT == *-rocm* ]]; then
95+
export LANG=C.UTF-8
96+
export LC_ALL=C.UTF-8
9397
94-
# TODO: re-enable this for rocm CI jobs once we have more rocm workers
95-
if [[ $BUILD_ENVIRONMENT != *rocm* ]]; then
96-
# Python tests
97-
echo "Running Python tests.."
98-
"$PYTHON" \
99-
-m pytest \
100-
-x \
101-
-v \
102-
--junit-xml="$TEST_DIR/python/result.xml" \
103-
--ignore "$CAFFE2_PYPATH/python/test/executor_test.py" \
104-
--ignore "$CAFFE2_PYPATH/python/operator_test/matmul_op_test.py" \
105-
--ignore "$CAFFE2_PYPATH/python/operator_test/pack_ops_test.py" \
106-
--ignore "$CAFFE2_PYPATH/python/mkl/mkl_sbn_speed_test.py" \
107-
${conda_ignore_test[@]} \
108-
"$CAFFE2_PYPATH/python" \
109-
"${EXTRA_TESTS[@]}"
98+
# Currently these tests are failing on ROCM platform:
99+
100+
# Unknown reasons, need to debug
101+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/arg_ops_test.py")
102+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/piecewise_linear_transform_test.py")
103+
104+
# Need to go through roi ops to replace max(...) with fmaxf(...)
105+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/roi_align_rotated_op_test.py")
106+
107+
# Our cuda top_k op has some asm code, the hipified version doesn't
108+
# compile yet, so we don't have top_k operator for now
109+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/top_k_test.py")
110+
111+
# These are fixed in rocm 1.8.2, re-enable them once our CI docker images are upgraded
112+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/recurrent_net_executor_test.py")
113+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/softmax_ops_test.py")
114+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/conv_test.py")
115+
rocm_ignore_test+=("--ignore $CAFFE2_PYPATH/python/operator_test/group_conv_test.py")
110116
fi
111117
118+
# Python tests
119+
echo "Running Python tests.."
120+
"$PYTHON" \
121+
-m pytest \
122+
-x \
123+
-v \
124+
--junit-xml="$TEST_DIR/python/result.xml" \
125+
--ignore "$CAFFE2_PYPATH/python/test/executor_test.py" \
126+
--ignore "$CAFFE2_PYPATH/python/operator_test/matmul_op_test.py" \
127+
--ignore "$CAFFE2_PYPATH/python/operator_test/pack_ops_test.py" \
128+
--ignore "$CAFFE2_PYPATH/python/mkl/mkl_sbn_speed_test.py" \
129+
${conda_ignore_test[@]} \
130+
${rocm_ignore_test[@]} \
131+
"$CAFFE2_PYPATH/python" \
132+
"${EXTRA_TESTS[@]}"
133+
112134
if [[ -n "$INTEGRATED" ]]; then
113-
pip install --user pytest-xdist torchvision
114-
"$ROOT_DIR/scripts/onnx/test.sh" -p
135+
pip install --user torchvision
136+
"$ROOT_DIR/scripts/onnx/test.sh"
115137
fi

.jenkins/pytorch/build.sh

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ pip install -r requirements.txt || true
3131

3232
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
3333
# This is necessary in order to cross compile (or else we'll have missing GPU device).
34+
export MAX_JOBS=4
3435
export HCC_AMDGPU_TARGET=gfx900
3536

3637
# These environment variables are not set on CI when we were running as the Jenkins user.
@@ -47,9 +48,6 @@ if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
4748
rm -rf "$(dirname "${BASH_SOURCE[0]}")/../../../pytorch_amd/" || true
4849
python "$(dirname "${BASH_SOURCE[0]}")/../../tools/amd_build/build_pytorch_amd.py"
4950

50-
# ROCm builds experience OOM issues when building with sccache. (HCC Issue #785)
51-
export MAX_JOBS=`expr $(nproc) / 2`
52-
5351
USE_ROCM=1 python setup.py install
5452
exit
5553
fi

.jenkins/pytorch/disabled-configs.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,4 @@
44
# turn on CI side before PyTorch repository supports it. This
55
# file has the same format as .jenkins/enabled-configs.txt
66

7-
py2-clang3.8-rocmnightly-ubuntu16.04-test
7+
py2-clang3.8-rocm1.7.1-ubuntu16.04-test

.jenkins/pytorch/enabled-configs.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,4 +40,4 @@ pytorch-macos-10.13-cuda9.2-cudnn7-py3-build
4040
pytorch-docker-build-test
4141
short-perf-test-cpu
4242
short-perf-test-gpu
43-
py2-clang3.8-rocmnightly-ubuntu16.04-build
43+
py2-clang3.8-rocm1.7.1-ubuntu16.04-build

CONTRIBUTING.md

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -139,17 +139,17 @@ not very optimized for incremental rebuilds, this will actually be very slow.
139139
Far better is to only request rebuilds of the parts of the project you are
140140
working on:
141141

142-
- Working on `torch/csrc`? Run `python setup.py develop` to rebuild
142+
- Working on the Python bindings? Run `python setup.py develop` to rebuild
143143
(NB: no `build` here!)
144144

145-
- Working on `torch/lib/TH`, did not make any cmake changes, and just want to
146-
see if it compiles? Run `(cd torch/lib/build/TH && make install -j$(getconf _NPROCESSORS_ONLN))`. This
147-
applies for any other subdirectory of `torch/lib`. **Warning: Changes you
148-
make here will not be visible from Python.** See below.
145+
- Working on `torch/csrc` or `aten`? Run `python setup.py build_caffe2` to
146+
rebuild and avoid having to rebuild other dependent libraries we
147+
depend on. The other valid targets are listed in `dep_libs` in `setup.py`
148+
(prepend `build_` to get a target).
149149

150-
- Working on `torch/lib` and want to run your changes / rerun cmake? Run
151-
`python setup.py build_deps`. Note that this will rerun cmake for
152-
every subdirectory in TH.
150+
- Working on a test binary? Run `(cd build && ninja bin/test_binary_name)` to
151+
rebuild only that test binary (without rerunning cmake). (Replace `ninja` with
152+
`make` if you don't have ninja installed).
153153

154154
On the initial build, you can also speed things up with the environment
155155
variables `DEBUG` and `NO_CUDA`.

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,7 @@ on [our website](http://pytorch.org/previous-versions/).
231231
## Getting Started
232232

233233
Three pointers to get you started:
234-
- [Tutorials: get you started with understanding and using PyTorch](http://pytorch.org/tutorials/)
234+
- [Tutorials: get you started with understanding and using PyTorch](https://pytorch.org/tutorials/)
235235
- [Examples: easy to understand pytorch code across all domains](https://github.com/pytorch/examples)
236236
- [The API Reference](http://pytorch.org/docs/)
237237

aten/src/ATen/ATen.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,3 +22,4 @@
2222
#include "ATen/Layout.h"
2323
#include "ATen/OptionsGuard.h"
2424
#include "ATen/CUDAGuard.h"
25+
#include "ATen/Error.h"

aten/src/ATen/Context.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,6 @@
1515

1616
namespace at {
1717

18-
enum class IsVariable {
19-
NotVariable,
20-
Variable,
21-
NumOptions
22-
};
23-
2418
class AT_API Context {
2519
public:
2620
Context();

aten/src/ATen/Declarations.cwrap

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@
3535
return: argument 0
3636
options:
3737
- cname: set
38-
scalar_check: source_->isScalar()
38+
scalar_check: source_->dim() == 0
3939
arguments:
4040
- THTensor* self
4141
- THTensor* source
@@ -181,7 +181,7 @@
181181
variants:
182182
- function
183183
return: self
184-
scalar_check: the_template_->isScalar()
184+
scalar_check: the_template_->dim() == 0
185185
arguments:
186186
- THTensor* self
187187
- THTensor* the_template
@@ -219,7 +219,7 @@
219219
- method
220220
- function
221221
return: argument 0
222-
scalar_check: index_->isScalar()
222+
scalar_check: index_->dim() == 0
223223
arguments:
224224
- arg: THTensor* result
225225
output: True
@@ -837,7 +837,7 @@
837837
options:
838838
- cname: min
839839
return: argument 0,1
840-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
840+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
841841
arguments:
842842
- arg: THTensor* min
843843
output: True
@@ -876,7 +876,7 @@
876876
options:
877877
- cname: max
878878
return: argument 0,1
879-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
879+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
880880
arguments:
881881
- arg: THTensor* max
882882
output: True
@@ -897,7 +897,7 @@
897897
- function
898898
cname: kthvalue
899899
return: argument 0,1
900-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
900+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
901901
arguments:
902902
- arg: THTensor* values
903903
output: True
@@ -918,7 +918,7 @@
918918
- function
919919
cname: mode
920920
return: argument 0,1
921-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
921+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
922922
arguments:
923923
- arg: THTensor* values
924924
output: True
@@ -952,7 +952,7 @@
952952
return: argument 0,1
953953
options:
954954
- cname: median
955-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
955+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
956956
arguments:
957957
- arg: THTensor* values
958958
output: True
@@ -1032,7 +1032,7 @@
10321032
options:
10331033
- cname: logicalAnd
10341034
return: argument 0
1035-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
1035+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
10361036
arguments:
10371037
- arg: THTensor* result
10381038
output: True
@@ -1071,7 +1071,7 @@
10711071
options:
10721072
- cname: logicalAny
10731073
return: argument 0
1074-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
1074+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
10751075
arguments:
10761076
- arg: THTensor* result
10771077
output: True
@@ -1669,7 +1669,7 @@
16691669
default: 0
16701670
- cname: var
16711671
return: argument 0
1672-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
1672+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
16731673
arguments:
16741674
- arg: THTensor* result
16751675
output: True
@@ -1704,7 +1704,7 @@
17041704
default: 0
17051705
- cname: std
17061706
return: argument 0
1707-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
1707+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
17081708
arguments:
17091709
- arg: THTensor* result
17101710
output: True
@@ -1749,7 +1749,7 @@
17491749
options:
17501750
- cname: norm
17511751
return: argument 0
1752-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
1752+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
17531753
arguments:
17541754
- arg: THTensor* result
17551755
output: True
@@ -2097,7 +2097,7 @@
20972097
options:
20982098
- cname: sum
20992099
return: argument 0
2100-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
2100+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
21012101
arguments:
21022102
- arg: THTensor* result
21032103
output: True
@@ -2126,7 +2126,7 @@
21262126
options:
21272127
- cname: prod
21282128
return: argument 0
2129-
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
2129+
scalar_check: self_->dim() == 0 || (keepdim == false && self_->dim() == 1)
21302130
arguments:
21312131
- arg: THTensor* result
21322132
output: True
@@ -2600,11 +2600,11 @@
26002600
- arg: long diagonal
26012601
default: 0
26022602
aten_custom_call: |
2603-
if (self_->isScalar()) {
2603+
if (self_->dim() == 0) {
26042604
throw std::runtime_error("Input must be 1-d or 2-d");
26052605
}
26062606
${THTensor}_diag(${state,}result_->tensor, self_->tensor, diagonal);
2607-
result_->maybeScalar(self_->isScalar());
2607+
result_->maybe_zero_dim(self_->dim() == 0);
26082608
]]
26092609
[[
26102610
name: th_addmm

0 commit comments

Comments
 (0)