Skip to content

Commit f737380

Browse files
authored
Merge pull request #18 from iotamudelta/master
Merge from pytorch upstream
2 parents 2f769c4 + d28c837 commit f737380

File tree

93 files changed

+1334
-767
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

93 files changed

+1334
-767
lines changed

.jenkins/pytorch/build-asan.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,6 @@ export ASAN_OPTIONS=detect_leaks=0:symbolize=1
1616

1717
# TODO: Make the ASAN flags a more unified env var
1818
CC="clang" CXX="clang++" LDSHARED="clang --shared" \
19-
CFLAGS="-fsanitize=address -shared-libasan" \
19+
CFLAGS="-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -shared-libasan" \
2020
NO_CUDA=1 DEBUG=1 \
2121
python setup.py install

.jenkins/pytorch/test.sh

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,17 +21,27 @@ popd
2121
# ASAN test is not working
2222
if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
2323
export ASAN_OPTIONS=detect_leaks=0:symbolize=1
24+
export UBSAN_OPTIONS=print_stacktrace=1
2425
export PYTORCH_TEST_WITH_ASAN=1
26+
export PYTORCH_TEST_WITH_UBSAN=1
2527
# TODO: Figure out how to avoid hard-coding these paths
2628
export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-5.0/bin/llvm-symbolizer
2729
export LD_PRELOAD=/usr/lib/llvm-5.0/lib/clang/5.0.0/lib/linux/libclang_rt.asan-x86_64.so
2830
# Increase stack size, because ASAN red zones use more stack
2931
ulimit -s 81920
3032

33+
function get_exit_code() {
34+
set +e
35+
"$@"
36+
retcode=$?
37+
set -e
38+
return $retcode
39+
}
3140
(cd test && python -c "import torch")
32-
echo "The next two invocations are expected to crash; if they don't that means ASAN is misconfigured"
33-
(cd test && ! python -c "import torch; torch._C._crash_if_csrc_asan(3)")
34-
(cd test && ! python -c "import torch; torch._C._crash_if_aten_asan(3)")
41+
echo "The next three invocations are expected to crash; if they don't that means ASAN/UBSAN is misconfigured"
42+
(cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_csrc_asan(3)")
43+
(cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_csrc_ubsan(0)")
44+
(cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_aten_asan(3)")
3545
fi
3646

3747
export ATEN_DISABLE_AVX=

aten/src/ATen/Declarations.cwrap

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -828,6 +828,13 @@
828828
- arg: THTensor* self
829829
broadcast: other fallback
830830
- THTensor* other
831+
]]
832+
[[
833+
name: _th_min
834+
variants:
835+
- method
836+
- function
837+
options:
831838
- cname: min
832839
return: argument 0,1
833840
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
@@ -860,6 +867,13 @@
860867
- arg: THTensor* self
861868
broadcast: other fallback
862869
- THTensor* other
870+
]]
871+
[[
872+
name: _th_max
873+
variants:
874+
- method
875+
- function
876+
options:
863877
- cname: max
864878
return: argument 0,1
865879
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
@@ -875,12 +889,13 @@
875889
default: "false"
876890
]]
877891
[[
878-
name: kthvalue
892+
name: _th_kthvalue
879893
backends:
880894
- CPU
881895
variants:
882896
- method
883897
- function
898+
cname: kthvalue
884899
return: argument 0,1
885900
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
886901
arguments:
@@ -897,10 +912,11 @@
897912
default: "false"
898913
]]
899914
[[
900-
name: mode
915+
name: _th_mode
901916
variants:
902917
- method
903918
- function
919+
cname: mode
904920
return: argument 0,1
905921
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
906922
arguments:
@@ -926,6 +942,15 @@
926942
return: real
927943
arguments:
928944
- THTensor* self
945+
]]
946+
[[
947+
name: _th_median
948+
variants:
949+
- method
950+
- function
951+
cname: median
952+
return: argument 0,1
953+
options:
929954
- cname: median
930955
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
931956
arguments:

aten/src/ATen/SparseTensorImpl.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -62,9 +62,9 @@ void SparseTensorImpl::set_indices_and_values(const Tensor& indices, const Tenso
6262
// dimensions at the moment
6363
bool empty = values.numel() == 0;
6464
AT_CHECK(values.type().toSparse() == type(), "values type must match sparse tensor type");
65-
AT_CHECK(indices.type().scalarType() == kLong);
66-
AT_CHECK(indices.type().backend() == values.type().backend());
67-
AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device());
65+
AT_CHECK(indices.type().scalarType() == kLong, "indices must be an int64 tensor");
66+
AT_CHECK(indices.type().backend() == values.type().backend(), "backend of indices (", indices.type().backend(), ") must match backend of values (", values.type().backend(), ")");
67+
AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device(), "device of indices (", indices.get_device(), ") must match device of values (", values.get_device(), ")");
6868
if (!empty) {
6969
AT_CHECK(indices.dim() == 2, "indices must be nDim x nnz");
7070
AT_CHECK(indices.size(1) == values.size(0), "indices and values must have same nnz");

aten/src/ATen/TensorUtils.cpp

Lines changed: 69 additions & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -20,32 +20,24 @@ std::ostream& operator<<(std::ostream & out, TensorGeometryArg t) {
2020
}
2121

2222
void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim) {
23-
if (t->dim() != dim) {
24-
std::ostringstream oss;
25-
oss << "Expected " << dim << "-dimensional tensor, but got "
26-
<< t->dim() << "-dimensional tensor for " << t
27-
<< " (while checking arguments for " << c << ")";
28-
throw std::runtime_error(oss.str());
29-
}
23+
AT_CHECK(t->dim() == dim,
24+
"Expected ", dim, "-dimensional tensor, but got ", t->dim(),
25+
"-dimensional tensor for ", t," (while checking arguments for ", c, ")");
3026
}
3127

3228
void checkDimRange(CheckedFrom c, const TensorGeometryArg& t, int64_t dim_start, int64_t dim_end) {
33-
if (t->dim() < dim_start || t->dim() >= dim_end) {
34-
std::ostringstream oss;
35-
oss << "Expected " << dim_start << " to " << (dim_end - 1) << " dimensions, but got "
36-
<< t->dim() << "-dimensional tensor for " << t
37-
<< " (while checking arguments for " << c << ")";
38-
throw std::runtime_error(oss.str());
39-
}
29+
AT_CHECK(
30+
t->dim() >= dim_start && t->dim() < dim_end,
31+
"Expected ", dim_start, " to ", (dim_end - 1), " dimensions, but got ",
32+
t->dim(), "-dimensional tensor for ", t, " (while checking arguments for ",
33+
c, ")");
4034
}
4135

4236
void checkContiguous(CheckedFrom c, const TensorGeometryArg& t) {
43-
if (!t->is_contiguous()) {
44-
std::ostringstream oss;
45-
oss << "Expected contiguous tensor, but got non-contiguous tensor for " << t
46-
<< " (while checking arguments for " << c << ")";
47-
throw std::runtime_error(oss.str());
48-
}
37+
AT_CHECK(
38+
t->is_contiguous(),
39+
"Expected contiguous tensor, but got non-contiguous tensor for ", t,
40+
" (while checking arguments for ", c, ")");
4941
}
5042

5143
void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts) {
@@ -57,23 +49,18 @@ void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts) {
5749

5850
void checkSize(CheckedFrom c, const TensorGeometryArg& t, IntList sizes) {
5951
checkDim(c, t, sizes.size());
60-
if (!t->sizes().equals(sizes)) {
61-
std::ostringstream oss;
62-
oss << "Expected tensor of size " << sizes << ", but got tensor of size "
63-
<< t->sizes() << " for " << t
64-
<< " (while checking arguments for " << c << ")";
65-
throw std::runtime_error(oss.str());
66-
}
52+
AT_CHECK(
53+
t->sizes().equals(sizes),
54+
"Expected tensor of size ", sizes, ", but got tensor of size ", t->sizes(),
55+
" for ", t, " (while checking arguments for ", c, ")");
6756
}
6857

6958
void checkSize(CheckedFrom c, const TensorGeometryArg& t, int64_t dim, int64_t size) {
70-
if (t->size(dim) != size) {
71-
std::ostringstream oss;
72-
oss << "Expected tensor to have size " << size << " at dimension " << dim
73-
<< ", but got size " << t->size(dim) << " for " << t
74-
<< " (while checking arguments for " << c << ")";
75-
throw std::runtime_error(oss.str());
76-
}
59+
AT_CHECK(
60+
t->size(dim) == size,
61+
"Expected tensor to have size ", size, " at dimension ", dim,
62+
", but got size ", t->size(dim), " for ", t,
63+
" (while checking arguments for ", c, ")");
7764
}
7865

7966
void checkAllSame(CheckedFrom c, ArrayRef<TensorArg> tensors, void(*fn)(CheckedFrom, const TensorArg&, const TensorArg&)) {
@@ -89,37 +76,32 @@ void checkAllSame(CheckedFrom c, ArrayRef<TensorArg> tensors, void(*fn)(CheckedF
8976
}
9077

9178
void checkSameSize(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
92-
if (!t1->sizes().equals(t2->sizes())) {
93-
std::ostringstream oss;
94-
oss << "Expected tensor for " << t1 << " to have same size as tensor for "
95-
<< t2 << "; but " << t1->sizes() << " does not equal " << t2->sizes()
96-
<< " (while checking arguments for " << c << ")";
97-
throw std::runtime_error(oss.str());
98-
}
79+
AT_CHECK(
80+
t1->sizes().equals(t2->sizes()),
81+
"Expected tensor for ", t1, " to have same size as tensor for ", t2,
82+
"; but ", t1->sizes(), " does not equal ", t2->sizes(),
83+
" (while checking arguments for ", c, ")");
9984
}
10085

10186
void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors) {
10287
checkAllSame(c, tensors, checkSameSize);
10388
}
10489

10590
void checkNumel(CheckedFrom c, const TensorGeometryArg& t, int64_t numel) {
106-
if (t->numel() != numel) {
107-
std::ostringstream oss;
108-
oss << "Expected tensor for " << t << " to have "
109-
<< numel << " elements; but it actually has " << t->numel() << " elements"
110-
<< " (while checking arguments for " << c << ")";
111-
throw std::runtime_error(oss.str());
112-
}
91+
AT_CHECK(
92+
t->numel() == numel,
93+
"Expected tensor for ", t, " to have ", numel,
94+
" elements; but it actually has ", t->numel(), " elements",
95+
" (while checking arguments for ", c, ")");
11396
}
11497

11598
void checkSameNumel(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
116-
if (t1->numel() != t2->numel()) {
117-
std::ostringstream oss;
118-
oss << "Expected tensor for " << t1 << " to have same number of elements as tensor for "
119-
<< t2 << "; but " << t1->numel() << " does not equal " << t2->numel()
120-
<< " (while checking arguments for " << c << ")";
121-
throw std::runtime_error(oss.str());
122-
}
99+
AT_CHECK(
100+
t1->numel() == t2->numel(),
101+
"Expected tensor for ", t1,
102+
" to have same number of elements as tensor for ", t2, "; but ",
103+
t1->numel(), " does not equal ", t2->numel(),
104+
" (while checking arguments for ", c, ")");
123105
}
124106

125107
void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors) {
@@ -136,42 +118,34 @@ void checkSameGPU(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
136118
oss << "Tensor for " << t2 << " is on CPU, ";
137119
}
138120
oss << "but expected " << ((!(t1->is_cuda() || t2->is_cuda())) ? "them" : "it")
139-
<< " to be on GPU (while checking arguments for " << c << ")";
140-
throw std::runtime_error(oss.str());
141-
}
142-
if (t1->get_device() != t2->get_device()) {
143-
std::ostringstream oss;
144-
oss << "Expected tensor for " << t1 << " to have the same device as "
145-
<< "tensor for " << t2 << "; but device " << t1->get_device() << " "
146-
<< "does not equal " << t2->get_device()
147-
<< " (while checking arguments for " << c << ")";
148-
throw std::runtime_error(oss.str());
121+
<< " to be on GPU (while checking arguments for " << c << ")";
122+
AT_ERROR(oss.str());
149123
}
124+
AT_CHECK(
125+
t1->get_device() == t2->get_device(),
126+
"Expected tensor for ", t1, " to have the same device as tensor for ", t2,
127+
"; but device ", t1->get_device(), " does not equal ", t2->get_device(),
128+
" (while checking arguments for ", c, ")");
150129
}
151130

152131
void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors) {
153132
checkAllSame(c, tensors, checkSameGPU);
154133
}
155134

156135
void checkSameType(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
157-
if (t1->type() != t2->type()) {
158-
std::ostringstream oss;
159-
oss << "Expected tensor for " << t1 << " to have the same type as "
160-
<< "tensor for " << t2 << "; but type " << t1->toString() << " "
161-
<< "does not equal " << t2->toString()
162-
<< " (while checking arguments for " << c << ")";
163-
throw std::runtime_error(oss.str());
164-
}
136+
AT_CHECK(
137+
t1->type() == t2->type(),
138+
"Expected tensor for ", t1, " to have the same type as tensor for ", t2,
139+
"; but type ", t1->toString(), " does not equal ", t2->toString(),
140+
" (while checking arguments for ", c, ")");
165141
}
166142

167143
void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType ty) {
168-
if (t->type().scalarType() != ty) {
169-
std::ostringstream oss;
170-
oss << "Expected tensor for " << t << " to have scalar type "
171-
<< toString(ty) << "; but got " << t->toString()
172-
<< " instead (while checking arguments for " << c << ")";
173-
throw std::runtime_error(oss.str());
174-
}
144+
AT_CHECK(
145+
t->type().scalarType() == ty,
146+
"Expected tensor for ", t, " to have scalar type ", toString(ty),
147+
"; but got ", t->toString(), " instead (while checking arguments for ", c,
148+
")");
175149
}
176150

177151
void checkScalarTypes(CheckedFrom c, const TensorArg& t,
@@ -190,7 +164,7 @@ void checkScalarTypes(CheckedFrom c, const TensorArg& t,
190164
}
191165
oss << "; but got " << t->toString()
192166
<< " instead (while checking arguments for " << c << ")";
193-
throw std::runtime_error(oss.str());
167+
AT_ERROR(oss.str());
194168
}
195169
}
196170

@@ -199,24 +173,18 @@ void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors) {
199173
}
200174

201175
void checkSameDim(CheckedFrom c, const TensorGeometryArg& t1, const TensorGeometryArg& t2) {
202-
if (t1->dim() != t2->dim()) {
203-
std::ostringstream oss;
204-
oss << "Expected tensor for " << t1 << " to have the same dimension as "
205-
<< "tensor for " << t2 << "; but " << t1->dim() << " "
206-
<< "does not equal " << t2->dim()
207-
<< " (while checking arguments for " << c << ")";
208-
throw std::runtime_error(oss.str());
209-
}
176+
AT_CHECK(
177+
t1->dim() == t2->dim(),
178+
"Expected tensor for ", t1, " to have the same dimension as tensor for ",
179+
t2, "; but ", t1->dim(), " does not equal ", t2->dim(),
180+
" (while checking arguments for ", c, ")");
210181
}
211182

212183
void checkDefined(CheckedFrom c, const TensorArg& t) {
213-
if (!t->defined()) {
214-
std::ostringstream oss;
215-
oss << "Expected tensor for " << t << " to be non-null, "
216-
<< "but it was undefined "
217-
<< " (while checking arguments for " << c << ")";
218-
throw std::runtime_error(oss.str());
219-
}
184+
AT_CHECK(
185+
t->defined(),
186+
"Expected tensor for ", t, " to be non-null, but it was undefined ",
187+
" (while checking arguments for ", c, ")");
220188
}
221189

222190
void checkAllDefined(CheckedFrom c, ArrayRef<TensorArg> ts) {
@@ -227,13 +195,11 @@ void checkAllDefined(CheckedFrom c, ArrayRef<TensorArg> ts) {
227195
}
228196

229197
void checkBackend(CheckedFrom c, const Tensor& t, Backend backend) {
230-
if (t.type().backend() != backend) {
231-
std::ostringstream oss;
232-
oss << "Expected tensor to have " << toString(backend) << " Backend, but got tensor with "
233-
<< toString(t.type().backend()) << " Backend "
234-
<< "(while checking arguments for " << c << ")";
235-
throw std::runtime_error(oss.str());
236-
}
198+
AT_CHECK(
199+
t.type().backend() == backend,
200+
"Expected tensor to have ", toString(backend),
201+
" Backend, but got tensor with ", toString(t.type().backend()), " Backend ",
202+
"(while checking arguments for ", c, ")");
237203
}
238204

239205
void checkBackend(CheckedFrom c, ArrayRef<Tensor> tensors, at::Backend backend) {

aten/src/ATen/Utils.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,16 @@
1010
#include <typeinfo>
1111
#include <numeric>
1212

13+
#if defined(__clang__)
14+
#define __ubsan_ignore_float_divide_by_zero__ __attribute__((no_sanitize("float-divide-by-zero")))
15+
#define __ubsan_ignore_function__ __attribute__((no_sanitize("function")))
16+
#define __ubsan_ignore_vptr__ __attribute__((no_sanitize("vptr")))
17+
#else
18+
#define __ubsan_ignore_float_divide_by_zero__
19+
#define __ubsan_ignore_function__
20+
#define __ubsan_ignore_vptr__
21+
#endif
22+
1323
namespace at {
1424

1525
AT_API int _crash_if_asan(int);

0 commit comments

Comments
 (0)