
Merge from pytorch upstream #18


Merged: 23 commits, Jul 6, 2018. Changes shown are from all commits.

Commits
e09d993  Move easy THStorage/THCStorage functions out of generic (#9136)  [ezyang, Jul 3, 2018]
4f89777  Removing extraneous main function to fix buck test detection (#9121)  [pjh5, Jul 3, 2018]
f17b9e4  Fix boolean indexing. (#8920)  [gchanan, Jul 3, 2018]
b432837  Add some missing error checks in sparse. (#9140)  [ezyang, Jul 3, 2018]
b479494  loss plugin: Fix indexing into a scalar (#9143)  [Jul 3, 2018]
e3dbdb2  Fix the comments: code and comments dimensions mis-match (#9070)  [nkhuyu, Jul 3, 2018]
49f88ac  Add grid lines for activation images, fixes #9130 (#9134)  [vishwakftw, Jul 4, 2018]
4b2b690  Install THC/THCGeneral.hpp (#9159)  [pietern, Jul 4, 2018]
08daed4  Fix bug in flip() (#9156)  [vishwakftw, Jul 4, 2018]
f6027bb  Install hpp headers for CPP Extensions (#9182)  [fmassa, Jul 5, 2018]
14cbd9a  Implement torch.pinverse : Pseudo-inverse (#9052)  [vishwakftw, Jul 5, 2018]
1c9073b  Allow passing '0' to NO_MULTIPROCESSING_SPAWN (#9187)  [Jul 5, 2018]
213540c  Add meshgrid to PyTorch (#8581)  [zasdfgbnm, Jul 5, 2018]
f45dfbc  Add support for ArgMax and ArgMin in C2 onnx backend and frontend (#9…  [houseroad, Jul 5, 2018]
21c420c  Remove unused RowwiseArgMaxOp (#9119)  [xiaomengy, Jul 5, 2018]
ff501c3  Turn on UBSAN in the OSS build (#8813)  [Jul 5, 2018]
a769fae  Fix TestAutograd.test_pinverse not actually testing (#9192)  [ssnl, Jul 6, 2018]
7b25cbb  Test nn.Module on non-contiguous inputs (#9114)  [ssnl, Jul 6, 2018]
eadc507  Use torch.save in _StorageBase.__reduce__ (#9184)  [mrocklin, Jul 6, 2018]
1f1fb81  Use a static random_device in StorageSharing (#9080)  [apaszke, Jul 6, 2018]
168a29f  Create native wrappers around dimension reduction functions. (#9197)  [gchanan, Jul 6, 2018]
84884dc  Allow passing '0' to ASAN/UBSAN flags (#9202)  [Jul 6, 2018]
d28c837  Merge remote-tracking branch 'upstream/master'  [iotamudelta, Jul 6, 2018]
2 changes: 1 addition & 1 deletion .jenkins/pytorch/build-asan.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,6 @@ export ASAN_OPTIONS=detect_leaks=0:symbolize=1

# TODO: Make the ASAN flags a more unified env var
CC="clang" CXX="clang++" LDSHARED="clang --shared" \
CFLAGS="-fsanitize=address -shared-libasan" \
CFLAGS="-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -shared-libasan" \
NO_CUDA=1 DEBUG=1 \
python setup.py install
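
For context, -fsanitize=undefined makes clang instrument the build so that undefined behavior (signed overflow, bad shifts, division by zero, and so on) is reported at runtime, and -fno-sanitize-recover=all turns every such report into a hard failure instead of a printed warning. A minimal sketch of the kind of bug this configuration now catches (illustrative, not part of the PR):

// ubsan_demo.cpp -- hypothetical demo, not part of this PR.
// Build: clang++ -fsanitize=undefined -fno-sanitize-recover=all ubsan_demo.cpp
#include <climits>
#include <cstdio>

int main() {
  int x = INT_MAX;
  int y = x + 1;  // signed integer overflow: UB, so UBSAN aborts here
  std::printf("%d\n", y);
  return 0;
}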
16 changes: 13 additions & 3 deletions .jenkins/pytorch/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -21,17 +21,27 @@ popd
# ASAN test is not working
if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
export ASAN_OPTIONS=detect_leaks=0:symbolize=1
export UBSAN_OPTIONS=print_stacktrace=1
export PYTORCH_TEST_WITH_ASAN=1
export PYTORCH_TEST_WITH_UBSAN=1
# TODO: Figure out how to avoid hard-coding these paths
export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-5.0/bin/llvm-symbolizer
export LD_PRELOAD=/usr/lib/llvm-5.0/lib/clang/5.0.0/lib/linux/libclang_rt.asan-x86_64.so
# Increase stack size, because ASAN red zones use more stack
ulimit -s 81920

function get_exit_code() {
set +e
"$@"
retcode=$?
set -e
return $retcode
}
(cd test && python -c "import torch")
echo "The next two invocations are expected to crash; if they don't that means ASAN is misconfigured"
(cd test && ! python -c "import torch; torch._C._crash_if_csrc_asan(3)")
(cd test && ! python -c "import torch; torch._C._crash_if_aten_asan(3)")
echo "The next three invocations are expected to crash; if they don't that means ASAN/UBSAN is misconfigured"
(cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_csrc_asan(3)")
(cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_csrc_ubsan(0)")
(cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_aten_asan(3)")
fi

export ATEN_DISABLE_AVX=
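
A note on the new helper: get_exit_code runs its arguments with exit-on-error suspended (set +e), captures the exit status, restores set -e, and returns that status. Combined with the leading "!", each invocation above asserts that the crash probe really does die under the sanitizer; if one of them exits cleanly, ASAN/UBSAN is misconfigured and the test step fails.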
29 changes: 27 additions & 2 deletions aten/src/ATen/Declarations.cwrap
Original file line number Diff line number Diff line change
Expand Up @@ -828,6 +828,13 @@
- arg: THTensor* self
broadcast: other fallback
- THTensor* other
]]
[[
name: _th_min
variants:
- method
- function
options:
- cname: min
return: argument 0,1
scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
@@ -860,6 +867,13 @@
         - arg: THTensor* self
           broadcast: other fallback
         - THTensor* other
+]]
+[[
+  name: _th_max
+  variants:
+    - method
+    - function
+  options:
     - cname: max
       return: argument 0,1
       scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
@@ -875,12 +889,13 @@
           default: "false"
 ]]
 [[
-  name: kthvalue
+  name: _th_kthvalue
   backends:
     - CPU
   variants:
     - method
     - function
+  cname: kthvalue
   return: argument 0,1
   scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
   arguments:
@@ -897,10 +912,11 @@
           default: "false"
 ]]
 [[
-  name: mode
+  name: _th_mode
   variants:
     - method
     - function
+  cname: mode
   return: argument 0,1
   scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
   arguments:
@@ -926,6 +942,15 @@
   return: real
   arguments:
     - THTensor* self
+]]
+[[
+  name: _th_median
+  variants:
+    - method
+    - function
+  cname: median
+  return: argument 0,1
+  options:
     - cname: median
       scalar_check: self_->isScalar() || (keepdim == false && self_->dim() == 1)
       arguments:
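
These renames pair with commit 168a29f ("Create native wrappers around dimension reduction functions"): the TH-generated bindings move to _th_-prefixed names so that thin native functions can take over the public names. A hypothetical sketch of the wrapper shape; the signature and the at::_th_kthvalue binding assumed here are illustrative, not the PR's exact code:

// Hypothetical native wrapper: the public kthvalue keeps its signature and
// simply forwards to the renamed TH binding (assumes a generated
// at::_th_kthvalue with this signature exists).
#include <tuple>
using at::Tensor;

std::tuple<Tensor, Tensor> kthvalue(const Tensor& self, int64_t k,
                                    int64_t dim, bool keepdim) {
  return at::_th_kthvalue(self, k, dim, keepdim);
}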
6 changes: 3 additions & 3 deletions aten/src/ATen/SparseTensorImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,9 +62,9 @@ void SparseTensorImpl::set_indices_and_values(const Tensor& indices, const Tenso
// dimensions at the moment
bool empty = values.numel() == 0;
AT_CHECK(values.type().toSparse() == type(), "values type must match sparse tensor type");
AT_CHECK(indices.type().scalarType() == kLong);
AT_CHECK(indices.type().backend() == values.type().backend());
AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device());
AT_CHECK(indices.type().scalarType() == kLong, "indices must be an int64 tensor");
AT_CHECK(indices.type().backend() == values.type().backend(), "backend of indices (", indices.type().backend(), ") must match backend of values (", values.type().backend(), ")");
AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device(), "device of indices (", indices.get_device(), ") must match device of values (", values.get_device(), ")");
if (!empty) {
AT_CHECK(indices.dim() == 2, "indices must be nDim x nnz");
AT_CHECK(indices.size(1) == values.size(0), "indices and values must have same nnz");
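
The change here is purely diagnostic: each bare AT_CHECK(cond) gains a human-readable message, built from the variadic arguments that follow the condition. A minimal sketch of how such a check macro can be written (an illustration, not ATen's actual implementation):

#include <initializer_list>
#include <sstream>
#include <stdexcept>
#include <string>

// Stream every message fragment into a single string.
template <typename... Args>
std::string check_msg(Args&&... args) {
  std::ostringstream oss;
  (void)std::initializer_list<int>{((oss << args), 0)...};
  return oss.str();
}

// Evaluate the condition; only on failure are the message arguments
// evaluated and stringified, so passing checks stay cheap.
#define MY_CHECK(cond, ...)                             \
  do {                                                  \
    if (!(cond)) {                                      \
      throw std::runtime_error(check_msg(__VA_ARGS__)); \
    }                                                   \
  } while (0)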
172 changes: 69 additions & 103 deletions aten/src/ATen/TensorUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,32 +20,24 @@ std::ostream& operator<<(std::ostream & out, TensorGeometryArg t) {
}

void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim) {
if (t->dim() != dim) {
std::ostringstream oss;
oss << "Expected " << dim << "-dimensional tensor, but got "
<< t->dim() << "-dimensional tensor for " << t
<< " (while checking arguments for " << c << ")";
throw std::runtime_error(oss.str());
}
AT_CHECK(t->dim() == dim,
"Expected ", dim, "-dimensional tensor, but got ", t->dim(),
"-dimensional tensor for ", t," (while checking arguments for ", c, ")");
}

void checkDimRange(CheckedFrom c, const TensorGeometryArg& t, int64_t dim_start, int64_t dim_end) {
if (t->dim() < dim_start || t->dim() >= dim_end) {
std::ostringstream oss;
oss << "Expected " << dim_start << " to " << (dim_end - 1) << " dimensions, but got "
<< t->dim() << "-dimensional tensor for " << t
<< " (while checking arguments for " << c << ")";
throw std::runtime_error(oss.str());
}
AT_CHECK(
t->dim() >= dim_start && t->dim() < dim_end,
"Expected ", dim_start, " to ", (dim_end - 1), " dimensions, but got ",
t->dim(), "-dimensional tensor for ", t, " (while checking arguments for ",
c, ")");
}

void checkContiguous(CheckedFrom c, const TensorGeometryArg& t) {
if (!t->is_contiguous()) {
std::ostringstream oss;
oss << "Expected contiguous tensor, but got non-contiguous tensor for " << t
<< " (while checking arguments for " << c << ")";
throw std::runtime_error(oss.str());
}
AT_CHECK(
t->is_contiguous(),
"Expected contiguous tensor, but got non-contiguous tensor for ", t,
" (while checking arguments for ", c, ")");
}

void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts) {
@@ -57,23 +49,18 @@ void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts) {
 
 void checkSize(CheckedFrom c, const TensorGeometryArg& t, IntList sizes) {
   checkDim(c, t, sizes.size());
-  if (!t->sizes().equals(sizes)) {
-    std::ostringstream oss;
-    oss << "Expected tensor of size " << sizes << ", but got tensor of size "
-        << t->sizes() << " for " << t
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t->sizes().equals(sizes),
+    "Expected tensor of size ", sizes, ", but got tensor of size ", t->sizes(),
+    " for ", t, " (while checking arguments for ", c, ")");
 }
 
 void checkSize(CheckedFrom c, const TensorGeometryArg& t, int64_t dim, int64_t size) {
-  if (t->size(dim) != size) {
-    std::ostringstream oss;
-    oss << "Expected tensor to have size " << size << " at dimension " << dim
-        << ", but got size " << t->size(dim) << " for " << t
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t->size(dim) == size,
+    "Expected tensor to have size ", size, " at dimension ", dim,
+    ", but got size ", t->size(dim), " for ", t,
+    " (while checking arguments for ", c, ")");
 }
 
 void checkAllSame(CheckedFrom c, ArrayRef<TensorArg> tensors, void(*fn)(CheckedFrom, const TensorArg&, const TensorArg&)) {
@@ -89,37 +76,32 @@ void checkAllSame(CheckedFrom c, ArrayRef<TensorArg> tensors, void(*fn)(CheckedFrom, const TensorArg&, const TensorArg&)) {
 }
 
 void checkSameSize(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
-  if (!t1->sizes().equals(t2->sizes())) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t1 << " to have same size as tensor for "
-        << t2 << "; but " << t1->sizes() << " does not equal " << t2->sizes()
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t1->sizes().equals(t2->sizes()),
+    "Expected tensor for ", t1, " to have same size as tensor for ", t2,
+    "; but ", t1->sizes(), " does not equal ", t2->sizes(),
+    " (while checking arguments for ", c, ")");
 }
 
 void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors) {
   checkAllSame(c, tensors, checkSameSize);
 }
 
 void checkNumel(CheckedFrom c, const TensorGeometryArg& t, int64_t numel) {
-  if (t->numel() != numel) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t << " to have "
-        << numel << " elements; but it actually has " << t->numel() << " elements"
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t->numel() == numel,
+    "Expected tensor for ", t, " to have ", numel,
+    " elements; but it actually has ", t->numel(), " elements",
+    " (while checking arguments for ", c, ")");
 }
 
 void checkSameNumel(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
-  if (t1->numel() != t2->numel()) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t1 << " to have same number of elements as tensor for "
-        << t2 << "; but " << t1->numel() << " does not equal " << t2->numel()
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t1->numel() == t2->numel(),
+    "Expected tensor for ", t1,
+    " to have same number of elements as tensor for ", t2, "; but ",
+    t1->numel(), " does not equal ", t2->numel(),
+    " (while checking arguments for ", c, ")");
 }
 
 void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors) {
@@ -136,42 +118,34 @@ void checkSameGPU(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
     oss << "Tensor for " << t2 << " is on CPU, ";
   }
   oss << "but expected " << ((!(t1->is_cuda() || t2->is_cuda())) ? "them" : "it")
-      << " to be on GPU (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
-  if (t1->get_device() != t2->get_device()) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t1 << " to have the same device as "
-        << "tensor for " << t2 << "; but device " << t1->get_device() << " "
-        << "does not equal " << t2->get_device()
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
+    << " to be on GPU (while checking arguments for " << c << ")";
+  AT_ERROR(oss.str());
   }
+  AT_CHECK(
+    t1->get_device() == t2->get_device(),
+    "Expected tensor for ", t1, " to have the same device as tensor for ", t2,
+    "; but device ", t1->get_device(), " does not equal ", t2->get_device(),
+    " (while checking arguments for ", c, ")");
 }
 
 void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors) {
   checkAllSame(c, tensors, checkSameGPU);
 }
 
 void checkSameType(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
-  if (t1->type() != t2->type()) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t1 << " to have the same type as "
-        << "tensor for " << t2 << "; but type " << t1->toString() << " "
-        << "does not equal " << t2->toString()
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t1->type() == t2->type(),
+    "Expected tensor for ", t1, " to have the same type as tensor for ", t2,
+    "; but type ", t1->toString(), " does not equal ", t2->toString(),
+    " (while checking arguments for ", c, ")");
 }
 
 void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType ty) {
-  if (t->type().scalarType() != ty) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t << " to have scalar type "
-        << toString(ty) << "; but got " << t->toString()
-        << " instead (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t->type().scalarType() == ty,
+    "Expected tensor for ", t, " to have scalar type ", toString(ty),
+    "; but got ", t->toString(), " instead (while checking arguments for ", c,
+    ")");
 }
 
 void checkScalarTypes(CheckedFrom c, const TensorArg& t,
@@ -190,7 +164,7 @@ void checkScalarTypes(CheckedFrom c, const TensorArg& t,
     }
     oss << "; but got " << t->toString()
        << " instead (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
+    AT_ERROR(oss.str());
   }
 }
 
@@ -199,24 +173,18 @@ void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors) {
 }
 
 void checkSameDim(CheckedFrom c, const TensorGeometryArg& t1, const TensorGeometryArg& t2) {
-  if (t1->dim() != t2->dim()) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t1 << " to have the same dimension as "
-        << "tensor for " << t2 << "; but " << t1->dim() << " "
-        << "does not equal " << t2->dim()
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t1->dim() == t2->dim(),
+    "Expected tensor for ", t1, " to have the same dimension as tensor for ",
+    t2, "; but ", t1->dim(), " does not equal ", t2->dim(),
+    " (while checking arguments for ", c, ")");
 }
 
 void checkDefined(CheckedFrom c, const TensorArg& t) {
-  if (!t->defined()) {
-    std::ostringstream oss;
-    oss << "Expected tensor for " << t << " to be non-null, "
-        << "but it was undefined "
-        << " (while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t->defined(),
+    "Expected tensor for ", t, " to be non-null, but it was undefined ",
+    " (while checking arguments for ", c, ")");
 }
 
 void checkAllDefined(CheckedFrom c, ArrayRef<TensorArg> ts) {
@@ -227,13 +195,11 @@ void checkAllDefined(CheckedFrom c, ArrayRef<TensorArg> ts) {
 }
 
 void checkBackend(CheckedFrom c, const Tensor& t, Backend backend) {
-  if (t.type().backend() != backend) {
-    std::ostringstream oss;
-    oss << "Expected tensor to have " << toString(backend) << " Backend, but got tensor with "
-        << toString(t.type().backend()) << " Backend "
-        << "(while checking arguments for " << c << ")";
-    throw std::runtime_error(oss.str());
-  }
+  AT_CHECK(
+    t.type().backend() == backend,
+    "Expected tensor to have ", toString(backend),
+    " Backend, but got tensor with ", toString(t.type().backend()), " Backend ",
+    "(while checking arguments for ", c, ")");
 }
 
 void checkBackend(CheckedFrom c, ArrayRef<Tensor> tensors, at::Backend backend) {
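
Every function in this file gets the same mechanical rewrite: an if block that builds a std::ostringstream and throws std::runtime_error collapses into a single AT_CHECK on the positive predicate, and the one case whose message needs real control flow (checkSameGPU) keeps its ostringstream and fails through AT_ERROR instead. A hypothetical call site, to show what a failure now reads like (the argument names here are invented):

// Hypothetical usage of the rewritten checks (assumes an at::Tensor named
// mat is in scope). A failure produces one formatted message, e.g.
//   Expected 2-dimensional tensor, but got 3-dimensional tensor for
//   argument 'mat' (while checking arguments for addmm)
TensorArg mat_arg(mat, "mat", 1);
checkDim("addmm", mat_arg, 2);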
10 changes: 10 additions & 0 deletions aten/src/ATen/Utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,16 @@
#include <typeinfo>
#include <numeric>

#if defined(__clang__)
#define __ubsan_ignore_float_divide_by_zero__ __attribute__((no_sanitize("float-divide-by-zero")))
#define __ubsan_ignore_function__ __attribute__((no_sanitize("function")))
#define __ubsan_ignore_vptr__ __attribute__((no_sanitize("vptr")))
#else
#define __ubsan_ignore_float_divide_by_zero__
#define __ubsan_ignore_function__
#define __ubsan_ignore_vptr__
#endif

namespace at {

AT_API int _crash_if_asan(int);
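
These macros give ATen a portable way to exempt a single function from one specific UBSAN check while leaving the rest of the build instrumented; under non-clang compilers they expand to nothing. A hypothetical use, assuming a function that deliberately relies on IEEE-754 division by zero producing inf/NaN:

// Hypothetical example: suppress only the float-divide-by-zero check for a
// function whose division by zero is intentional; all other UBSAN checks
// still apply to this function.
static __ubsan_ignore_float_divide_by_zero__ double ratio(double num, double den) {
  return num / den;  // den == 0.0 yields +/-inf or NaN by design
}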