Commit b40c0bb

Merge pull request #157 from iotamudelta/ifu

Merge from upstream

2 parents: 1245c83 + 67097e9

322 files changed: +4690 −2028 lines

.jenkins/pytorch/build.sh

Lines changed: 2 additions & 2 deletions

@@ -6,8 +6,8 @@ fi
 
 # TODO: move this to Docker
 # TODO: add both NCCL and MPI in CI test by fixing these test first
-# sudo apt-get update
-# sudo apt-get install libnccl-dev libnccl2
+sudo apt-get update
+sudo apt-get install libnccl-dev libnccl2
 # sudo apt-get install openmpi-bin libopenmpi-dev
 
 # Required environment variable: $BUILD_ENVIRONMENT

CMakeLists.txt

Lines changed: 4 additions & 2 deletions

@@ -144,7 +144,7 @@ option(USE_DISTRIBUTED "Use THD (distributed)" OFF)
 # Used when building Caffe2 through setup.py
 option(BUILDING_WITH_TORCH_LIBS "Tell cmake if Caffe2 is being built alongside torch libs" OFF)
 
-if (ANDROID OR IOS)
+if (ANDROID OR IOS)
   set(BUILD_ATEN_MOBILE ON)
 endif()
 
@@ -213,7 +213,9 @@ if(NOT MSVC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-strict-overflow")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-strict-aliasing")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-stringop-overflow")
+  if (CMAKE_COMPILER_IS_GNUCXX AND NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0.0))
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-stringop-overflow")
+  endif()
   # These flags are not available in GCC-4.8.5. Set only when using clang.
   # Compared against https://gcc.gnu.org/onlinedocs/gcc-4.8.5/gcc/Option-Summary.html
   if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ If you are not familiar with creating a Pull Request, here are some guides:
 
 To develop PyTorch on your machine, here are some tips:
 
-1. Uninstall all existing pytorch installs
+1. Uninstall all existing PyTorch installs:
 ```
 conda uninstall pytorch
 pip uninstall torch
README.md

Lines changed: 1 addition & 2 deletions

@@ -105,8 +105,7 @@ We hope you never spend hours debugging your code because of bad stack traces or
 PyTorch has minimal framework overhead. We integrate acceleration libraries
 such as Intel MKL and NVIDIA (cuDNN, NCCL) to maximize speed.
 At the core, its CPU and GPU Tensor and neural network backends
-(TH, THC, THNN, THCUNN) are written as independent libraries with a C99 API.
-They are mature and have been tested for years.
+(TH, THC, THNN, THCUNN) are mature and have been tested for years.
 
 Hence, PyTorch is quite fast – whether you run small or large neural networks.

aten/src/ATen/Formatting.cpp

Lines changed: 5 additions & 5 deletions

@@ -244,7 +244,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
   if(!tensor_.defined()) {
     stream << "[ Tensor (undefined) ]";
   } else if (tensor_.is_sparse()) {
-    stream << "[ " << tensor_.pImpl->toString() << "{}\n";
+    stream << "[ " << tensor_.toString() << "{}\n";
     stream << "indices:\n" << tensor_._indices() << "\n";
     stream << "values:\n" << tensor_._values() << "\n";
     stream << "size:\n" << tensor_.sizes() << "\n";
@@ -254,7 +254,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
   Tensor tensor = tensor_.toType(cpudouble).contiguous();
   if(tensor.ndimension() == 0) {
     stream << defaultfloat << tensor.data<double>()[0] << std::endl;
-    stream << "[ " << tensor_.pImpl->toString() << "{} ]";
+    stream << "[ " << tensor_.toString() << "{} ]";
   } else if(tensor.ndimension() == 1) {
     if (tensor.numel() > 0) {
       double scale;
@@ -268,17 +268,17 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
         stream << std::setw(sz) << tensor_p[i]/scale << std::endl;
       }
     }
-    stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0) << "} ]";
+    stream << "[ " << tensor_.toString() << "{" << tensor.size(0) << "} ]";
   } else if(tensor.ndimension() == 2) {
     if (tensor.numel() > 0) {
       __printMatrix(stream, tensor, linesize, 0);
    }
-    stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0) << "," << tensor.size(1) << "} ]";
+    stream << "[ " << tensor_.toString() << "{" << tensor.size(0) << "," << tensor.size(1) << "} ]";
   } else {
     if (tensor.numel() > 0) {
       __printTensor(stream, tensor, linesize);
     }
-    stream << "[ " << tensor_.pImpl->toString() << "{" << tensor.size(0);
+    stream << "[ " << tensor_.toString() << "{" << tensor.size(0);
     for(int64_t i = 1; i < tensor.ndimension(); i++) {
       stream << "," << tensor.size(i);
     }
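Every change in this file is the same mechanical substitution: call sites stop reaching through the tensor's pImpl pointer and use Tensor::toString() instead. A minimal sketch of the forwarding pattern this relies on (the types below are illustrative stand-ins, not the actual ATen definitions):

```cpp
#include <string>

// Illustrative miniature of TensorImpl; the real class carries far more state.
struct ImplSketch {
  std::string toString() const { return "CPUDoubleType"; }
};

struct TensorSketch {
  ImplSketch* pImpl;
  // Forwarding accessor: callers no longer depend on the pImpl layout,
  // so the implementation pointer can change without touching them.
  std::string toString() const { return pImpl->toString(); }
};
```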

aten/src/ATen/Formatting.h

Lines changed: 0 additions & 1 deletion

@@ -18,7 +18,6 @@ static inline void print(const Tensor & t, int64_t linesize=80) {
 }
 
 static inline std::ostream& operator<<(std::ostream & out, Scalar s) {
-  s = s.local();
   return out << (s.isFloatingPoint() ? s.toDouble() : s.toLong());
 }
 
aten/src/ATen/Retainable.h

Lines changed: 3 additions & 1 deletion

@@ -2,11 +2,13 @@
 
 #include <atomic>
 
+#include "ATen/core/ATenGeneral.h"
+
 namespace at {
 
 // base class for refcounted things, allows for collects of generic
 // refcounted objects that include tensors
-struct Retainable {
+struct AT_API Retainable {
   Retainable(): refcount(1), weak_refcount(1) {}
   void retain() {
     ++refcount;
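AT_API is ATen's shared-library export annotation, pulled in through the new ATenGeneral.h include, so Retainable's symbols stay visible across the library boundary. A hypothetical reconstruction of the usual pattern such a macro follows (the real definition lives in ATen/core/ATenGeneral.h and may differ in detail):

```cpp
// Sketch only: export macro in the style of AT_API.
#if defined(_WIN32)
#  if defined(ATen_cpu_EXPORTS)           // defined while building the DLL
#    define AT_API __declspec(dllexport)  // symbol is exported from the DLL
#  else
#    define AT_API __declspec(dllimport)  // symbol is imported by consumers
#  endif
#else
#  define AT_API __attribute__((visibility("default")))
#endif
```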

aten/src/ATen/Scalar.cpp

Lines changed: 0 additions & 4 deletions

@@ -21,10 +21,6 @@ Tensor Scalar::toTensor() const {
   }
 }
 
-Scalar Scalar::local() const {
-  return *this;
-}
-
 Scalar Scalar::operator-() const {
   if (isFloatingPoint()) {
     return Scalar(-v.d);

aten/src/ATen/Scalar.h

Lines changed: 0 additions & 3 deletions

@@ -28,9 +28,6 @@ class AT_API Scalar {
 
 #undef DEFINE_IMPLICIT_CTOR
 
-  // return a new scalar that is guarenteed to be not backed by a tensor.
-  Scalar local() const;
-
 #define DEFINE_ACCESSOR(type,name,member) \
   type to##name () const { \
     if (Tag::HAS_d == tag) { \
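Scalar::local() existed to return a scalar guaranteed not to be tensor-backed; since its body was a plain return *this (see Scalar.cpp above), a Scalar is evidently always a plain value now, and the call dropped from Formatting.h was a no-op. A minimal usage sketch under that reading (the header path is assumed for this era of ATen):

```cpp
#include <ATen/Scalar.h>  // header path assumed

int main() {
  at::Scalar s = 2.5;  // implicit constructor from double
  // Accessors read the value directly; no local() call is needed first.
  double d = s.isFloatingPoint() ? s.toDouble()
                                 : static_cast<double>(s.toLong());
  return d == 2.5 ? 0 : 1;
}
```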

aten/src/ATen/SparseTensorImpl.cpp

Lines changed: 1 addition & 1 deletion

@@ -75,7 +75,7 @@ TensorImpl* SparseTensorImpl::maybe_zero_dim(bool condition_when_zero_dim) {
       " changing dimensionality via maybe_zero_dim");
   return this;
 }
-const Storage& SparseTensorImpl::storage() {
+const Storage& SparseTensorImpl::storage() const {
   AT_ERROR("sparse tensors do not have storage");
 }
 int64_t SparseTensorImpl::storage_offset() const {

aten/src/ATen/SparseTensorImpl.h

Lines changed: 1 addition & 1 deletion

@@ -57,7 +57,7 @@ struct AT_API SparseTensorImpl : public TensorImpl {
 
   int64_t dim() const override;
   TensorImpl* maybe_zero_dim(bool condition_when_zero_dim) override;
-  const Storage& storage() override;
+  const Storage& storage() const override;
   int64_t storage_offset() const override;
 
   // WARNING: This function does NOT preserve invariants of sparseDims/denseDims with
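The declaration and the definition gain const together because const qualification is part of a virtual function's signature: once the base class (TensorImpl, presumably updated elsewhere in this commit) declares storage() const, a non-const version in the derived class would no longer override it. A self-contained illustration:

```cpp
struct StorageSketch {};  // stand-in for at::Storage

struct BaseSketch {
  // const is part of the signature an override must match.
  virtual const StorageSketch& storage() const = 0;
  virtual ~BaseSketch() = default;
};

struct DerivedSketch : BaseSketch {
  // Dropping the trailing const here would make `override` a compile
  // error, because the function would hide rather than override.
  const StorageSketch& storage() const override {
    static StorageSketch s;
    return s;
  }
};
```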

aten/src/ATen/Storage.cpp

Lines changed: 6 additions & 2 deletions

@@ -8,7 +8,11 @@ Storage::Storage(
     size_t size,
     Allocator* allocator,
     bool resizable)
-    : storage_impl_(c10::make_intrusive<StorageImpl>(scalar_type, size, allocator, resizable)) {}
+    : storage_impl_(c10::make_intrusive<StorageImpl>(
+          at::scalarTypeToDataType(scalar_type),
+          size,
+          allocator,
+          resizable)) {}
 
 Storage::Storage(
     at::ScalarType scalar_type,
@@ -17,7 +21,7 @@ Storage::Storage(
     const std::function<void(void*)>& deleter,
     bool resizable)
     : storage_impl_(c10::make_intrusive<StorageImpl>(
-          scalar_type,
+          at::scalarTypeToDataType(scalar_type),
           size,
           std::move(data_ptr),
           /* allocator */ nullptr,
aten/src/ATen/Storage.h

Lines changed: 3 additions & 1 deletion

@@ -31,10 +31,12 @@ struct AT_API Storage {
   bool resizable() const { return storage_impl_->resizable(); }
   // get() use here is to get const-correctness
   void* data() const { return storage_impl_.get()->data(); }
+  const at::DataType dtype() const {
+    return storage_impl_->dtype();
+  }
   const at::DataPtr& data_ptr() const { return storage_impl_->data_ptr(); }
   DeviceType device_type() const { return storage_impl_->device_type(); }
   at::Allocator* allocator() const { return storage_impl_.get()->allocator(); }
-  at::ScalarType scalar_type() const { return storage_impl_->scalar_type(); }
   at::Device device() const { return storage_impl_->device(); }
 
   StorageImpl* unsafeReleaseStorageImpl() {
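Taken together with Storage.cpp above, the pattern is: Storage keeps accepting at::ScalarType at its constructor boundary, converts once via at::scalarTypeToDataType, and exposes the stored type as dtype(). A sketch of a caller-side adaptation under that reading (header path assumed; both conversion helpers are the ones named in this diff):

```cpp
#include <ATen/Storage.h>  // header path assumed

// Code that still needs a ScalarType converts back at the boundary.
at::ScalarType element_type_of(const at::Storage& storage) {
  at::DataType dt = storage.dtype();    // new accessor replacing scalar_type()
  return at::dataTypeToScalarType(dt);  // helper named in this commit
}
```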

aten/src/ATen/StorageImpl.cpp

Lines changed: 6 additions & 5 deletions

@@ -4,27 +4,28 @@
 namespace at {
 
 StorageImpl::StorageImpl(
-    at::ScalarType scalar_type,
+    at::DataType data_type,
     ptrdiff_t size,
     at::DataPtr data_ptr,
     at::Allocator* allocator,
     bool resizable)
-    : scalar_type_(scalar_type),
+    : data_type_(data_type),
       data_ptr_(std::move(data_ptr)),
       size_(size),
       resizable_(resizable),
       allocator_(allocator),
       finalizer_(nullptr) {}
 
 StorageImpl::StorageImpl(
-    at::ScalarType scalar_type,
+    at::DataType data_type,
     ptrdiff_t size,
     at::Allocator* allocator,
     bool resizable)
     : StorageImpl(
-          scalar_type,
+          data_type,
           size,
-          allocator->allocate(at::elementSize(scalar_type) * size),
+          allocator->allocate(
+              at::elementSize(dataTypeToScalarType(data_type)) * size),
           allocator,
           resizable) {}
 

aten/src/ATen/StorageImpl.h

Lines changed: 13 additions & 12 deletions

@@ -18,18 +18,18 @@ namespace at {
 
 struct Type;
 
-struct AT_API StorageImpl : public c10::raw_intrusive_ptr_target<StorageImpl> {
+struct AT_API StorageImpl : public c10::intrusive_ptr_target {
  public:
   StorageImpl() = delete;
   virtual ~StorageImpl() {};
   StorageImpl(
-      at::ScalarType scalar_type,
+      at::DataType data_type,
       ptrdiff_t size,
       at::DataPtr data_ptr,
       at::Allocator* allocator,
       bool resizable);
   StorageImpl(
-      at::ScalarType scalar_type,
+      at::DataType data_type,
       ptrdiff_t size,
       at::Allocator* allocator,
       bool resizable);
@@ -43,13 +43,14 @@ struct AT_API StorageImpl : public c10::raw_intrusive_ptr_target<StorageImpl> {
   // the real data shouldn't call th::from_type
   template <typename T>
   inline T* data() const {
-    auto scalar_type_T = at::CTypeToScalarType<th::from_type<T>>::to();
-    if (scalar_type_ != scalar_type_T) {
+    auto data_type_T =
+        at::scalarTypeToDataType(at::CTypeToScalarType<th::from_type<T>>::to());
+    if (dtype() != data_type_T) {
       AT_ERROR(
           "Attempt to access StorageImpl having data type ",
-          at::toString(scalar_type_),
+          dtype(),
           " as data type ",
-          at::toString(scalar_type_T));
+          data_type_T);
     }
     return unsafe_data<T>();
   }
@@ -70,7 +71,7 @@ struct AT_API StorageImpl : public c10::raw_intrusive_ptr_target<StorageImpl> {
   void operator=(const StorageImpl&) = delete;
 
   size_t elementSize() const {
-    return at::elementSize(scalar_type_);
+    return at::elementSize(dataTypeToScalarType(data_type_));
   }
 
   Type& type();
@@ -108,9 +109,9 @@ struct AT_API StorageImpl : public c10::raw_intrusive_ptr_target<StorageImpl> {
   at::Allocator* allocator() {
     return allocator_;
   };
-  at::ScalarType scalar_type() const {
-    return scalar_type_;
-  };
+  const DataType dtype() const {
+    return data_type_;
+  }
   const at::Allocator* allocator() const {
     return allocator_;
   };
@@ -129,7 +130,7 @@ struct AT_API StorageImpl : public c10::raw_intrusive_ptr_target<StorageImpl> {
   }
 
  private:
-  at::ScalarType scalar_type_;
+  at::DataType data_type_;
   at::DataPtr data_ptr_;
   ptrdiff_t size_;
   bool resizable_;
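The mechanical core of this file's change: StorageImpl now records an at::DataType instead of an at::ScalarType (and swaps its base to the plain c10::intrusive_ptr_target), converting with scalarTypeToDataType / dataTypeToScalarType wherever element sizes or C-type checks still speak ScalarType. A hypothetical, self-contained miniature of that conversion-pair pattern (the real enums cover many more types, and these helpers merely stand in for ATen's):

```cpp
#include <stdexcept>

// Hypothetical miniatures of the two enums.
enum class ScalarTypeSketch { Float, Double, Long };
enum class DataTypeSketch { Float, Double, Long };

// Stand-in for at::scalarTypeToDataType.
DataTypeSketch toDataType(ScalarTypeSketch s) {
  switch (s) {
    case ScalarTypeSketch::Float:  return DataTypeSketch::Float;
    case ScalarTypeSketch::Double: return DataTypeSketch::Double;
    case ScalarTypeSketch::Long:   return DataTypeSketch::Long;
  }
  throw std::runtime_error("unhandled ScalarTypeSketch");
}

// Stand-in for at::dataTypeToScalarType, used where element sizes are
// still keyed by ScalarType (as in elementSize() above).
ScalarTypeSketch toScalarType(DataTypeSketch d) {
  switch (d) {
    case DataTypeSketch::Float:  return ScalarTypeSketch::Float;
    case DataTypeSketch::Double: return ScalarTypeSketch::Double;
    case DataTypeSketch::Long:   return ScalarTypeSketch::Long;
  }
  throw std::runtime_error("unhandled DataTypeSketch");
}
```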
