Commit 6868c98

Merge pull request #196 from iotamudelta/ifu
Merge from upstream
2 parents f922d21 + 6d0674e

196 files changed: +4814 / -2262 lines

.jenkins/caffe2/build.sh

Lines changed: 1 addition & 1 deletion
@@ -226,7 +226,7 @@ else
   export MAX_JOBS=`expr $(nproc) - 1`
 fi
 
-FULL_CAFFE2=1 python setup.py install --user
+USE_OPENCV=1 BUILD_BINARY=1 python setup.py install --user
 
 # This is to save test binaries for testing
 cp -r torch/lib/tmp_install $INSTALL_PREFIX

CMakeLists.txt

Lines changed: 2 additions & 2 deletions
@@ -56,7 +56,7 @@ include(CMakeDependentOption)
 option(BUILD_TORCH "Build Torch" OFF)
 option(ATEN_NO_TEST "Do not build ATen test binaries" OFF)
 option(BUILD_ATEN_MOBILE "Build ATen for Android and iOS" OFF)
-option(BUILD_BINARY "Build C++ binaries" ON)
+option(BUILD_BINARY "Build C++ binaries" OFF)
 option(BUILD_DOCS "Build Caffe2 documentation" OFF)
 option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" ON)
 option(BUILD_PYTHON "Build Python binaries" ON)
@@ -115,7 +115,7 @@ option(USE_IDEEP "Use IDEEP interface in MKL BLAS" ON)
 option(USE_MKLML "Use MKLML interface in MKL BLAS" ON)
 option(USE_DISTRIBUTED "Use distributed" ON)
 cmake_dependent_option(
-    USE_MPI "Use MPI. Only available if USE_DISTRIBUTED is on." ON
+    USE_MPI "Use MPI for Caffe2. Only available if USE_DISTRIBUTED is on." OFF
     "USE_DISTRIBUTED" OFF)
 cmake_dependent_option(
     USE_GLOO "Use Gloo. Only available if USE_DISTRIBUTED is on." ON

aten/src/ATen/Context.cpp

Lines changed: 18 additions & 4 deletions
@@ -107,19 +107,33 @@ bool Context::setFlushDenormal(bool on) {
 #endif
 }
 
-Type& getMaybeVariableType(TensorOptions options) {
-  return globalContext().getMaybeVariableType(
+Type& getType(TensorOptions options) {
+  return globalContext().getType(
       options.backend(), options.dtype(), options.is_variable());
 }
 
-Type& getMaybeVariableType(const TensorImpl* impl) {
+Type& getType(const TensorImpl* impl) {
   Backend backend = tensorTypeIdToBackend(impl->type_id());
-  return globalContext().getMaybeVariableType(
+  return globalContext().getType(
       backend, impl->scalar_type(), impl->is_variable());
 }
 
 Allocator* getCPUAllocator() {
   return getTHDefaultAllocator();
 }
 
+struct LegacyTypeInit : public LegacyTypeInitInterface {
+  LegacyTypeInit(LegacyTypeInitArgs) {}
+  void initCPU() const override {
+    globalContext();
+  }
+  void initCUDA() const override {
+    globalContext().lazyInitCUDA();
+  }
+  void initComplex() const override {
+    globalContext().lazyInitComplex();
+  }
+};
+REGISTER_LEGACY_TYPE_INIT(LegacyTypeInit);
+
 }
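
With getMaybeVariableType renamed to getType, call sites end up looking roughly like the sketch below. It is illustrative only (not part of this commit) and assumes the post-merge ATen headers; the default-constructed TensorOptions (CPU, float, non-variable) is an assumption made for brevity.

// Illustrative sketch only; see assumptions above.
#include <ATen/ATen.h>

void type_lookup_sketch() {
  // at::getType(TensorOptions) reads backend(), dtype(), and is_variable()
  // from the options and forwards to globalContext().getType(...), as shown
  // in the diff above.
  at::TensorOptions opts;  // assumed defaults: CPU, float, non-variable
  at::Type& type = at::getType(opts);
  (void)type;  // suppress unused-variable warning
}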

aten/src/ATen/Context.h

Lines changed: 15 additions & 39 deletions
@@ -8,8 +8,9 @@
 #include "ATen/Utils.h"
 #include "ATen/core/Error.h"
 #include "ATen/detail/CUDAHooksInterface.h"
-#include "ATen/detail/VariableHooksInterface.h"
+#include "ATen/core/VariableHooksInterface.h"
 #include "ATen/detail/ComplexHooksInterface.h"
+#include "ATen/core/LegacyTypeDispatch.h"
 
 // This is temporary
 #include "ATen/core/ATenCoreTest.h"
@@ -24,43 +25,25 @@ class AT_API Context {
 public:
   Context();
   Type* getNonVariableTypeRaw(Backend p, ScalarType s) {
-    return type_registry[static_cast<int>(p)][static_cast<int>(s)].get();
+    return globalLegacyTypeDispatch().getNonVariableTypeRaw(p, s);
   }
   Type * getNonVariableTypeOpt(Backend p, ScalarType s) {
-    if (p != Backend::Undefined) {
-      initCUDAIfNeeded(backendToDeviceType(p));
-      initComplexIfNeeded(s);
-    }
-    auto type = getNonVariableTypeRaw(p, s);
-
-    if(!type) {
-      // there is only a single Undefined Type.
-      if (p == Backend::Undefined || s == ScalarType::Undefined) {
-        return getNonVariableTypeRaw(Backend::Undefined, ScalarType::Undefined);
-      }
-    }
-
-    return type;
+    return globalLegacyTypeDispatch().getNonVariableTypeOpt(p, s);
   }
   Type & getNonVariableType(Backend p, ScalarType s) {
-    auto* type = getNonVariableTypeOpt(p, s);
-    if (!type) AT_ERROR(toString(p), toString(s), "Type is not enabled.");
-    return *type;
+    return globalLegacyTypeDispatch().getNonVariableType(p, s);
  }
  Type & getVariableType(Backend p, ScalarType s) {
-    auto& baseType = getNonVariableType(p, s);
-    return detail::getVariableHooks().getVariableTypeFromBaseType(baseType);
-  }
-  Type & getMaybeVariableType(Backend p, ScalarType s, bool is_variable) {
-    if (is_variable) {
-      return getVariableType(p, s);
-    } else {
-      return getNonVariableType(p, s);
-    }
+    return globalLegacyTypeDispatch().getVariableType(p, s);
+  }
+  Type & getType(Backend p, ScalarType s, bool is_variable) {
+    return globalLegacyTypeDispatch().getType(p, s, is_variable);
   }
+  // The passed in Type must be delete'able
+  // TODO: Just make it take a unique_ptr
   void registerType(Backend b, ScalarType s, Type* t) {
-    type_registry[static_cast<int>(b)][static_cast<int>(s)].reset(t);
-    detail::getVariableHooks().registerVariableTypeFor(this, b, s);
+    globalLegacyTypeDispatch().registerType(b, s,
+      LegacyTypeDispatch::TypeUniquePtr{t, LegacyTypeDeleter([](Type* p) { delete p; }) });
   }
 
   Generator & defaultGenerator(DeviceType device_type) {
@@ -127,11 +110,6 @@ class AT_API Context {
   std::unique_ptr<Generator>
     generator_registry[static_cast<int>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)];
 private:
-  // NB: type_registry has nullptr for all CUDA backends until
-  // CUDA initialization has occurred
-  std::unique_ptr<Type> type_registry
-    [static_cast<int>(Backend::NumOptions)]
-    [static_cast<int>(ScalarType::NumOptions)];
   void initCUDAIfNeeded(DeviceType p) {
     if (p == DeviceType::CUDA) {
       lazyInitCUDA();
@@ -150,8 +128,6 @@ class AT_API Context {
   std::atomic<size_t> next_id;
   std::unique_ptr<THCState, void(*)(THCState*)> thc_state;
   friend struct Type;
-  friend void register_cpu_types(Context * context);
-  friend void register_cuda_types(Context * context);
 };
 
 AT_API Context & globalContext();
@@ -174,8 +150,8 @@ static inline Type& getNonVariableType(DeviceType p, ScalarType s) {
   return globalContext().getNonVariableType(deviceTypeToBackend(p), s);
 }
 
-AT_API Type& getMaybeVariableType(TensorOptions options);
-AT_API Type& getMaybeVariableType(const TensorImpl*);
+AT_API Type& getType(TensorOptions options);
+AT_API Type& getType(const TensorImpl*);
 
 AT_API Allocator* getCPUAllocator();
 
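
After this change the Context members are thin forwards to globalLegacyTypeDispatch(). The sketch below is not part of the commit; it assumes a CPU build with the default float Type registered and only uses declarations visible in the header above.

// Rough sketch; assumptions as noted above.
#include <ATen/ATen.h>

void context_dispatch_sketch() {
  at::Context& ctx = at::globalContext();
  // Plain (non-Variable) CPU float Type, now fetched via LegacyTypeDispatch.
  at::Type& base = ctx.getNonVariableType(at::Backend::CPU, at::ScalarType::Float);
  // Same lookup through the is_variable-aware overload (renamed from
  // getMaybeVariableType in this commit).
  at::Type& same =
      ctx.getType(at::Backend::CPU, at::ScalarType::Float, /*is_variable=*/false);
  (void)base;
  (void)same;
}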
