Merge from upstream #104

Merged: 11 commits, Aug 7, 2018
10 changes: 9 additions & 1 deletion .jenkins/pytorch/build.sh
@@ -74,7 +74,7 @@ fi
WERROR=1 python setup.py install

# Add the test binaries so that they won't be git clean'ed away
-git add -f build/bin build/lib
+git add -f build/bin

# Testing ATen install
if [[ "$BUILD_ENVIRONMENT" != *cuda* ]]; then
@@ -101,3 +101,11 @@ if [[ "$BUILD_ENVIRONMENT" == *xenial-cuda8-cudnn6-py3* ]]; then
make html
popd
fi

+# Test no-Python build
+if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
+  echo "Building libtorch"
+  # NB: Install outside of source directory (at the same level as the root
+  # pytorch folder) so that it doesn't get cleaned away prior to docker push.
+  WERROR=1 VERBOSE=1 tools/cpp_build/build_caffe2.sh "$PWD/../cpp-build"
+fi
6 changes: 0 additions & 6 deletions .jenkins/pytorch/macos-build.sh
@@ -61,12 +61,6 @@ export IMAGE_COMMIT_TAG=${BUILD_ENVIRONMENT}-${IMAGE_COMMIT_ID}

python setup.py install

-# this is a bit hacky, but not too bad. Bundle the test binaries into
-# the installation directory, so they can catch a free ride on the 7z
-# train.
-mkdir -p ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch/test_binaries/build
-mv build/{bin,lib} ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch/test_binaries/build/

# Upload torch binaries when the build job is finished
7z a ${IMAGE_COMMIT_TAG}.7z ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch*
aws s3 cp ${IMAGE_COMMIT_TAG}.7z s3://ossci-macos-build/pytorch/${IMAGE_COMMIT_TAG}.7z --acl public-read
11 changes: 10 additions & 1 deletion .jenkins/pytorch/macos-test.sh
@@ -50,13 +50,22 @@ test_python_all() {
test_cpp_api() {
  # C++ API

+  # NB: Install outside of source directory (at the same level as the root
+  # pytorch folder) so that it doesn't get cleaned away prior to docker push.
+  # But still clean it before we perform our own build.
+  #
+  CPP_BUILD="$PWD/../cpp-build"
+  rm -rf $CPP_BUILD
+  mkdir -p $CPP_BUILD
+  WERROR=1 VERBOSE=1 tools/cpp_build/build_caffe2.sh "$CPP_BUILD"
+
  python tools/download_mnist.py --quiet -d test/cpp/api/mnist

  # Unfortunately it seems like the test can't load from miniconda3
  # without these paths being set
  export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/miniconda3/lib"
  export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PWD/miniconda3/lib"
-  ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch/test_binaries/build/bin/test_api
+  "$CPP_BUILD"/caffe2/bin/test_api
}

if [ -z "${JOB_BASE_NAME}" ] || [[ "${JOB_BASE_NAME}" == *-test ]]; then
7 changes: 4 additions & 3 deletions .jenkins/pytorch/test.sh
@@ -108,13 +108,14 @@ test_torchvision() {
test_libtorch() {
  if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
    echo "Testing libtorch"
+    CPP_BUILD="$PWD/../cpp-build"
    if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
-      ./build/bin/test_jit
+      "$CPP_BUILD"/caffe2/bin/test_jit
    else
-      ./build/bin/test_jit "[cpu]"
+      "$CPP_BUILD"/caffe2/bin/test_jit "[cpu]"
    fi
    python tools/download_mnist.py --quiet -d test/cpp/api/mnist
-    OMP_NUM_THREADS=2 ./build/bin/test_api
+    OMP_NUM_THREADS=2 "$CPP_BUILD"/caffe2/bin/test_api
  fi
}

58 changes: 58 additions & 0 deletions aten/src/ATen/Backend.h
@@ -0,0 +1,58 @@
+#pragma once
+#include <stdexcept>
+
+namespace at {
+
+enum class Backend { CPU, CUDA, SparseCPU, SparseCUDA, Undefined, NumOptions };
+
+constexpr Backend kCPU = Backend::CPU;
+constexpr Backend kCUDA = Backend::CUDA;
+constexpr Backend kSparseCPU = Backend::SparseCPU;
+constexpr Backend kSparseCUDA = Backend::SparseCUDA;
+
+static inline Backend toSparse(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+      return Backend::SparseCPU;
+    case Backend::CUDA:
+      return Backend::SparseCUDA;
+    case Backend::SparseCPU:
+      return Backend::SparseCPU;
+    case Backend::SparseCUDA:
+      return Backend::SparseCUDA;
+    default:
+      throw std::runtime_error("Unknown backend");
+  }
+}
+
+static inline Backend toDense(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+      return Backend::CPU;
+    case Backend::CUDA:
+      return Backend::CUDA;
+    case Backend::SparseCPU:
+      return Backend::CPU;
+    case Backend::SparseCUDA:
+      return Backend::CUDA;
+    default:
+      throw std::runtime_error("Unknown backend");
+  }
+}
+
+static inline const char* toString(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+      return "CPU";
+    case Backend::CUDA:
+      return "CUDA";
+    case Backend::SparseCPU:
+      return "SparseCPU";
+    case Backend::SparseCUDA:
+      return "SparseCUDA";
+    default:
+      return "UNKNOWN_BACKEND";
+  }
+}
+
+} // namespace at
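
Reviewer note: since Backend.h is shown in full above, the new helpers can be exercised directly. A minimal usage sketch; the main() driver is illustrative only, not part of the PR:

#include <ATen/Backend.h>

#include <iostream>
#include <stdexcept>

int main() {
  // Dense <-> sparse round trip, following the switch tables above.
  at::Backend b = at::kCUDA;
  at::Backend sparse = at::toSparse(b);     // Backend::SparseCUDA
  at::Backend dense = at::toDense(sparse);  // back to Backend::CUDA
  std::cout << at::toString(b) << " -> " << at::toString(sparse) << " -> "
            << at::toString(dense) << std::endl;  // CUDA -> SparseCUDA -> CUDA

  // Undefined hits the default case in toSparse/toDense and throws.
  try {
    at::toSparse(at::Backend::Undefined);
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << std::endl;  // "Unknown backend"
  }
  return 0;
}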
4 changes: 3 additions & 1 deletion aten/src/ATen/Device.h
@@ -3,6 +3,7 @@
#include <ATen/ScalarType.h>
#include <ATen/core/Error.h>
#include <ATen/core/DeviceType.h>
+#include <ATen/core/Error.h>

#include <cstddef>
#include <iosfwd>
@@ -38,7 +39,8 @@ struct Device {
}
}

-/// Constructs a new `Device` from a `DeviceType` and an optional device index.
+/// Constructs a new `Device` from a `DeviceType` and an optional device
+/// index.
/* implicit */ Device(DeviceType type, int32_t index = -1)
: type_(type), index_(index) {
AT_CHECK(
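
Reviewer note: for context, a short sketch of how the constructor above is used. Only the fragment shown in this hunk is assumed; the rest of the Device class, including the AT_CHECK condition, is truncated in this diff:

#include <ATen/Device.h>

int main() {
  // Index defaults to -1, i.e. "no particular device".
  at::Device cpu(at::DeviceType::CPU);
  // An explicit device index, validated by the AT_CHECK above.
  at::Device cuda1(at::DeviceType::CUDA, 1);
  // The /* implicit */ marker means a bare DeviceType converts to a Device.
  at::Device implicit_device = at::DeviceType::CUDA;
  (void)cpu;
  (void)cuda1;
  (void)implicit_device;
  return 0;
}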
170 changes: 3 additions & 167 deletions aten/src/ATen/ScalarType.h
@@ -1,168 +1,4 @@
#pragma once

-#include "ATen/ATenGeneral.h"
-#include "ATen/core/ArrayRef.h"
-#include "ATen/core/Half.h"
-
-#include <cstdint>
-#include <iostream>
-
-namespace at {
-
-// NB: Order matters for this macro; it is relied upon in
-// _promoteTypesLookup and the serialization format.
-#define AT_FORALL_SCALAR_TYPES(_) \
-  _(uint8_t,Byte,i)  /* 0 */ \
-  _(int8_t,Char,i)   /* 1 */ \
-  _(int16_t,Short,i) /* 2 */ \
-  _(int,Int,i)       /* 3 */ \
-  _(int64_t,Long,i)  /* 4 */ \
-  _(at::Half,Half,d) /* 5 */ \
-  _(float,Float,d)   /* 6 */ \
-  _(double,Double,d) /* 7 */
-
-#define AT_FORALL_SCALAR_TYPES_EXCEPT_HALF(_) \
-  _(uint8_t,Byte,i) \
-  _(int8_t,Char,i) \
-  _(int16_t,Short,i) \
-  _(int,Int,i) \
-  _(int64_t,Long,i) \
-  _(float,Float,d) \
-  _(double,Double,d)
-
-enum class ScalarType {
-#define DEFINE_ENUM(_1,n,_2) \
-  n,
-  AT_FORALL_SCALAR_TYPES(DEFINE_ENUM)
-#undef DEFINE_ENUM
-  Undefined, // 8
-  NumOptions
-};
-
-enum class Backend {
-  CPU,
-  CUDA,
-  SparseCPU,
-  SparseCUDA,
-  Undefined,
-  NumOptions
-};
-
-constexpr Backend kCPU = Backend::CPU;
-constexpr Backend kCUDA = Backend::CUDA;
-constexpr Backend kSparseCPU = Backend::SparseCPU;
-constexpr Backend kSparseCUDA = Backend::SparseCUDA;
-
-static inline Backend toSparse(Backend b) {
-  switch (b) {
-    case Backend::CPU: return Backend::SparseCPU;
-    case Backend::CUDA: return Backend::SparseCUDA;
-    case Backend::SparseCPU: return Backend::SparseCPU;
-    case Backend::SparseCUDA: return Backend::SparseCUDA;
-    default: throw std::runtime_error("Unknown backend");
-  }
-}
-
-static inline Backend toDense(Backend b) {
-  switch (b) {
-    case Backend::CPU: return Backend::CPU;
-    case Backend::CUDA: return Backend::CUDA;
-    case Backend::SparseCPU: return Backend::CPU;
-    case Backend::SparseCUDA: return Backend::CUDA;
-    default: throw std::runtime_error("Unknown backend");
-  }
-}
-
-static inline const char * toString(Backend b) {
-  switch(b) {
-    case Backend::CPU: return "CPU";
-    case Backend::CUDA: return "CUDA";
-    case Backend::SparseCPU: return "SparseCPU";
-    case Backend::SparseCUDA: return "SparseCUDA";
-    default: return "UNKNOWN_BACKEND";
-  }
-}
-
-#define DEFINE_CONSTANT(_,name,_2) \
-  constexpr ScalarType k##name = ScalarType::name;
-
-AT_FORALL_SCALAR_TYPES(DEFINE_CONSTANT)
-#undef DEFINE_CONSTANT
-
-static inline const char * toString(ScalarType t) {
-#define DEFINE_CASE(_,name,_2) \
-  case ScalarType:: name : return #name;
-
-  switch(t) {
-    AT_FORALL_SCALAR_TYPES(DEFINE_CASE)
-    default:
-      return "UNKNOWN_SCALAR";
-  }
-#undef DEFINE_CASE
-}
-
-static inline size_t elementSize(ScalarType t) {
-#define CASE_ELEMENTSIZE_CASE(ctype,name,_2) \
-  case ScalarType:: name : return sizeof(ctype);
-
-  switch(t) {
-    AT_FORALL_SCALAR_TYPES(CASE_ELEMENTSIZE_CASE)
-    default:
-      AT_ERROR("Unknown ScalarType");
-  }
-#undef CASE_ELEMENTSIZE_CASE
-}
-
-static inline bool isIntegralType(ScalarType t) {
-  return (t == ScalarType::Byte ||
-          t == ScalarType::Char ||
-          t == ScalarType::Int ||
-          t == ScalarType::Long ||
-          t == ScalarType::Short);
-}
-
-static inline bool isFloatingType(ScalarType t) {
-  return (t == ScalarType::Double ||
-          t == ScalarType::Float ||
-          t == ScalarType::Half);
-}
-
-static inline ScalarType promoteTypes(ScalarType a, ScalarType b) {
-  // This is generated according to NumPy's promote_types
-  constexpr auto u1 = ScalarType::Byte;
-  constexpr auto i1 = ScalarType::Char;
-  constexpr auto i2 = ScalarType::Short;
-  constexpr auto i4 = ScalarType::Int;
-  constexpr auto i8 = ScalarType::Long;
-  constexpr auto f2 = ScalarType::Half;
-  constexpr auto f4 = ScalarType::Float;
-  constexpr auto f8 = ScalarType::Double;
-  constexpr auto ud = ScalarType::Undefined;
-  static constexpr ScalarType _promoteTypesLookup
-      [static_cast<int>(ScalarType::NumOptions)]
-      [static_cast<int>(ScalarType::NumOptions)] = {
-    /*        u1  i1  i2  i4  i8  f2  f4  f8  ud */
-    /* u1 */ { u1, i2, i2, i4, i8, f2, f4, f8, ud },
-    /* i1 */ { i2, i1, i2, i4, i8, f2, f4, f8, ud },
-    /* i2 */ { i2, i2, i2, i4, i8, f4, f4, f8, ud },
-    /* i4 */ { i4, i4, i4, i4, i8, f8, f4, f8, ud },
-    /* i8 */ { i8, i8, i8, i8, i8, f8, f4, f8, ud },
-    /* f2 */ { f2, f2, f4, f8, f8, f2, f4, f8, ud },
-    /* f4 */ { f4, f4, f4, f4, f4, f4, f4, f8, ud },
-    /* f8 */ { f8, f8, f8, f8, f8, f8, f8, f8, ud },
-    /* ud */ { ud, ud, ud, ud, ud, ud, ud, ud, ud },
-  };
-  return _promoteTypesLookup[static_cast<int>(a)][static_cast<int>(b)];
-}
-
-struct Tensor;
-typedef ArrayRef<int64_t> IntList;
-typedef ArrayRef<Tensor> TensorList;
-
-} // namespace at
-
-inline std::ostream& operator<<(
-    std::ostream& stream,
-    at::ScalarType scalar_type) {
-  return stream << at::toString(scalar_type);
-}
+#include <ATen/ATenGeneral.h> // for BC reasons
+#include <ATen/Backend.h>
+#include <ATen/core/ScalarType.h>
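
Reviewer note: the utilities deleted above move to ATen/core/ScalarType.h, which this header now re-includes (alongside the new ATen/Backend.h) so existing includes keep compiling. A sketch of the moved API, assuming the definitions keep the signatures shown in the deleted block; the new core header itself is not part of this diff:

#include <ATen/ScalarType.h>

#include <iostream>

int main() {
  using at::ScalarType;
  // Values follow the _promoteTypesLookup table deleted above.
  std::cout << at::toString(at::promoteTypes(ScalarType::Byte, ScalarType::Char))
            << std::endl;  // Short: mixed-signedness bytes widen to int16
  std::cout << at::toString(at::promoteTypes(ScalarType::Long, ScalarType::Float))
            << std::endl;  // Float, matching NumPy's promote_types
  std::cout << at::elementSize(ScalarType::Half) << std::endl;  // 2 (bytes)
  std::cout << std::boolalpha << at::isIntegralType(ScalarType::Byte)
            << std::endl;  // true
  return 0;
}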
2 changes: 1 addition & 1 deletion aten/src/ATen/core/ATenCoreTest.h
@@ -1,6 +1,6 @@
#pragma once

-#include <ATen/core/CoreAPI.h>
+#include <ATen/core/Macros.h>

namespace at {

2 changes: 1 addition & 1 deletion aten/src/ATen/core/Backtrace.h
@@ -4,7 +4,7 @@
#include <string>
#include <typeinfo>

-#include <ATen/core/CoreAPI.h>
+#include <ATen/core/Macros.h>

namespace at {
/// Utility to demangle a C++ symbol name.
22 changes: 12 additions & 10 deletions aten/src/ATen/core/DeviceType.h
@@ -3,7 +3,7 @@
// ATen/core (which would require a lot more build system hacking.)
// If you modify me, keep me synchronized with that file.

-#include <ATen/core/CoreAPI.h>
+#include <ATen/core/Macros.h>

#include <ostream>

@@ -12,19 +12,21 @@ namespace at {
// Underlying type declared to be int32_t for consistency with protobufs.
enum class DeviceType : int32_t {
  CPU = 0,
-  CUDA = 1,  // CUDA.
-  MKLDNN = 2,  // Reserved for explicit MKLDNN
-  OPENGL = 3,  // OpenGL
-  OPENCL = 4,  // OpenCL
-  IDEEP = 5,  // IDEEP.
-  HIP = 6,  // AMD HIP
+  CUDA = 1, // CUDA.
+  MKLDNN = 2, // Reserved for explicit MKLDNN
+  OPENGL = 3, // OpenGL
+  OPENCL = 4, // OpenCL
+  IDEEP = 5, // IDEEP.
+  HIP = 6, // AMD HIP
  // Change the following number if you add more devices in the code.
  COMPILE_TIME_MAX_DEVICE_TYPES = 7,
-  ONLY_FOR_TEST = 20901701,  // This device type is only for test.
+  ONLY_FOR_TEST = 20901701, // This device type is only for test.
};

-AT_CORE_API std::string DeviceTypeName(at::DeviceType d, bool lower_case = false);
+AT_CORE_API std::string DeviceTypeName(
+    at::DeviceType d,
+    bool lower_case = false);

-}
+} // namespace at

AT_CORE_API std::ostream& operator<<(std::ostream& stream, at::DeviceType type);
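
Reviewer note: a usage sketch for the reformatted declaration. The exact strings returned are assumptions, since the corresponding DeviceType.cpp is not part of this diff:

#include <ATen/core/DeviceType.h>

#include <iostream>

int main() {
  at::DeviceType d = at::DeviceType::CUDA;
  // Presumably "CUDA" and "cuda"; the implementation is not shown here.
  std::cout << at::DeviceTypeName(d) << std::endl;
  std::cout << at::DeviceTypeName(d, /*lower_case=*/true) << std::endl;
  // Streaming uses the AT_CORE_API operator<< declared above.
  std::cout << d << std::endl;
  return 0;
}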
2 changes: 1 addition & 1 deletion aten/src/ATen/core/Error.h
@@ -1,6 +1,6 @@
#pragma once

-#include <ATen/core/CoreAPI.h>
+#include <ATen/core/Macros.h>
#include <ATen/core/optional.h>

#include <cstddef>
2 changes: 1 addition & 1 deletion aten/src/ATen/core/Half-inl.h
@@ -2,7 +2,7 @@

#include <cstring>
#include <limits>
-#include <ATen/core/CoreAPI.h>
+#include <ATen/core/Macros.h>

#ifdef __CUDACC__
#include <cuda_fp16.h>