
Merge from upstream #69


Merged
merged 22 commits into from
Jul 24, 2018
Commits (22)
9d6521c
Support n-dimensional empty tensors in CUDA non-reduction dimension f…
gchanan Jul 23, 2018
9525925
Low rank multivariate normal (#8635)
fehiepsi Jul 23, 2018
029cf1d
Improve error messages of wrong dimensions (#9694)
idansc Jul 23, 2018
14d4bdb
Reformat output data format to make it more general for other binarie…
sf-wind Jul 23, 2018
5094684
Create torch::from_blob for variables (#9605)
goldsborough Jul 23, 2018
88d6b6e
Fix D8722560 (#9717)
smessmer Jul 23, 2018
3e9e3ef
Improving diagnose RF NE with Cali (#9550)
Jul 23, 2018
aa8a9fa
Extend DispatchStub to support CUDA dispatch (#9664)
colesbury Jul 23, 2018
ee2cc68
Add ctc_beam_search_decoder op for caffe2 (#9622)
normster Jul 23, 2018
a9742e1
Add fallback to TensorCPU if there are unsupported types for IDEEP Te…
Jul 23, 2018
a949245
Switch interpreter to use IValue's primitive int/floats (#9718)
zdevito Jul 23, 2018
431415a
quick patch for PackPadded removal to propagate the correct size. (#9…
anderspapitto Jul 23, 2018
31ba2f1
Rename embedding variable to weight (#9720)
goldsborough Jul 23, 2018
d05a814
Change behavior of clone to clone to a device (#9609)
goldsborough Jul 23, 2018
5849354
Add operator<< overloads for TensorOptions (#9606)
goldsborough Jul 23, 2018
e3fb908
Allow multiple ops.def and clean up code gen in general
bwasti Jul 23, 2018
099b5ba
Tensor merge PRs from July 20 (#9713)
ezyang Jul 24, 2018
a387331
Re-enable test_segfault after recent dataloder changes
ssnl Jul 24, 2018
5df3eae
Add 1x1 specialization for conv with NCHW order (#9671)
xiaomengy Jul 24, 2018
9bf72b2
Add missing windows exports
zdevito Jul 24, 2018
ba634c1
Move strides to base class. (#9749)
ezyang Jul 24, 2018
e64bed0
Merge remote-tracking branch 'upstream/master'
iotamudelta Jul 24, 2018
9 changes: 3 additions & 6 deletions .jenkins/pytorch/test.sh
@@ -44,13 +44,10 @@ if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
(cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_aten_asan(3)")
fi

export ATEN_DISABLE_AVX=
export ATEN_DISABLE_AVX2=
if [[ "${JOB_BASE_NAME}" == *-NO_AVX-* ]]; then
export ATEN_DISABLE_AVX=1
fi
if [[ "${JOB_BASE_NAME}" == *-NO_AVX2-* ]]; then
export ATEN_DISABLE_AVX2=1
export ATEN_CPU_CAPABILITY=default
elif [[ "${JOB_BASE_NAME}" == *-NO_AVX2-* ]]; then
export ATEN_CPU_CAPABILITY=avx
fi

test_python_nn() {
14 changes: 14 additions & 0 deletions aten/src/ATen/Layout.h
@@ -1,6 +1,9 @@
#pragma once

#include <ATen/ScalarType.h>
#include <ATen/Error.h>

#include <iostream>

namespace at {
enum class Layout { Strided, Sparse };
@@ -18,3 +21,14 @@ inline Layout layout_from_backend(Backend backend) {
}
}
} // namespace at

inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
switch (layout) {
case at::kStrided:
return stream << "Strided";
case at::kSparse:
return stream << "Sparse";
default:
AT_ERROR("Unknown layout");
}
}
11 changes: 9 additions & 2 deletions aten/src/ATen/ScalarType.h
@@ -1,11 +1,12 @@
#pragma once

#include <stdint.h>

#include "ATen/ArrayRef.h"
#include "ATen/ATenGeneral.h"
#include "ATen/Half.h"

#include <cstdint>
#include <iostream>

namespace at {

// NB: Order matters for this macro; it is relied upon in
@@ -168,3 +169,9 @@ typedef ArrayRef<int64_t> IntList;
typedef ArrayRef<Tensor> TensorList;

} // namespace at

inline std::ostream& operator<<(
std::ostream& stream,
at::ScalarType scalar_type) {
return stream << at::toString(scalar_type);
}
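The two hunks above (Layout.h and ScalarType.h) add stream insertion operators for at::Layout and at::ScalarType. A minimal sketch of how they could be exercised, assuming an ATen build where these headers are on the include path; the example program itself is not part of the diff:

#include <ATen/Layout.h>
#include <ATen/ScalarType.h>
#include <iostream>

int main() {
  // Layout values are printed by name via the switch added in Layout.h.
  std::cout << at::Layout::Strided << "\n";   // expected output: Strided
  // ScalarType values are routed through at::toString.
  std::cout << at::ScalarType::Float << "\n"; // expected output: Float
  return 0;
}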
2 changes: 1 addition & 1 deletion aten/src/ATen/SparseTensorImpl.cpp
@@ -19,7 +19,7 @@ namespace at {
// we don't currently support zero-size dimensions, so we can't actually
// do this; so we just allocate zero-size tensors for everything.
SparseTensorImpl::SparseTensorImpl(Type * type)
: TensorImpl(type)
: TensorImpl(type, nullptr)
, size_{0}
, sparseDims_(1)
, denseDims_(0)
40 changes: 40 additions & 0 deletions aten/src/ATen/TensorImpl.cpp
@@ -3,6 +3,8 @@
#include <ATen/Tensor.h>
#include <ATen/optional.h>

#include <TH/THTensor.hpp>

namespace at {
Tensor& TensorImpl::grad() {
AT_ERROR("grad is not implemented for Tensor");
@@ -33,4 +35,42 @@ void Tensor::backward(
bool create_graph) {
pImpl->backward(std::move(gradient), keep_graph, create_graph);
}

TensorImpl::~TensorImpl() {
if (tensor) tensor->release();
}

IntList TensorImpl::sizes() const {
// NB: dim in tensor is not synchronized with THTensor, so it's
// important to apply dim here
return IntList(THTensor_getSizePtr(tensor), dim());
}

IntList TensorImpl::strides() const {
// NB: dim in tensor is not synchronized with THTensor, so it's
// important to apply dim here
return IntList(THTensor_getStridePtr(tensor), dim());
}

void TensorImpl::release_resources() {
if (tensor) {
tensor->release();
tensor = nullptr;
}
}

int64_t TensorImpl::dim() const {
if (isScalar()) {
return 0;
}
return tensor->dim();
}

void * TensorImpl::unsafeGetTH(bool retain) {
if (retain) {
tensor->retain();
}
return tensor;
}

} // namespace at
38 changes: 23 additions & 15 deletions aten/src/ATen/TensorImpl.h
@@ -7,6 +7,8 @@
#include "ATen/ScalarType.h"
#include "ATen/optional.h"

struct THTensor;

namespace at {
class Scalar;
struct Type;
@@ -15,23 +17,27 @@ struct Tensor;
} // namespace at

namespace at {
struct TensorImpl : public Retainable {
explicit TensorImpl(Type * type)
: is_scalar(false), type_(type) {}
struct AT_API TensorImpl : public Retainable {
explicit TensorImpl(Type * type, THTensor * tensor)
: is_scalar(false), type_(type), tensor(tensor) {}

virtual ~TensorImpl();

virtual void release_resources() override;

Type & type() const {
return *type_;
}
virtual const char * toString() const = 0;
virtual IntList sizes() const = 0;
virtual IntList strides() const = 0;
virtual int64_t dim() const = 0;
virtual IntList sizes() const;
virtual IntList strides() const;
virtual int64_t dim() const;
/**
* Perform a conversion of this tensor to a scalar, if numel() == 1.
* Otherwise, raise an error.
*/
virtual Scalar localScalar() = 0;
virtual void * unsafeGetTH(bool retain) = 0;
virtual void * unsafeGetTH(bool retain);
virtual std::unique_ptr<Storage> storage() = 0;
friend struct Type;

@@ -69,30 +75,32 @@ struct TensorImpl : public Retainable {
// Some methods below are defined in TensorImpl.cpp because Tensor is an
// incomplete type.

AT_API virtual void set_requires_grad(bool requires_grad) {
virtual void set_requires_grad(bool requires_grad) {
AT_ERROR("set_requires_grad is not implemented for Tensor");
}
AT_API virtual bool requires_grad() const {
virtual bool requires_grad() const {
AT_ERROR("requires_grad is not implemented for Tensor");
}

AT_API virtual Tensor& grad();
AT_API virtual const Tensor& grad() const;
virtual Tensor& grad();
virtual const Tensor& grad() const;

AT_API virtual Tensor detach() const;
AT_API virtual void detach_() {
virtual Tensor detach() const;
virtual void detach_() {
AT_ERROR("detach_ is not implemented for Tensor");
}

AT_API virtual void backward(
virtual void backward(
at::optional<Tensor> gradient,
bool keep_graph,
bool create_graph);

AT_API virtual void set_data(Tensor new_data);
virtual void set_data(Tensor new_data);

protected:
bool is_scalar;
Type * type_;
public:
THTensor * tensor;
};
} // namespace at
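The TensorImpl.cpp and TensorImpl.h hunks above move sizes(), strides(), dim(), and unsafeGetTH() from pure virtuals into base-class defaults that read from a THTensor* now stored on TensorImpl itself, while subclasses without a backing tensor (UndefinedTensor, SparseTensorImpl) pass nullptr and keep their own overrides. A self-contained analogue of that pattern, illustrative only and not ATen code (Handle, BaseImpl, and UndefinedImpl are made-up names):

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for the backing TH handle the real TensorImpl now stores.
struct Handle {
  std::vector<int64_t> sizes;
};

// The base impl owns the handle and supplies default accessors, so most
// subclasses no longer need to override them.
struct BaseImpl {
  explicit BaseImpl(Handle* h) : handle(h) {}
  virtual ~BaseImpl() { delete handle; }

  // Previously pure virtual; now defaults that read the handle.
  virtual int64_t dim() const { return static_cast<int64_t>(handle->sizes.size()); }
  virtual const std::vector<int64_t>& sizes() const { return handle->sizes; }

  Handle* handle;  // analogous to the new public THTensor* tensor member
};

// A subclass with no backing handle must still override what it needs.
struct UndefinedImpl : BaseImpl {
  UndefinedImpl() : BaseImpl(nullptr) {}
  int64_t dim() const override { return 0; }
  const std::vector<int64_t>& sizes() const override {
    static std::vector<int64_t> empty;
    return empty;
  }
};

int main() {
  BaseImpl dense(new Handle{{2, 3}});
  std::cout << dense.dim() << "\n";  // 2, taken from the handle
  UndefinedImpl undef;
  std::cout << undef.dim() << "\n";  // 0, from the override
  return 0;
}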
12 changes: 12 additions & 0 deletions aten/src/ATen/TensorOptions.cpp
@@ -6,6 +6,8 @@
#include <ATen/ScalarType.h>
#include <ATen/optional.h>

#include <iostream>

namespace at {

TensorOptions::TensorOptions(bool use_thread_local_default_options) {
@@ -17,3 +19,13 @@ TensorOptions::TensorOptions(bool use_thread_local_default_options) {
}
}
} // namespace at

std::ostream& operator<<(
std::ostream& stream,
const at::TensorOptions& options) {
return stream << "TensorOptions(dtype=" << options.dtype()
<< ", device=" << options.device()
<< ", layout=" << options.layout()
<< ", requires_grad=" << std::boolalpha
<< options.requires_grad() << ")";
}
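A small sketch of what the new TensorOptions printer produces, assuming a full ATen build; the constructor argument mirrors the one visible in the hunk above, and the exact dtype/device strings depend on how their own stream overloads format them:

#include <ATen/TensorOptions.h>
#include <iostream>

int main() {
  at::TensorOptions options(/*use_thread_local_default_options=*/true);
  // Expected form:
  // TensorOptions(dtype=..., device=..., layout=Strided, requires_grad=false)
  std::cout << options << std::endl;
  return 0;
}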
5 changes: 5 additions & 0 deletions aten/src/ATen/TensorOptions.h
@@ -9,6 +9,7 @@
#include <ATen/Type.h>

#include <cstddef>
#include <iosfwd>
#include <utility>

namespace at {
@@ -277,3 +278,7 @@ inline Tensor Tensor::to(Device device, bool non_blocking) const {
return detail::to(*this, options().device(device), non_blocking);
}
} // namespace at

std::ostream& operator<<(
std::ostream& stream,
const at::TensorOptions& options);
2 changes: 1 addition & 1 deletion aten/src/ATen/UndefinedTensor.cpp
@@ -6,7 +6,7 @@ namespace at {

// should this use the globalContext? Can it get a context passed in somehow?
UndefinedTensor::UndefinedTensor()
: TensorImpl(&(globalContext().getType(Backend::Undefined,ScalarType::Undefined))) {
: TensorImpl(&(globalContext().getType(Backend::Undefined,ScalarType::Undefined)), nullptr) {
}

const char * UndefinedTensor::toString() const {
6 changes: 4 additions & 2 deletions aten/src/ATen/code_template.py
@@ -50,15 +50,17 @@ def replace(match):
comma_after = ', '
key = key[:-1]
v = lookup(key)
if indent is not None and isinstance(v, list):
if indent is not None:
if not isinstance(v, list):
v = [v]
return indent_lines(indent, v)
elif isinstance(v, list):
middle = ', '.join([str(x) for x in v])
if len(v) == 0:
return middle
return comma_before + middle + comma_after
else:
return (indent or '') + str(v)
return str(v)
return self.subtitution.sub(replace, self.pattern)


4 changes: 2 additions & 2 deletions aten/src/ATen/copy_wrapper.py
@@ -116,7 +116,7 @@ def create_one_copy(dst_type, all_types):
cuda = ''
state = []
if src_type['Backend'] == 'CUDA' or dst_type['Backend'] == 'CUDA':
state.append('context->getTHCState()')
state.append('globalContext().getTHCState()')
if src_type['Backend'] == 'CUDA':
if dst_type['Backend'] == 'CUDA':
cuda = 'Cuda'
@@ -183,7 +183,7 @@ def create_one_copy_from(src_type, all_types):
if src_type['Backend'] == 'CUDA':
cuda = 'Cuda'
if dst_type['Backend'] == 'CUDA' or src_type['Backend'] == 'CUDA':
state.append('context->getTHCState()')
state.append('globalContext().getTHCState()')

body_env = nested_dict({
'src_scalar_name': src_type['ScalarName'],
35 changes: 24 additions & 11 deletions aten/src/ATen/function_wrapper.py
@@ -180,7 +180,7 @@ def TypedDict(name, attrs, total=True):  # type: ignore
}""")

BUFFER_DEFINITION = CodeTemplate("""\
auto ${name}_ = new ${Tensor}(context);
auto ${name}_ = new ${Tensor}(${THTensor}_new());
auto ${name} = Tensor(${name}_, false);""")

CONDITIONAL_INITIALIZER = CodeTemplate("""\
@@ -277,7 +277,7 @@ def __init__(self, reason):
'THStorage*': CodeTemplate('checked_cast_storage<${Storage}>(&${arg_name},"${arg_name}",${arg_pos})'),
'THGenerator*':
CodeTemplate(
'check_generator<${Backend}Generator>(${arg_name}, &context->defaultGenerator(backend()))'),
'check_generator<${Backend}Generator>(${arg_name}, &globalContext().defaultGenerator(backend()))'),
# This is a cast done via direct-construction
'THSize*': CodeTemplate('THLongStorageView ${result_name}(${arg_name}, THLongStorageViewKind::SIZE);'),
# This is a cast done via direct-construction
@@ -306,14 +306,24 @@ def __init__(self, reason):

CHECKED_USE_NULLABLE = CodeTemplate('${arg_name}_ ? ${usage} : NULL')

ALLOC_NOARGS_WRAP = {
'THTensor*': 'detail::new_${Tensor}()',
'THBoolTensor*': 'detail::new_${Backend}ByteTensor()',
'THIndexTensor*': 'detail::new_${Backend}LongTensor()',
'THIntegerTensor*': 'detail::new_${Backend}IntTensor()',
'THSTensor*': 'detail::new_Sparse${Tensor}()',
'THDenseTensor*': 'detail::new_${DenseTensor}()',
'THDenseIndexTensor*': 'detail::new_${DenseBackend}LongTensor()',
}

ALLOC_WRAP = {
'THTensor*': 'new ${Tensor}(context${,arguments})',
'THBoolTensor*': 'new ${Backend}ByteTensor(context${,arguments})',
'THIndexTensor*': 'new ${Backend}LongTensor(context${,arguments})',
'THIntegerTensor*': 'new ${Backend}IntTensor(context${,arguments})',
'THSTensor*': 'new Sparse${Tensor}(context${,arguments})',
'THDenseTensor*': 'new ${DenseTensor}(context${,arguments})',
'THDenseIndexTensor*': 'new ${DenseBackend}LongTensor(context${,arguments})',
'THTensor*': 'new ${Tensor}(${arguments})',
'THBoolTensor*': 'new ${Backend}ByteTensor(${arguments})',
'THIndexTensor*': 'new ${Backend}LongTensor(${arguments})',
'THIntegerTensor*': 'new ${Backend}IntTensor(${arguments})',
'THSTensor*': 'new Sparse${Tensor}(${arguments})',
'THDenseTensor*': 'new ${DenseTensor}(${arguments})',
'THDenseIndexTensor*': 'new ${DenseBackend}LongTensor(${arguments})',
}

# Replacements for constants when calling into TH
@@ -1228,7 +1238,10 @@ def handle_sparse(env, option):
def allocate_arg(env, arg, output_count):
# type: (Environment, THFormal, int) -> List[str]
name = arg['name']
allocation = CodeTemplate(ALLOC_WRAP[arg['type']]).substitute(env, arguments=[])
state = ''
if is_cuda:
state = 'globalContext().getTHCState()'
allocation = CodeTemplate(ALLOC_NOARGS_WRAP[arg['type']]).substitute(env)
tensor_arg = '{}_'.format(name)
if arg.get('mask', False):
allocation = 'output_mask[{}] ? {} : nullptr'.format(output_count, allocation)
@@ -1257,7 +1270,7 @@ def handle_call(env, option, cimpl):
is_nn = option['mode'] == 'NN'
actuals = get_arguments(cimpl['arguments'], option)
if is_cuda or is_nn:
actuals = ['context->getTHCState()'] + actuals
actuals = ['globalContext().getTHCState()'] + actuals

cname = cimpl['cname']
if option.get('sparse', False):
2 changes: 1 addition & 1 deletion aten/src/ATen/gen.py
@@ -273,7 +273,7 @@ def generate_storage_type_and_tensor(backend, density, scalar_type, declarations
env['THStorage'] = 'THCuda{}Storage'.format(sname)
env['THTensor'] = 'THCuda{}Tensor'.format(sname)
env['THIndexTensor'] = 'THCudaLongTensor'
env['state'] = ['context->getTHCState()']
env['state'] = ['globalContext().getTHCState()']
env['isCUDA'] = 'true'
env['storage_device'] = 'return storage->device;'
env['Generator'] = 'CUDAGenerator'