From 43fd6b234dba08a67d6bb5a3eb18969b0cc41ea1 Mon Sep 17 00:00:00 2001 From: Edward Yang Date: Sun, 2 Sep 2018 15:16:33 -0700 Subject: [PATCH 1/3] Make Type a (mostly) pure virtual class; TypeDefault for impls (#11013) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/11013 Previously, the parent class Type also contained a large number of implementations, for things like broadcasting and native functions that didn't need dispatch. We'd like to be able to reference this interface from Tensor even when none of these implementations are available. To do this, we convert Type into a truly pure virtual interface, and move all of the implementations to TypeDefault. Pull Request resolved: https://github.com/pytorch/pytorch/pull/11181 Differential Revision: D9561478 Pulled By: ezyang fbshipit-source-id: 13c49d80bc547551adf524b1cf1d691bfe311133 --- aten/src/ATen/UndefinedType.cpp | 6 +-- aten/src/ATen/UndefinedType.h | 4 +- aten/src/ATen/function_wrapper.py | 46 +++++++++++++------ aten/src/ATen/gen.py | 9 ++-- aten/src/ATen/templates/SparseTypeDerived.cpp | 2 +- aten/src/ATen/templates/Type.h | 29 +++++++----- .../templates/{Type.cpp => TypeDefault.cpp} | 27 ++++------- aten/src/ATen/templates/TypeDefault.h | 36 +++++++++++++++ aten/src/ATen/templates/TypeDerived.cpp | 2 +- aten/src/ATen/templates/TypeDerived.h | 4 +- tools/autograd/gen_variable_type.py | 2 +- tools/autograd/templates/VariableType.cpp | 2 +- tools/autograd/templates/VariableType.h | 4 +- 13 files changed, 116 insertions(+), 57 deletions(-) rename aten/src/ATen/templates/{Type.cpp => TypeDefault.cpp} (72%) create mode 100644 aten/src/ATen/templates/TypeDefault.h diff --git a/aten/src/ATen/UndefinedType.cpp b/aten/src/ATen/UndefinedType.cpp index 2bc3965c6d33ae..367db6fbb2868e 100644 --- a/aten/src/ATen/UndefinedType.cpp +++ b/aten/src/ATen/UndefinedType.cpp @@ -4,7 +4,7 @@ namespace at { UndefinedType::UndefinedType() - : Type(UndefinedTensorId(), /*is_variable=*/false, /*is_undefined=*/true) {} + : TypeDefault(UndefinedTensorId(), /*is_variable=*/false, /*is_undefined=*/true) {} ScalarType UndefinedType::scalarType() const { return ScalarType::Undefined; } @@ -50,13 +50,13 @@ size_t UndefinedType::elementSizeInBytes() const { Type & UndefinedType::toBackend(Backend b) const { if (b == Backend::Undefined) { - return Type::toBackend(b); + return TypeDefault::toBackend(b); } AT_ERROR("toBackend not implemented for UndefinedType to non-UndefinedType"); } Type & UndefinedType::toScalarType(ScalarType s) const { if (s == ScalarType::Undefined) { - return Type::toScalarType(s); + return TypeDefault::toScalarType(s); } AT_ERROR("toScalarType not implemented for UndefinedType to non-UndefinedType"); } diff --git a/aten/src/ATen/UndefinedType.h b/aten/src/ATen/UndefinedType.h index d216e3131dd693..c08b1a47156f5e 100644 --- a/aten/src/ATen/UndefinedType.h +++ b/aten/src/ATen/UndefinedType.h @@ -1,6 +1,6 @@ #pragma once -#include "ATen/Type.h" +#include "ATen/TypeDefault.h" #include "ATen/CheckGenerator.h" #ifdef _MSC_VER @@ -11,7 +11,7 @@ namespace at { -struct UndefinedType final : public Type { +struct UndefinedType final : public TypeDefault { explicit UndefinedType(); virtual ScalarType scalarType() const override; virtual Backend backend() const override; diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py index e6b2eb81917e72..45b0648254de60 100644 --- a/aten/src/ATen/function_wrapper.py +++ b/aten/src/ATen/function_wrapper.py @@ -39,11 +39,11 @@ def
TypedDict(name, attrs, total=True): # type: ignore # declaration under Type.h (right now, we call this template # BROADCAST but it also handles default arguments) TYPE_METHOD_DECLARATION_BROADCAST = CodeTemplate("""\ -${return_type} ${api_name}(${type_method_formals_with_defaults}) const; +${return_type} ${api_name}(${type_method_formals_with_defaults}) const override; """) # 2. broadcasting functions are implemented in Type.cpp TYPE_METHOD_DEFINITION_BROADCAST = CodeTemplate("""\ -${return_type} Type::${api_name}(${type_method_formals}) const { +${return_type} TypeDefault::${api_name}(${type_method_formals}) const { ${device_guard_declaration} Tensor ${broadcast_returns}; std::tie(${broadcast_returns}) = ${broadcast_function}(${broadcast_actuals}, "${api_name}"); @@ -59,28 +59,36 @@ def TypedDict(name, attrs, total=True): # type: ignore # actual implementation. At the moment, this situation *only* occurs # for 'native' declarations (so the native dispatch is hardcoded into # the template here.) +PURE_VIRTUAL_TYPE_METHOD_DECLARATION = CodeTemplate("""\ +virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals_with_defaults}) const = 0; +""") +DEPRECATED_PURE_VIRTUAL_TYPE_METHOD_DECLARATION = CodeTemplate("""\ +AT_DEPRECATED(virtual ${return_type} \ +${method_prefix_derived}${api_name}(${type_method_formals_with_defaults}) const = 0); +""") +PURE_VIRTUAL_TYPE_METHOD_DECLARATION_BROADCAST = CodeTemplate("""\ +virtual ${return_type} ${api_name}(${type_method_formals_with_defaults}) const = 0; +""") + TYPE_METHOD_DECLARATION_ABSTRACT = CodeTemplate("""\ -virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals_with_defaults}) const; +${return_type} ${method_prefix_derived}${api_name}(${type_method_formals_with_defaults}) const override; """) TYPE_METHOD_DEFINITION_ABSTRACT = CodeTemplate("""\ -${return_type} Type::${method_prefix_derived}${api_name}(${type_method_formals}) const { +${return_type} TypeDefault::${method_prefix_derived}${api_name}(${type_method_formals}) const { AT_ERROR("${method_prefix_derived}${api_name} is not implemented for type ", toString()); } """) TYPE_METHOD_DECLARATION_CONCRETE = CodeTemplate("""\ -virtual ${return_type} ${api_name}(${type_method_formals_with_defaults}) const; -""") -DEPRECATED_TYPE_METHOD_DECLARATION_CONCRETE = CodeTemplate("""\ -AT_DEPRECATED(virtual ${return_type} ${api_name}(${type_method_formals_with_defaults}) const); +${return_type} ${api_name}(${type_method_formals_with_defaults}) const override; """) TYPE_METHOD_DEFINITION_CONCRETE = CodeTemplate("""\ -${return_type} Type::${api_name}(${type_method_formals}) const { +${return_type} TypeDefault::${api_name}(${type_method_formals}) const { ${device_guard_declaration} ${type_definition_body} } """) DEPRECATED_TYPE_METHOD_DEFINITION_CONCRETE = CodeTemplate("""\ -${return_type} Type::${api_name}(${type_method_formals}) const { +${return_type} TypeDefault::${api_name}(${type_method_formals}) const { TensorOptions options(*this); ${device_guard_declaration} return at::native::${api_name}(${type_method_actuals}, options); @@ -88,7 +96,7 @@ def TypedDict(name, attrs, total=True): # type: ignore """) # 4. add virtual override to TypeDerived.h TYPE_DERIVED_DECLARATION = CodeTemplate("""\ -virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override; +${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override; """) # 5. 
add override definition to TypeDerived.cpp TYPE_DERIVED_DEFINITION = CodeTemplate("""\ @@ -382,6 +390,7 @@ def __getitem__(self, x): TopEnvironment = TypedDict('TopEnvironment', { 'type_registrations': List[str], 'type_headers': List[str], + 'pure_virtual_type_method_declarations': List[str], 'type_method_declarations': List[str], 'type_method_definitions': List[str], 'type_method_inline_definitions': List[str], @@ -815,6 +824,8 @@ def process_option(option, output_options): # NN function with no _forward/_backward suffix don't have cimpls. # They call the _forward function and discard any buffer returns abstract = False + top_env['pure_virtual_type_method_declarations'].append( + PURE_VIRTUAL_TYPE_METHOD_DECLARATION.substitute(env)) top_env['type_method_declarations'].append( TYPE_METHOD_DECLARATION_CONCRETE.substitute(env)) body = emit_nn_body(option) @@ -822,11 +833,17 @@ def process_option(option, output_options): TYPE_METHOD_DEFINITION_CONCRETE.substitute( env, type_definition_body=body)) elif broadcast_arg is None: + top_env['pure_virtual_type_method_declarations'].append( + PURE_VIRTUAL_TYPE_METHOD_DECLARATION.substitute(env)) top_env['type_method_declarations'].append( TYPE_METHOD_DECLARATION_ABSTRACT.substitute(env)) top_env['type_method_definitions'].append( TYPE_METHOD_DEFINITION_ABSTRACT.substitute(env)) else: + top_env['pure_virtual_type_method_declarations'].append( + PURE_VIRTUAL_TYPE_METHOD_DECLARATION_BROADCAST.substitute(env)) + top_env['pure_virtual_type_method_declarations'].append( + PURE_VIRTUAL_TYPE_METHOD_DECLARATION.substitute(env)) top_env['type_method_declarations'].append( TYPE_METHOD_DECLARATION_BROADCAST.substitute(env)) top_env['type_method_declarations'].append( @@ -1031,9 +1048,12 @@ def find_formal(formal_name, formals): # Factory methods are not dispatched over `Type`. 
if not is_factory_method: if option['deprecated']: - top_env['type_method_declarations'].append(DEPRECATED_TYPE_METHOD_DECLARATION_CONCRETE.substitute(env)) + top_env['pure_virtual_type_method_declarations'].append( + DEPRECATED_PURE_VIRTUAL_TYPE_METHOD_DECLARATION.substitute(env)) else: - top_env['type_method_declarations'].append(TYPE_METHOD_DECLARATION_CONCRETE.substitute(env)) + top_env['pure_virtual_type_method_declarations'].append( + PURE_VIRTUAL_TYPE_METHOD_DECLARATION.substitute(env)) + top_env['type_method_declarations'].append(TYPE_METHOD_DECLARATION_CONCRETE.substitute(env)) dispatch = option['type_method_definition_dispatch'] option['native_type_method_dispatch'] = dispatch diff --git a/aten/src/ATen/gen.py b/aten/src/ATen/gen.py index f7a4deb58dc941..d23831be4732bc 100644 --- a/aten/src/ATen/gen.py +++ b/aten/src/ATen/gen.py @@ -107,7 +107,8 @@ def check_all_files_written(self): SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/SparseTypeDerived.cpp") TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h") TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h") -TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp") +TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h") +TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp") REGISTER_CPU_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.h") REGISTER_CPU_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.cpp") @@ -166,6 +167,7 @@ def check_all_files_written(self): 'cpu_type_headers': [], 'cuda_type_registrations': [], 'cuda_type_headers': [], + 'pure_virtual_type_method_declarations': [], 'type_method_declarations': [], 'type_method_definitions': [], 'type_method_inline_definitions': [], @@ -329,7 +331,7 @@ def iterate_types(): # so that the script runs quickly when we are just querying the # outputs def declare_outputs(): - files = ['Declarations.yaml', 'Type.h', 'Type.cpp', 'Tensor.h', + files = ['Declarations.yaml', 'Type.h', 'TypeDefault.cpp', 'TypeDefault.h', 'Tensor.h', 'TensorMethods.h', 'Functions.h', 'CPUCopy.cpp', 'NativeFunctions.h', 'RegisterCPU.cpp', 'RegisterCPU.h'] @@ -399,7 +401,8 @@ def generate_outputs(): backend, density, scalar_type, declarations)) file_manager.write('Type.h', TYPE_H, top_env) - file_manager.write('Type.cpp', TYPE_CPP, top_env) + file_manager.write('TypeDefault.h', TYPE_DEFAULT_H, top_env) + file_manager.write('TypeDefault.cpp', TYPE_DEFAULT_CPP, top_env) file_manager.write('RegisterCPU.h', REGISTER_CPU_H, top_env) file_manager.write('RegisterCPU.cpp', REGISTER_CPU_CPP, top_env) diff --git a/aten/src/ATen/templates/SparseTypeDerived.cpp b/aten/src/ATen/templates/SparseTypeDerived.cpp index 2ef9dbf398fa2f..8985e7d373b765 100644 --- a/aten/src/ATen/templates/SparseTypeDerived.cpp +++ b/aten/src/ATen/templates/SparseTypeDerived.cpp @@ -28,7 +28,7 @@ namespace at { ${Type}::${Type}() - : Type(${Backend}TensorId(), /*is_variable=*/false, /*is_undefined=*/false) {} + : TypeDefault(${Backend}TensorId(), /*is_variable=*/false, /*is_undefined=*/false) {} ScalarType ${Type}::scalarType() const { return ScalarType::${ScalarName}; } diff --git a/aten/src/ATen/templates/Type.h b/aten/src/ATen/templates/Type.h index 10c52ac14b6975..0296deaab8e1ce 100644 --- a/aten/src/ATen/templates/Type.h +++ b/aten/src/ATen/templates/Type.h @@ -47,6 +47,7 @@ enum class TypeID { struct AT_API Type { explicit Type(TensorTypeId type_id, bool is_variable, bool is_undefined) : type_id_(type_id), is_variable_(is_variable), 
is_undefined_(is_undefined) {} + virtual ~Type() {} virtual ScalarType scalarType() const = 0; virtual Backend backend() const = 0; @@ -65,8 +66,8 @@ struct AT_API Type { virtual Storage unsafeStorageFromTH(void * th_pointer, bool retain) const = 0; virtual const char * toString() const = 0; virtual size_t elementSizeInBytes() const = 0; - virtual Type & toBackend(Backend b) const; - virtual Type & toScalarType(ScalarType s) const; + virtual Type & toBackend(Backend b) const = 0; + virtual Type & toScalarType(ScalarType s) const = 0; Type & toSparse() const { return this->toBackend(at::toSparse(this->backend())); } @@ -91,23 +92,27 @@ struct AT_API Type { return backendToDeviceType(backend()); } - Tensor copy(const Tensor & src, bool non_blocking=false) const; - Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking=false) const; + virtual Tensor copy(const Tensor & src, bool non_blocking=false) const = 0; + virtual Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking=false) const = 0; virtual Tensor & s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const = 0; virtual Tensor & _s_copy_from(const Tensor & self, Tensor & dst, bool non_blocking) const = 0; - Tensor tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter=noop_deleter) const; - Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const; - Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const; - Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const; - Tensor scalarTensor(Scalar s) const; + virtual Tensor tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter=noop_deleter) const = 0; + virtual Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const = 0; + virtual Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const = 0; + virtual Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const = 0; + virtual Tensor scalarTensor(Scalar s) const = 0; - bool operator==(const Type& other) const; - bool operator!=(const Type& other) const; + bool operator==(const Type& other) const { + return this == &other; + } + bool operator!=(const Type& other) const { + return this != &other; + } // example // virtual Tensor * add(Tensor & a, Tensor & b) = 0; - ${type_method_declarations} + ${pure_virtual_type_method_declarations} protected: TensorTypeId type_id_; bool is_variable_; diff --git a/aten/src/ATen/templates/Type.cpp b/aten/src/ATen/templates/TypeDefault.cpp similarity index 72% rename from aten/src/ATen/templates/Type.cpp rename to aten/src/ATen/templates/TypeDefault.cpp index a15085c74b1a1d..26b23de7d2ad33 100644 --- a/aten/src/ATen/templates/Type.cpp +++ b/aten/src/ATen/templates/TypeDefault.cpp @@ -1,4 +1,4 @@ -#include "ATen/Type.h" +#include "ATen/TypeDefault.h" // ${generated_comment} @@ -13,13 +13,13 @@ namespace at { -Tensor & Type::copy_(Tensor & self, const Tensor & src, bool non_blocking) const { +Tensor & TypeDefault::copy_(Tensor & self, const Tensor & src, bool non_blocking) const { Tensor b_src; std::tie(b_src) = expand_inplace(self, src, "copy"); return s_copy_(self, b_src, non_blocking); } -Tensor Type::copy(const Tensor & src, bool non_blocking) const { +Tensor TypeDefault::copy(const Tensor & src, bool non_blocking) const { // TODO(psag): have a DeviceGuard here AT_CHECK(src.defined(), "attempt to copy an undefined tensor"); if
(is_sparse()) { @@ -37,10 +37,10 @@ Tensor Type::copy(const Tensor & src, bool non_blocking) const { } } -Type & Type::toBackend(Backend b) const { +Type & TypeDefault::toBackend(Backend b) const { return at::globalContext().getNonVariableType(b,scalarType()); } -Type & Type::toScalarType(ScalarType s) const { +Type & TypeDefault::toScalarType(ScalarType s) const { return at::globalContext().getNonVariableType(backend(),s); } static std::vector<int64_t> defaultStrides(IntList sizes) { @@ -64,31 +64,24 @@ static int64_t computeStorageSize(IntList sizes, IntList strides) { } return size; } -Tensor Type::tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter) const { +Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter) const { return tensorFromBlob(data, sizes, defaultStrides(sizes), deleter); } -Tensor Type::tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter) const { +Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter) const { auto storage = storageFromBlob(data, computeStorageSize(sizes, strides), deleter); return tensor(storage, 0, sizes, strides); } -Tensor Type::tensorWithAllocator(IntList sizes, Allocator* allocator) const { +Tensor TypeDefault::tensorWithAllocator(IntList sizes, Allocator* allocator) const { return tensorWithAllocator(sizes, defaultStrides(sizes), std::move(allocator)); } -Tensor Type::tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const { +Tensor TypeDefault::tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const { auto storage = storageWithAllocator(computeStorageSize(sizes, strides), std::move(allocator)); return tensor(storage, 0, sizes, strides); } -Tensor Type::scalarTensor(Scalar s) const { +Tensor TypeDefault::scalarTensor(Scalar s) const { return tensor({}).fill_(s); } -bool Type::operator==(const Type& other) const { - return this == &other; -} -bool Type::operator!=(const Type& other) const { - return this != &other; -} - ${type_method_definitions} } diff --git a/aten/src/ATen/templates/TypeDefault.h b/aten/src/ATen/templates/TypeDefault.h new file mode 100644 index 00000000000000..69731c717478e1 --- /dev/null +++ b/aten/src/ATen/templates/TypeDefault.h @@ -0,0 +1,36 @@ +#pragma once + +// ${generated_comment} + +#include "ATen/Type.h" + +namespace at { + +struct AT_API TypeDefault : public Type { + explicit TypeDefault(TensorTypeId type_id, bool is_variable, bool is_undefined) + : Type(type_id, is_variable, is_undefined) {} + + // Make sure overload resolution considers the nullary virtual method. + // (A single argument overload is generated in the list.)
+ bool is_cuda() const override = 0; + bool is_sparse() const override = 0; + bool is_distributed() const override = 0; + + Type & toBackend(Backend b) const override; + Type & toScalarType(ScalarType s) const override; + + Tensor copy(const Tensor & src, bool non_blocking=false) const override; + Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking=false) const override; + + Tensor tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter=noop_deleter) const override; + Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const override; + Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const override; + Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const override; + Tensor scalarTensor(Scalar s) const override; + + // example + // virtual Tensor * add(Tensor & a, Tensor & b) = 0; + ${type_method_declarations} +}; + +} // namespace at diff --git a/aten/src/ATen/templates/TypeDerived.cpp b/aten/src/ATen/templates/TypeDerived.cpp index 4335a8f2209a20..c24f7faf963a4a 100644 --- a/aten/src/ATen/templates/TypeDerived.cpp +++ b/aten/src/ATen/templates/TypeDerived.cpp @@ -39,7 +39,7 @@ static int getPointerDevice(void* ptr) { #endif ${Type}::${Type}() - : Type(${Backend}TensorId(), /*is_variable=*/false, /*is_undefined=*/false) {} + : TypeDefault(${Backend}TensorId(), /*is_variable=*/false, /*is_undefined=*/false) {} ScalarType ${Type}::scalarType() const { return ScalarType::${ScalarName}; } diff --git a/aten/src/ATen/templates/TypeDerived.h b/aten/src/ATen/templates/TypeDerived.h index ec08e1a336daf6..663a913da172cc 100644 --- a/aten/src/ATen/templates/TypeDerived.h +++ b/aten/src/ATen/templates/TypeDerived.h @@ -2,7 +2,7 @@ // ${generated_comment} -#include "ATen/Type.h" +#include "ATen/TypeDefault.h" #include "ATen/Context.h" #include "ATen/TensorMethods.h" #include "ATen/CheckGenerator.h" @@ -15,7 +15,7 @@ namespace at { -struct ${Type} final : public Type { +struct ${Type} final : public TypeDefault { explicit ${Type}(); virtual ScalarType scalarType() const override; virtual Backend backend() const override; diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py index a1b9f8203cdd85..9da62c3f0054cd 100644 --- a/tools/autograd/gen_variable_type.py +++ b/tools/autograd/gen_variable_type.py @@ -108,7 +108,7 @@ """) CALL_VIA_TYPE = CodeTemplate("""\ -Type::${method_prefix_derived}${api_name}(${type_method_args})""") +TypeDefault::${method_prefix_derived}${api_name}(${type_method_args})""") CALL_VIA_DERIVED = CodeTemplate("""\ baseType->${method_prefix_derived}${base_name}(${unpacked_args})""") diff --git a/tools/autograd/templates/VariableType.cpp b/tools/autograd/templates/VariableType.cpp index 73c102d619707b..275a29bb834245 100644 --- a/tools/autograd/templates/VariableType.cpp +++ b/tools/autograd/templates/VariableType.cpp @@ -43,7 +43,7 @@ using namespace torch::autograd::generated; namespace torch { namespace autograd { VariableType::VariableType(Context* context, Type* baseType) - : Type(baseType->type_id(), /*is_variable=*/true, /*is_undefined=*/false) + : TypeDefault(baseType->type_id(), /*is_variable=*/true, /*is_undefined=*/false) , baseType(baseType) , id_(context->freshTypeID()) { str = std::string("Variable[") + baseType->toString() + "]"; diff --git a/tools/autograd/templates/VariableType.h b/tools/autograd/templates/VariableType.h index 85abf290ef6fd8..7b04b78e53ee70 100644 ---
a/tools/autograd/templates/VariableType.h +++ b/tools/autograd/templates/VariableType.h @@ -4,6 +4,8 @@ #include <ATen/ATen.h> +#include <ATen/TypeDefault.h> + #include #include // for size_t @@ -30,7 +32,7 @@ using at::optional; void register_variable_type_for(at::Type* baseType); -struct TORCH_API VariableType final : public at::Type { +struct TORCH_API VariableType final : public at::TypeDefault { VariableType(Context* context, at::Type* baseType); virtual at::ScalarType scalarType() const override; virtual at::Backend backend() const override; From 0a8c8c1dbead2f845e524ae32c19167d80363148 Mon Sep 17 00:00:00 2001 From: Edward Yang Date: Sun, 2 Sep 2018 15:24:01 -0700 Subject: [PATCH 2/3] Rename real to scalar_t. (#11163) Summary: This is necessary to allow us to use the complex header, which defines real (and is very sad if real is macro'ed). We should also fix accreal, ureal, Real and REAL, but only 'real' is the real blocker. ``` codemod -d aten/src/TH --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t codemod -d aten/src/THC --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t codemod -d aten/src/THNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t codemod -d aten/src/THCUNN --extensions c,cc,cpp,cu,cuh,h,TARGETS,py,hpp '\breal\b' scalar_t ``` Signed-off-by: Edward Z. Yang Pull Request resolved: https://github.com/pytorch/pytorch/pull/11163 Reviewed By: SsnL Differential Revision: D9619906 Pulled By: ezyang fbshipit-source-id: 922cb3a763c0bffecbd81200c1cefc6b8ea70942 --- aten/src/TH/THGenerateByteType.h | 6 +- aten/src/TH/THGenerateCharType.h | 6 +- aten/src/TH/THGenerateDoubleType.h | 6 +- aten/src/TH/THGenerateFloatType.h | 6 +- aten/src/TH/THGenerateHalfType.h | 4 +- aten/src/TH/THGenerateIntType.h | 6 +- aten/src/TH/THGenerateLongType.h | 6 +- aten/src/TH/THGenerateShortType.h | 6 +- aten/src/TH/generic/THBlas.cpp | 60 +-- aten/src/TH/generic/THBlas.h | 16 +- aten/src/TH/generic/THLapack.cpp | 32 +- aten/src/TH/generic/THLapack.h | 32 +- aten/src/TH/generic/THStorage.cpp | 38 +- aten/src/TH/generic/THStorage.h | 16 +- aten/src/TH/generic/THStorageCopy.cpp | 10 +- aten/src/TH/generic/THStorageCopy.h | 2 +- aten/src/TH/generic/THTensor.cpp | 20 +- aten/src/TH/generic/THTensor.h | 20 +- aten/src/TH/generic/THTensorApply.hpp | 28 +- aten/src/TH/generic/THTensorConv.cpp | 464 +++++++++--------- aten/src/TH/generic/THTensorConv.h | 104 ++-- aten/src/TH/generic/THTensorCopy.cpp | 48 +- aten/src/TH/generic/THTensorEvenMoreMath.cpp | 184 +++---- aten/src/TH/generic/THTensorFastGetSet.hpp | 22 +- aten/src/TH/generic/THTensorLapack.cpp | 136 ++--- aten/src/TH/generic/THTensorLapack.h | 2 +- aten/src/TH/generic/THTensorMath.cpp | 274 +++++------ aten/src/TH/generic/THTensorMath.h | 114 ++--- aten/src/TH/generic/THTensorMoreMath.cpp | 408 +++++++-------- aten/src/TH/generic/THTensorRandom.cpp | 66 +-- aten/src/TH/generic/THVector.h | 92 ++-- aten/src/TH/generic/THVectorDefault.cpp | 52 +- aten/src/TH/generic/THVectorDispatch.cpp | 48 +- aten/src/THC/THCGenerateByteType.h | 4 +- aten/src/THC/THCGenerateCharType.h | 4 +- aten/src/THC/THCGenerateDoubleType.h | 4 +- aten/src/THC/THCGenerateFloatType.h | 4 +- aten/src/THC/THCGenerateHalfType.h | 4 +- aten/src/THC/THCGenerateIntType.h | 4 +- aten/src/THC/THCGenerateLongType.h | 4 +- aten/src/THC/THCGenerateShortType.h | 4 +- aten/src/THC/THCNumerics.cuh | 8 +- aten/src/THC/THCTensorIndex.cu | 18 +- aten/src/THC/THCTensorMathPointwise.cuh | 24 +- aten/src/THC/THCTensorRandom.cu | 8 +- aten/src/THC/generic/THCStorage.cpp | 32 +-
aten/src/THC/generic/THCStorage.cu | 4 +- aten/src/THC/generic/THCStorage.h | 16 +- aten/src/THC/generic/THCStorageCopy.cpp | 4 +- aten/src/THC/generic/THCStorageCopy.cu | 4 +- aten/src/THC/generic/THCStorageCopy.h | 2 +- aten/src/THC/generic/THCTensor.cpp | 18 +- aten/src/THC/generic/THCTensor.h | 20 +- aten/src/THC/generic/THCTensorCopy.cpp | 16 +- aten/src/THC/generic/THCTensorCopy.cu | 24 +- aten/src/THC/generic/THCTensorIndex.cu | 156 +++--- aten/src/THC/generic/THCTensorIndex.h | 2 +- aten/src/THC/generic/THCTensorMasked.cu | 16 +- aten/src/THC/generic/THCTensorMasked.h | 4 +- aten/src/THC/generic/THCTensorMath.cu | 66 +-- aten/src/THC/generic/THCTensorMath.h | 6 +- aten/src/THC/generic/THCTensorMathBlas.cu | 60 +-- aten/src/THC/generic/THCTensorMathBlas.h | 10 +- aten/src/THC/generic/THCTensorMathCompare.cu | 84 ++-- aten/src/THC/generic/THCTensorMathCompare.h | 24 +- aten/src/THC/generic/THCTensorMathCompareT.cu | 60 +-- aten/src/THC/generic/THCTensorMathMagma.cu | 116 ++--- aten/src/THC/generic/THCTensorMathPairwise.cu | 90 ++-- aten/src/THC/generic/THCTensorMathPairwise.h | 26 +- .../src/THC/generic/THCTensorMathPointwise.cu | 214 ++++---- aten/src/THC/generic/THCTensorMathPointwise.h | 20 +- aten/src/THC/generic/THCTensorMathReduce.cu | 94 ++-- aten/src/THC/generic/THCTensorMathReduce.h | 14 +- aten/src/THC/generic/THCTensorMathScan.cu | 18 +- aten/src/THC/generic/THCTensorMode.cu | 22 +- aten/src/THC/generic/THCTensorRandom.cu | 64 +-- .../src/THC/generic/THCTensorScatterGather.cu | 106 ++-- aten/src/THC/generic/THCTensorScatterGather.h | 2 +- aten/src/THC/generic/THCTensorSort.cu | 22 +- aten/src/THC/generic/THCTensorTopK.cu | 10 +- aten/src/THCUNN/VolumetricAveragePooling.cu | 2 +- aten/src/THCUNN/generic/Abs.cu | 4 +- aten/src/THCUNN/generic/AbsCriterion.cu | 26 +- aten/src/THCUNN/generic/BCECriterion.cu | 34 +- aten/src/THCUNN/generic/BatchNormalization.cu | 18 +- aten/src/THCUNN/generic/ClassNLLCriterion.cu | 36 +- aten/src/THCUNN/generic/Col2Im.cu | 2 +- aten/src/THCUNN/generic/DistKLDivCriterion.cu | 26 +- aten/src/THCUNN/generic/ELU.cu | 18 +- aten/src/THCUNN/generic/FeatureLPPooling.cu | 26 +- aten/src/THCUNN/generic/GatedLinearUnit.cu | 4 +- aten/src/THCUNN/generic/HardTanh.cu | 22 +- aten/src/THCUNN/generic/IndexLinear.cu | 46 +- aten/src/THCUNN/generic/L1Cost.cu | 12 +- aten/src/THCUNN/generic/LeakyReLU.cu | 12 +- aten/src/THCUNN/generic/LogSigmoid.cu | 4 +- aten/src/THCUNN/generic/LookupTable.cu | 10 +- aten/src/THCUNN/generic/LookupTableBag.cu | 6 +- aten/src/THCUNN/generic/MSECriterion.cu | 34 +- aten/src/THCUNN/generic/MarginCriterion.cu | 20 +- .../generic/MultiLabelMarginCriterion.cu | 12 +- .../THCUNN/generic/MultiMarginCriterion.cu | 26 +- aten/src/THCUNN/generic/PReLU.cu | 20 +- aten/src/THCUNN/generic/RReLU.cu | 18 +- aten/src/THCUNN/generic/Sigmoid.cu | 2 +- aten/src/THCUNN/generic/SmoothL1Criterion.cu | 26 +- .../src/THCUNN/generic/SoftMarginCriterion.cu | 24 +- aten/src/THCUNN/generic/SoftPlus.cu | 12 +- aten/src/THCUNN/generic/SoftShrink.cu | 8 +- aten/src/THCUNN/generic/SparseLinear.cu | 4 +- .../generic/SpatialAdaptiveAveragePooling.cu | 8 +- .../generic/SpatialAdaptiveMaxPooling.cu | 8 +- .../THCUNN/generic/SpatialAveragePooling.cu | 12 +- .../generic/SpatialClassNLLCriterion.cu | 34 +- .../THCUNN/generic/SpatialConvolutionLocal.cu | 14 +- .../THCUNN/generic/SpatialConvolutionMM.cu | 26 +- aten/src/THCUNN/generic/SpatialCrossMapLRN.cu | 18 +- .../generic/SpatialDepthwiseConvolution.cu | 48 +- .../generic/SpatialDilatedConvolution.cu | 26 +- 
.../generic/SpatialDilatedMaxPooling.cu | 8 +- .../generic/SpatialFractionalMaxPooling.cu | 30 +- .../generic/SpatialFullDilatedConvolution.cu | 26 +- .../generic/SpatialReflectionPadding.cu | 24 +- .../generic/SpatialReplicationPadding.cu | 24 +- aten/src/THCUNN/generic/SpatialSubSampling.cu | 44 +- .../generic/SpatialUpSamplingBilinear.cu | 12 +- .../generic/SpatialUpSamplingNearest.cu | 12 +- aten/src/THCUNN/generic/Sqrt.cu | 6 +- aten/src/THCUNN/generic/Square.cu | 4 +- aten/src/THCUNN/generic/Tanh.cu | 2 +- .../src/THCUNN/generic/TemporalConvolution.cu | 14 +- aten/src/THCUNN/generic/TemporalMaxPooling.cu | 8 +- .../generic/TemporalReflectionPadding.cu | 24 +- .../generic/TemporalReplicationPadding.cu | 24 +- .../THCUNN/generic/TemporalRowConvolution.cu | 26 +- .../generic/TemporalUpSamplingLinear.cu | 12 +- .../generic/TemporalUpSamplingNearest.cu | 12 +- aten/src/THCUNN/generic/Threshold.cu | 24 +- .../VolumetricAdaptiveAveragePooling.cu | 8 +- .../generic/VolumetricAdaptiveMaxPooling.cu | 8 +- .../generic/VolumetricAveragePooling.cu | 24 +- .../THCUNN/generic/VolumetricConvolution.cu | 26 +- .../generic/VolumetricDilatedConvolution.cu | 26 +- .../generic/VolumetricDilatedMaxPooling.cu | 12 +- .../generic/VolumetricFractionalMaxPooling.cu | 30 +- .../VolumetricFullDilatedConvolution.cu | 26 +- .../THCUNN/generic/VolumetricMaxUnpooling.cu | 12 +- .../generic/VolumetricReplicationPadding.cu | 26 +- .../generic/VolumetricUpSamplingNearest.cu | 12 +- .../generic/VolumetricUpSamplingTrilinear.cu | 12 +- aten/src/THNN/generic/Abs.c | 4 +- aten/src/THNN/generic/AbsCriterion.c | 14 +- aten/src/THNN/generic/BCECriterion.c | 40 +- aten/src/THNN/generic/BatchNormalization.c | 46 +- aten/src/THNN/generic/ClassNLLCriterion.c | 22 +- aten/src/THNN/generic/Col2Im.c | 14 +- aten/src/THNN/generic/DistKLDivCriterion.c | 12 +- aten/src/THNN/generic/ELU.c | 18 +- aten/src/THNN/generic/FeatureLPPooling.c | 22 +- aten/src/THNN/generic/GatedLinearUnit.c | 4 +- aten/src/THNN/generic/HardShrink.c | 8 +- aten/src/THNN/generic/HardTanh.c | 26 +- aten/src/THNN/generic/Im2Col.c | 4 +- aten/src/THNN/generic/IndexLinear.c | 122 ++--- aten/src/THNN/generic/L1Cost.c | 4 +- aten/src/THNN/generic/LeakyReLU.c | 14 +- aten/src/THNN/generic/Linear.c | 2 +- aten/src/THNN/generic/LogSigmoid.c | 14 +- aten/src/THNN/generic/LookupTable.c | 26 +- aten/src/THNN/generic/MSECriterion.c | 16 +- aten/src/THNN/generic/MarginCriterion.c | 14 +- .../THNN/generic/MultiLabelMarginCriterion.c | 38 +- aten/src/THNN/generic/MultiMarginCriterion.c | 50 +- aten/src/THNN/generic/PReLU.c | 56 +-- aten/src/THNN/generic/RReLU.c | 30 +- aten/src/THNN/generic/Sigmoid.c | 4 +- aten/src/THNN/generic/SmoothL1Criterion.c | 22 +- aten/src/THNN/generic/SoftMarginCriterion.c | 18 +- aten/src/THNN/generic/SoftPlus.c | 14 +- aten/src/THNN/generic/SoftShrink.c | 8 +- aten/src/THNN/generic/SparseLinear.c | 42 +- .../generic/SpatialAdaptiveAveragePooling.c | 42 +- .../THNN/generic/SpatialAdaptiveMaxPooling.c | 40 +- aten/src/THNN/generic/SpatialAveragePooling.c | 30 +- .../THNN/generic/SpatialClassNLLCriterion.c | 30 +- .../THNN/generic/SpatialConvolutionLocal.c | 4 +- aten/src/THNN/generic/SpatialConvolutionMM.c | 8 +- aten/src/THNN/generic/SpatialConvolutionMap.c | 34 +- .../THNN/generic/SpatialDilatedConvolution.c | 44 +- .../THNN/generic/SpatialDilatedMaxPooling.c | 40 +- .../generic/SpatialFractionalMaxPooling.c | 48 +- .../THNN/generic/SpatialFullConvolutionMap.c | 32 +- .../generic/SpatialFullDilatedConvolution.c | 44 +- 
aten/src/THNN/generic/SpatialMaxUnpooling.c | 32 +- .../THNN/generic/SpatialReflectionPadding.c | 32 +- .../THNN/generic/SpatialReplicationPadding.c | 32 +- aten/src/THNN/generic/SpatialSubSampling.c | 70 +-- .../THNN/generic/SpatialUpSamplingBilinear.c | 40 +- .../THNN/generic/SpatialUpSamplingNearest.c | 24 +- aten/src/THNN/generic/Sqrt.c | 8 +- aten/src/THNN/generic/Square.c | 14 +- aten/src/THNN/generic/Tanh.c | 12 +- aten/src/THNN/generic/TemporalConvolution.c | 2 +- aten/src/THNN/generic/TemporalMaxPooling.c | 56 +-- .../THNN/generic/TemporalReflectionPadding.c | 34 +- .../THNN/generic/TemporalReplicationPadding.c | 32 +- .../src/THNN/generic/TemporalRowConvolution.c | 32 +- aten/src/THNN/generic/TemporalSubSampling.c | 2 +- .../THNN/generic/TemporalUpSamplingLinear.c | 32 +- .../THNN/generic/TemporalUpSamplingNearest.c | 24 +- aten/src/THNN/generic/Threshold.c | 14 +- .../VolumetricAdaptiveAveragePooling.c | 42 +- .../generic/VolumetricAdaptiveMaxPooling.c | 40 +- .../THNN/generic/VolumetricAveragePooling.c | 40 +- aten/src/THNN/generic/VolumetricConvolution.c | 8 +- .../THNN/generic/VolumetricConvolutionMM.c | 24 +- .../generic/VolumetricDilatedConvolution.c | 44 +- .../generic/VolumetricDilatedMaxPooling.c | 40 +- .../generic/VolumetricFractionalMaxPooling.c | 48 +- .../VolumetricFullDilatedConvolution.c | 54 +- .../src/THNN/generic/VolumetricMaxUnpooling.c | 36 +- .../generic/VolumetricReplicationPadding.c | 32 +- .../generic/VolumetricUpSamplingNearest.c | 24 +- .../generic/VolumetricUpSamplingTrilinear.c | 48 +- aten/src/THNN/generic/unfold.c | 42 +- tools/amd_build/pyHIPIFY/hipify-python.py | 10 +- torch/csrc/generic/Storage.cpp | 12 +- torch/csrc/generic/StorageMethods.cpp | 10 +- torch/csrc/generic/StorageSharing.cpp | 16 +- torch/csrc/generic/serialization.cpp | 44 +- torch/csrc/generic/utils.h | 2 +- 231 files changed, 3684 insertions(+), 3684 deletions(-) diff --git a/aten/src/TH/THGenerateByteType.h b/aten/src/TH/THGenerateByteType.h index 0ec234de4d367d..3b038420806541 100644 --- a/aten/src/TH/THGenerateByteType.h +++ b/aten/src/TH/THGenerateByteType.h @@ -2,17 +2,17 @@ #error "You must define TH_GENERIC_FILE before including THGenerateByteType.h" #endif -#define real uint8_t +#define scalar_t uint8_t #define ureal uint8_t #define accreal int64_t #define Real Byte #define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) +#define TH_CONVERT_ACCREAL_TO_REAL(_val) (scalar_t)(_val) #define THInf UCHAR_MAX #define TH_REAL_IS_BYTE #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE -#undef real +#undef scalar_t #undef ureal #undef accreal #undef Real diff --git a/aten/src/TH/THGenerateCharType.h b/aten/src/TH/THGenerateCharType.h index 9c172f109ff739..5a71baa61f0eeb 100644 --- a/aten/src/TH/THGenerateCharType.h +++ b/aten/src/TH/THGenerateCharType.h @@ -2,17 +2,17 @@ #error "You must define TH_GENERIC_FILE before including THGenerateCharType.h" #endif -#define real int8_t +#define scalar_t int8_t #define ureal uint8_t #define accreal int64_t #define Real Char #define THInf SCHAR_MAX #define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) +#define TH_CONVERT_ACCREAL_TO_REAL(_val) (scalar_t)(_val) #define TH_REAL_IS_CHAR #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE -#undef real +#undef scalar_t #undef ureal #undef accreal #undef Real diff --git a/aten/src/TH/THGenerateDoubleType.h b/aten/src/TH/THGenerateDoubleType.h index fffee606dab2a7..fb67a52b3ccedc 100644 --- 
a/aten/src/TH/THGenerateDoubleType.h +++ b/aten/src/TH/THGenerateDoubleType.h @@ -2,17 +2,17 @@ #error "You must define TH_GENERIC_FILE before including THGenerateDoubleType.h" #endif -#define real double +#define scalar_t double #define accreal double #define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) +#define TH_CONVERT_ACCREAL_TO_REAL(_val) (scalar_t)(_val) #define Real Double #define THInf DBL_MAX #define TH_REAL_IS_DOUBLE #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE #undef accreal -#undef real +#undef scalar_t #undef Real #undef THInf #undef TH_REAL_IS_DOUBLE diff --git a/aten/src/TH/THGenerateFloatType.h b/aten/src/TH/THGenerateFloatType.h index a31b50c55cae80..c4b97b52362cbf 100644 --- a/aten/src/TH/THGenerateFloatType.h +++ b/aten/src/TH/THGenerateFloatType.h @@ -2,17 +2,17 @@ #error "You must define TH_GENERIC_FILE before including THGenerateFloatType.h" #endif -#define real float +#define scalar_t float #define accreal double #define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) +#define TH_CONVERT_ACCREAL_TO_REAL(_val) (scalar_t)(_val) #define Real Float #define THInf FLT_MAX #define TH_REAL_IS_FLOAT #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE #undef accreal -#undef real +#undef scalar_t #undef Real #undef THInf #undef TH_REAL_IS_FLOAT diff --git a/aten/src/TH/THGenerateHalfType.h b/aten/src/TH/THGenerateHalfType.h index 47ff1e8d7a42bb..09d4c878d0f11e 100644 --- a/aten/src/TH/THGenerateHalfType.h +++ b/aten/src/TH/THGenerateHalfType.h @@ -3,7 +3,7 @@ #endif #include "THHalf.h" -#define real THHalf +#define scalar_t THHalf #define accreal float #define TH_CONVERT_REAL_TO_ACCREAL(_val) TH_half2float(_val) #define TH_CONVERT_ACCREAL_TO_REAL(_val) TH_float2half(_val) @@ -12,7 +12,7 @@ #define TH_REAL_IS_HALF #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE -#undef real +#undef scalar_t #undef accreal #undef Real #undef THInf diff --git a/aten/src/TH/THGenerateIntType.h b/aten/src/TH/THGenerateIntType.h index 5135bc5b6d8207..2d31760ad9157c 100644 --- a/aten/src/TH/THGenerateIntType.h +++ b/aten/src/TH/THGenerateIntType.h @@ -2,17 +2,17 @@ #error "You must define TH_GENERIC_FILE before including THGenerateIntType.h" #endif -#define real int32_t +#define scalar_t int32_t #define ureal uint32_t #define accreal int64_t #define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) +#define TH_CONVERT_ACCREAL_TO_REAL(_val) (scalar_t)(_val) #define Real Int #define THInf INT_MAX #define TH_REAL_IS_INT #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE -#undef real +#undef scalar_t #undef ureal #undef accreal #undef Real diff --git a/aten/src/TH/THGenerateLongType.h b/aten/src/TH/THGenerateLongType.h index d2b9af0776581c..636cf94584005b 100644 --- a/aten/src/TH/THGenerateLongType.h +++ b/aten/src/TH/THGenerateLongType.h @@ -2,17 +2,17 @@ #error "You must define TH_GENERIC_FILE before including THGenerateLongType.h" #endif -#define real int64_t +#define scalar_t int64_t #define ureal uint64_t #define accreal int64_t #define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) +#define TH_CONVERT_ACCREAL_TO_REAL(_val) (scalar_t)(_val) #define Real Long #define THInf LONG_MAX #define TH_REAL_IS_LONG #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE -#undef real +#undef scalar_t #undef ureal #undef accreal #undef Real diff --git a/aten/src/TH/THGenerateShortType.h 
b/aten/src/TH/THGenerateShortType.h index 5b83c476335f58..afda4dce8e1651 100644 --- a/aten/src/TH/THGenerateShortType.h +++ b/aten/src/TH/THGenerateShortType.h @@ -2,17 +2,17 @@ #error "You must define TH_GENERIC_FILE before including THGenerateShortType.h" #endif -#define real int16_t +#define scalar_t int16_t #define ureal uint16_t #define accreal int64_t #define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) +#define TH_CONVERT_ACCREAL_TO_REAL(_val) (scalar_t)(_val) #define Real Short #define THInf SHRT_MAX #define TH_REAL_IS_SHORT #line 1 TH_GENERIC_FILE #include TH_GENERIC_FILE -#undef real +#undef scalar_t #undef ureal #undef accreal #undef Real diff --git a/aten/src/TH/generic/THBlas.cpp b/aten/src/TH/generic/THBlas.cpp index d06ae6a9d89aff..352ccc644e75b5 100644 --- a/aten/src/TH/generic/THBlas.cpp +++ b/aten/src/TH/generic/THBlas.cpp @@ -39,7 +39,7 @@ TH_EXTERNC void sgemm_(char *transa, char *transb, int *m, int *n, int *k, float -void THBlas_(swap)(int64_t n, real *x, int64_t incx, real *y, int64_t incy) +void THBlas_(swap)(int64_t n, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) { if(n == 1) { @@ -66,14 +66,14 @@ void THBlas_(swap)(int64_t n, real *x, int64_t incx, real *y, int64_t incy) int64_t i; for(i = 0; i < n; i++) { - real z = x[i*incx]; + scalar_t z = x[i*incx]; x[i*incx] = y[i*incy]; y[i*incy] = z; } } } -void THBlas_(scal)(int64_t n, real a, real *x, int64_t incx) +void THBlas_(scal)(int64_t n, scalar_t a, scalar_t *x, int64_t incx) { if(n == 1) incx = 1; @@ -104,7 +104,7 @@ void THBlas_(scal)(int64_t n, real a, real *x, int64_t incx) } } -void THBlas_(copy)(int64_t n, real *x, int64_t incx, real *y, int64_t incy) +void THBlas_(copy)(int64_t n, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) { if(n == 1) { @@ -134,7 +134,7 @@ void THBlas_(copy)(int64_t n, real *x, int64_t incx, real *y, int64_t incy) } } -void THBlas_(axpy)(int64_t n, real a, real *x, int64_t incx, real *y, int64_t incy) +void THBlas_(axpy)(int64_t n, scalar_t a, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) { if(n == 1) { @@ -164,7 +164,7 @@ void THBlas_(axpy)(int64_t n, real a, real *x, int64_t incx, real *y, int64_t in } } -real THBlas_(dot)(int64_t n, real *x, int64_t incx, real *y, int64_t incy) +scalar_t THBlas_(dot)(int64_t n, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) { if(n == 1) { @@ -180,22 +180,22 @@ real THBlas_(dot)(int64_t n, real *x, int64_t incx, real *y, int64_t incy) int i_incy = (int)incy; #if defined(TH_REAL_IS_DOUBLE) - return (real) ddot_(&i_n, x, &i_incx, y, &i_incy); + return (scalar_t) ddot_(&i_n, x, &i_incx, y, &i_incy); #else - return (real) sdot_(&i_n, x, &i_incx, y, &i_incy); + return (scalar_t) sdot_(&i_n, x, &i_incx, y, &i_incy); #endif } #endif { int64_t i; - real sum = 0; + scalar_t sum = 0; for(i = 0; i < n; i++) sum += x[i*incx]*y[i*incy]; return sum; } } -void THBlas_(gemv)(char trans, int64_t m, int64_t n, real alpha, real *a, int64_t lda, real *x, int64_t incx, real beta, real *y, int64_t incy) +void THBlas_(gemv)(char trans, int64_t m, int64_t n, scalar_t alpha, scalar_t *a, int64_t lda, scalar_t *x, int64_t incx, scalar_t beta, scalar_t *y, int64_t incy) { if(n == 1) lda = m; @@ -228,8 +228,8 @@ void THBlas_(gemv)(char trans, int64_t m, int64_t n, real alpha, real *a, int64_ { for(i = 0; i < n; i++) { - real sum = 0; - real *row_ = a+lda*i; + scalar_t sum = 0; + scalar_t *row_ = a+lda*i; for(j = 0; j < m; j++) sum += x[j*incx]*row_[j]; if (beta == 0) @@ -245,8 +245,8 @@ 
void THBlas_(gemv)(char trans, int64_t m, int64_t n, real alpha, real *a, int64_ for(j = 0; j < n; j++) { - real *column_ = a+lda*j; - real z = alpha*x[j*incx]; + scalar_t *column_ = a+lda*j; + scalar_t z = alpha*x[j*incx]; for(i = 0; i < m; i++) y[i*incy] += z*column_[i]; } @@ -254,7 +254,7 @@ void THBlas_(gemv)(char trans, int64_t m, int64_t n, real alpha, real *a, int64_ } } -void THBlas_(ger)(int64_t m, int64_t n, real alpha, real *x, int64_t incx, real *y, int64_t incy, real *a, int64_t lda) +void THBlas_(ger)(int64_t m, int64_t n, scalar_t alpha, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy, scalar_t *a, int64_t lda) { if(n == 1) lda = m; @@ -284,15 +284,15 @@ void THBlas_(ger)(int64_t m, int64_t n, real alpha, real *x, int64_t incx, real int64_t i, j; for(j = 0; j < n; j++) { - real *column_ = a+j*lda; - real z = alpha*y[j*incy]; + scalar_t *column_ = a+j*lda; + scalar_t z = alpha*y[j*incy]; for(i = 0; i < m; i++) column_[i] += z*x[i*incx] ; } } } -void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, real alpha, real *a, int64_t lda, real *b, int64_t ldb, real beta, real *c, int64_t ldc) +void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, scalar_t alpha, scalar_t *a, int64_t lda, scalar_t *b, int64_t ldb, scalar_t beta, scalar_t *c, int64_t ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); @@ -351,13 +351,13 @@ void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, re int64_t i, j, l; if(!transa_ && !transb_) { - real *a_ = a; + scalar_t *a_ = a; for(i = 0; i < m; i++) { - real *b_ = b; + scalar_t *b_ = b; for(j = 0; j < n; j++) { - real sum = 0; + scalar_t sum = 0; for(l = 0; l < k; l++) sum += a_[l*lda]*b_[l]; b_ += ldb; @@ -371,13 +371,13 @@ void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, re } else if(transa_ && !transb_) { - real *a_ = a; + scalar_t *a_ = a; for(i = 0; i < m; i++) { - real *b_ = b; + scalar_t *b_ = b; for(j = 0; j < n; j++) { - real sum = 0; + scalar_t sum = 0; for(l = 0; l < k; l++) sum += a_[l]*b_[l]; b_ += ldb; @@ -391,13 +391,13 @@ void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, re } else if(!transa_ && transb_) { - real *a_ = a; + scalar_t *a_ = a; for(i = 0; i < m; i++) { - real *b_ = b; + scalar_t *b_ = b; for(j = 0; j < n; j++) { - real sum = 0; + scalar_t sum = 0; for(l = 0; l < k; l++) sum += a_[l*lda]*b_[l*ldb]; b_++; @@ -411,13 +411,13 @@ void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, re } else { - real *a_ = a; + scalar_t *a_ = a; for(i = 0; i < m; i++) { - real *b_ = b; + scalar_t *b_ = b; for(j = 0; j < n; j++) { - real sum = 0; + scalar_t sum = 0; for(l = 0; l < k; l++) sum += a_[l]*b_[l*ldb]; b_++; diff --git a/aten/src/TH/generic/THBlas.h b/aten/src/TH/generic/THBlas.h index c36e796a0cb5e8..b1f818011a4a34 100644 --- a/aten/src/TH/generic/THBlas.h +++ b/aten/src/TH/generic/THBlas.h @@ -3,17 +3,17 @@ #else /* Level 1 */ -TH_API void THBlas_(swap)(int64_t n, real *x, int64_t incx, real *y, int64_t incy); -TH_API void THBlas_(scal)(int64_t n, real a, real *x, int64_t incx); -TH_API void THBlas_(copy)(int64_t n, real *x, int64_t incx, real *y, int64_t incy); -TH_API void THBlas_(axpy)(int64_t n, real a, real *x, int64_t incx, real *y, int64_t incy); -TH_API real THBlas_(dot)(int64_t n, real *x, int64_t incx, real *y, int64_t incy); +TH_API void THBlas_(swap)(int64_t n, scalar_t *x, int64_t incx, scalar_t *y, int64_t 
incy); +TH_API void THBlas_(scal)(int64_t n, scalar_t a, scalar_t *x, int64_t incx); +TH_API void THBlas_(copy)(int64_t n, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy); +TH_API void THBlas_(axpy)(int64_t n, scalar_t a, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy); +TH_API scalar_t THBlas_(dot)(int64_t n, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy); /* Level 2 */ -TH_API void THBlas_(gemv)(char trans, int64_t m, int64_t n, real alpha, real *a, int64_t lda, real *x, int64_t incx, real beta, real *y, int64_t incy); -TH_API void THBlas_(ger)(int64_t m, int64_t n, real alpha, real *x, int64_t incx, real *y, int64_t incy, real *a, int64_t lda); +TH_API void THBlas_(gemv)(char trans, int64_t m, int64_t n, scalar_t alpha, scalar_t *a, int64_t lda, scalar_t *x, int64_t incx, scalar_t beta, scalar_t *y, int64_t incy); +TH_API void THBlas_(ger)(int64_t m, int64_t n, scalar_t alpha, scalar_t *x, int64_t incx, scalar_t *y, int64_t incy, scalar_t *a, int64_t lda); /* Level 3 */ -TH_API void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, real alpha, real *a, int64_t lda, real *b, int64_t ldb, real beta, real *c, int64_t ldc); +TH_API void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, scalar_t alpha, scalar_t *a, int64_t lda, scalar_t *b, int64_t ldb, scalar_t beta, scalar_t *c, int64_t ldc); #endif diff --git a/aten/src/TH/generic/THLapack.cpp b/aten/src/TH/generic/THLapack.cpp index 8f3ccc8ac6260e..1fed395a73a6d0 100644 --- a/aten/src/TH/generic/THLapack.cpp +++ b/aten/src/TH/generic/THLapack.cpp @@ -38,7 +38,7 @@ TH_EXTERNC void dpstrf_(char *uplo, int *n, double *a, int *lda, int *piv, int * /* Compute the solution to a real system of linear equations A * X = B */ -void THLapack_(gesv)(int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int* info) +void THLapack_(gesv)(int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int* info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -53,7 +53,7 @@ void THLapack_(gesv)(int n, int nrhs, real *a, int lda, int *ipiv, real *b, int } /* Solve a triangular system of the form A * X = B or A^T * X = B */ -void THLapack_(trtrs)(char uplo, char trans, char diag, int n, int nrhs, real *a, int lda, real *b, int ldb, int* info) +void THLapack_(trtrs)(char uplo, char trans, char diag, int n, int nrhs, scalar_t *a, int lda, scalar_t *b, int ldb, int* info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -69,7 +69,7 @@ void THLapack_(trtrs)(char uplo, char trans, char diag, int n, int nrhs, real *a /* Solve overdetermined or underdetermined real linear systems involving an M-by-N matrix A, or its transpose, using a QR or LQ factorization of A */ -void THLapack_(gels)(char trans, int m, int n, int nrhs, real *a, int lda, real *b, int ldb, real *work, int lwork, int *info) +void THLapack_(gels)(char trans, int m, int n, int nrhs, scalar_t *a, int lda, scalar_t *b, int ldb, scalar_t *work, int lwork, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -84,7 +84,7 @@ void THLapack_(gels)(char trans, int m, int n, int nrhs, real *a, int lda, real /* Compute all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A */ -void THLapack_(syev)(char jobz, char uplo, int n, real *a, int lda, real *w, real *work, int lwork, int *info) +void THLapack_(syev)(char jobz, char uplo, int n, scalar_t *a, int lda, scalar_t *w, scalar_t *work, int lwork, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -99,7 +99,7 @@ void 
THLapack_(syev)(char jobz, char uplo, int n, real *a, int lda, real *w, rea /* Compute for an N-by-N real nonsymmetric matrix A, the eigenvalues and, optionally, the left and/or right eigenvectors */ -void THLapack_(geev)(char jobvl, char jobvr, int n, real *a, int lda, real *wr, real *wi, real* vl, int ldvl, real *vr, int ldvr, real *work, int lwork, int *info) +void THLapack_(geev)(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *wr, scalar_t *wi, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -114,7 +114,7 @@ void THLapack_(geev)(char jobvl, char jobvr, int n, real *a, int lda, real *wr, /* Compute the singular value decomposition (SVD) of a real M-by-N matrix A, optionally computing the left and/or right singular vectors */ -void THLapack_(gesvd)(char jobu, char jobvt, int m, int n, real *a, int lda, real *s, real *u, int ldu, real *vt, int ldvt, real *work, int lwork, int *info) +void THLapack_(gesvd)(char jobu, char jobvt, int m, int n, scalar_t *a, int lda, scalar_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -128,7 +128,7 @@ void THLapack_(gesvd)(char jobu, char jobvt, int m, int n, real *a, int lda, rea } /* LU decomposition */ -void THLapack_(getrf)(int m, int n, real *a, int lda, int *ipiv, int *info) +void THLapack_(getrf)(int m, int n, scalar_t *a, int lda, int *ipiv, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -141,7 +141,7 @@ void THLapack_(getrf)(int m, int n, real *a, int lda, int *ipiv, int *info) #endif } -void THLapack_(getrs)(char trans, int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int *info) +void THLapack_(getrs)(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -155,7 +155,7 @@ void THLapack_(getrs)(char trans, int n, int nrhs, real *a, int lda, int *ipiv, } /* Matrix Inverse */ -void THLapack_(getri)(int n, real *a, int lda, int *ipiv, real *work, int lwork, int* info) +void THLapack_(getri)(int n, scalar_t *a, int lda, int *ipiv, scalar_t *work, int lwork, int* info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -169,7 +169,7 @@ void THLapack_(getri)(int n, real *a, int lda, int *ipiv, real *work, int lwork, } /* Cholesky factorization */ -void THLapack_(potrf)(char uplo, int n, real *a, int lda, int *info) +void THLapack_(potrf)(char uplo, int n, scalar_t *a, int lda, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -183,7 +183,7 @@ void THLapack_(potrf)(char uplo, int n, real *a, int lda, int *info) } /* Solve A*X = B with a symmetric positive definite matrix A using the Cholesky factorization */ -void THLapack_(potrs)(char uplo, int n, int nrhs, real *a, int lda, real *b, int ldb, int *info) +void THLapack_(potrs)(char uplo, int n, int nrhs, scalar_t *a, int lda, scalar_t *b, int ldb, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -197,7 +197,7 @@ void THLapack_(potrs)(char uplo, int n, int nrhs, real *a, int lda, real *b, int } /* Cholesky factorization based Matrix Inverse */ -void THLapack_(potri)(char uplo, int n, real *a, int lda, int *info) +void THLapack_(potri)(char uplo, int n, scalar_t *a, int lda, int *info) { #ifdef USE_LAPACK #if defined(TH_REAL_IS_DOUBLE) @@ -211,7 +211,7 @@ void THLapack_(potri)(char uplo, int n, real *a, int lda, int *info) } /* Cholesky 
factorization with complete pivoting */
-void THLapack_(pstrf)(char uplo, int n, real *a, int lda, int *piv, int *rank, real tol, real *work, int *info)
+void THLapack_(pstrf)(char uplo, int n, scalar_t *a, int lda, int *piv, int *rank, scalar_t tol, scalar_t *work, int *info)
 {
 #ifdef USE_LAPACK
 #if defined(TH_REAL_IS_DOUBLE)
@@ -225,7 +225,7 @@ void THLapack_(pstrf)(char uplo, int n, real *a, int lda, int *piv, int *rank, r
 }
 
 /* QR decomposition */
-void THLapack_(geqrf)(int m, int n, real *a, int lda, real *tau, real *work, int lwork, int *info)
+void THLapack_(geqrf)(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info)
 {
 #ifdef USE_LAPACK
 #if defined(TH_REAL_IS_DOUBLE)
@@ -239,7 +239,7 @@ void THLapack_(geqrf)(int m, int n, real *a, int lda, real *tau, real *work, int
 }
 
 /* Build Q from output of geqrf */
-void THLapack_(orgqr)(int m, int n, int k, real *a, int lda, real *tau, real *work, int lwork, int *info)
+void THLapack_(orgqr)(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info)
 {
 #ifdef USE_LAPACK
 #if defined(TH_REAL_IS_DOUBLE)
@@ -253,7 +253,7 @@ void THLapack_(orgqr)(int m, int n, int k, real *a, int lda, real *tau, real *wo
 }
 
 /* Multiply Q with a matrix using the output of geqrf */
-void THLapack_(ormqr)(char side, char trans, int m, int n, int k, real *a, int lda, real *tau, real *c, int ldc, real *work, int lwork, int *info)
+void THLapack_(ormqr)(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info)
 {
 #ifdef USE_LAPACK
 #if defined(TH_REAL_IS_DOUBLE)
diff --git a/aten/src/TH/generic/THLapack.h b/aten/src/TH/generic/THLapack.h
index fe64daed5cc39d..284a0c47693695 100644
--- a/aten/src/TH/generic/THLapack.h
+++ b/aten/src/TH/generic/THLapack.h
@@ -3,38 +3,38 @@
 #else
 
 /* AX=B */
-TH_API void THLapack_(gesv)(int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int* info);
+TH_API void THLapack_(gesv)(int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int* info);
 /* Solve a triangular system of the form A * X = B or A^T * X = B */
-TH_API void THLapack_(trtrs)(char uplo, char trans, char diag, int n, int nrhs, real *a, int lda, real *b, int ldb, int* info);
+TH_API void THLapack_(trtrs)(char uplo, char trans, char diag, int n, int nrhs, scalar_t *a, int lda, scalar_t *b, int ldb, int* info);
 /* ||AX-B|| */
-TH_API void THLapack_(gels)(char trans, int m, int n, int nrhs, real *a, int lda, real *b, int ldb, real *work, int lwork, int *info);
+TH_API void THLapack_(gels)(char trans, int m, int n, int nrhs, scalar_t *a, int lda, scalar_t *b, int ldb, scalar_t *work, int lwork, int *info);
 /* Eigenvals */
-TH_API void THLapack_(syev)(char jobz, char uplo, int n, real *a, int lda, real *w, real *work, int lwork, int *info);
+TH_API void THLapack_(syev)(char jobz, char uplo, int n, scalar_t *a, int lda, scalar_t *w, scalar_t *work, int lwork, int *info);
 /* Non-sym eigenvals */
-TH_API void THLapack_(geev)(char jobvl, char jobvr, int n, real *a, int lda, real *wr, real *wi, real* vl, int ldvl, real *vr, int ldvr, real *work, int lwork, int *info);
+TH_API void THLapack_(geev)(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *wr, scalar_t *wi, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, int *info);
 /* svd */
-TH_API void THLapack_(gesvd)(char jobu, char jobvt, int m, int n, real *a, int lda, real *s, real *u, int ldu, real *vt, int ldvt, real *work, int lwork, int *info);
+TH_API void THLapack_(gesvd)(char jobu, char jobvt, int m, int n, scalar_t *a, int lda, scalar_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, int *info);
 /* LU decomposition */
-TH_API void THLapack_(getrf)(int m, int n, real *a, int lda, int *ipiv, int *info);
-TH_API void THLapack_(getrs)(char trans, int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int *info);
+TH_API void THLapack_(getrf)(int m, int n, scalar_t *a, int lda, int *ipiv, int *info);
+TH_API void THLapack_(getrs)(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info);
 /* Matrix Inverse */
-TH_API void THLapack_(getri)(int n, real *a, int lda, int *ipiv, real *work, int lwork, int* info);
+TH_API void THLapack_(getri)(int n, scalar_t *a, int lda, int *ipiv, scalar_t *work, int lwork, int* info);
 
 /* Positive Definite matrices */
 /* Cholesky factorization */
-TH_API void THLapack_(potrf)(char uplo, int n, real *a, int lda, int *info);
+TH_API void THLapack_(potrf)(char uplo, int n, scalar_t *a, int lda, int *info);
 /* Matrix inverse based on Cholesky factorization */
-TH_API void THLapack_(potri)(char uplo, int n, real *a, int lda, int *info);
+TH_API void THLapack_(potri)(char uplo, int n, scalar_t *a, int lda, int *info);
 /* Solve A*X = B with a symmetric positive definite matrix A using the Cholesky factorization */
-TH_API void THLapack_(potrs)(char uplo, int n, int nrhs, real *a, int lda, real *b, int ldb, int *info);
+TH_API void THLapack_(potrs)(char uplo, int n, int nrhs, scalar_t *a, int lda, scalar_t *b, int ldb, int *info);
 /* Cholesky factorization with complete pivoting. */
-TH_API void THLapack_(pstrf)(char uplo, int n, real *a, int lda, int *piv, int *rank, real tol, real *work, int *info);
+TH_API void THLapack_(pstrf)(char uplo, int n, scalar_t *a, int lda, int *piv, int *rank, scalar_t tol, scalar_t *work, int *info);
 
 /* QR decomposition */
-TH_API void THLapack_(geqrf)(int m, int n, real *a, int lda, real *tau, real *work, int lwork, int *info);
+TH_API void THLapack_(geqrf)(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
 /* Build Q from output of geqrf */
-TH_API void THLapack_(orgqr)(int m, int n, int k, real *a, int lda, real *tau, real *work, int lwork, int *info);
+TH_API void THLapack_(orgqr)(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
 /* Multiply Q with a matrix from output of geqrf */
-TH_API void THLapack_(ormqr)(char side, char trans, int m, int n, int k, real *a, int lda, real *tau, real *c, int ldc, real *work, int lwork, int *info);
+TH_API void THLapack_(ormqr)(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info);
 
 #endif
diff --git a/aten/src/TH/generic/THStorage.cpp b/aten/src/TH/generic/THStorage.cpp
index 992cbd5bb7509f..7ed962567a31ff 100644
--- a/aten/src/TH/generic/THStorage.cpp
+++ b/aten/src/TH/generic/THStorage.cpp
@@ -4,9 +4,9 @@
 
 #include <new>
 
-real* THStorage_(data)(const THStorage *self)
+scalar_t* THStorage_(data)(const THStorage *self)
 {
-  return self->data<real>();
+  return self->data<scalar_t>();
 }
 
 ptrdiff_t THStorage_(size)(const THStorage *self)
@@ -16,18 +16,18 @@ ptrdiff_t THStorage_(size)(const THStorage *self)
 
 size_t THStorage_(elementSize)()
 {
-  return sizeof(real);
+  return sizeof(scalar_t);
 }
 
 THStorage* THStorage_(new)(void)
 {
-  return THStorage_new(at::CTypeToScalarType<real>::to());
+  return THStorage_new(at::CTypeToScalarType<scalar_t>::to());
 }
 
 THStorage* THStorage_(newWithSize)(ptrdiff_t size)
 {
   THStorage* storage = c10::make_intrusive<at::StorageImpl>(
-      at::scalarTypeToDataType(at::CTypeToScalarType<real>::to()),
+      at::scalarTypeToDataType(at::CTypeToScalarType<scalar_t>::to()),
       size,
       getTHDefaultAllocator(),
       true).release();
@@ -38,7 +38,7 @@ THStorage* THStorage_(newWithAllocator)(ptrdiff_t size,
                                         at::Allocator *allocator)
 {
   THStorage* storage = c10::make_intrusive<at::StorageImpl>(
-      at::scalarTypeToDataType(at::CTypeToScalarType<real>::to()),
+      at::scalarTypeToDataType(at::CTypeToScalarType<scalar_t>::to()),
       size,
       allocator,
       true).release();
@@ -48,7 +48,7 @@ THStorage* THStorage_(newWithAllocator)(ptrdiff_t size,
 
 THStorage* THStorage_(newWithMapping)(const char *filename, ptrdiff_t size, int flags)
 {
-  auto scalar_type = at::CTypeToScalarType<real>::to();
+  auto scalar_type = at::CTypeToScalarType<scalar_t>::to();
   size_t actual_size = -1;
   THStorage* storage = c10::make_intrusive<at::StorageImpl>(
       at::scalarTypeToDataType(scalar_type),
@@ -65,37 +65,37 @@ THStorage* THStorage_(newWithMapping)(const char *filename, ptrdiff_t size, int
   return storage;
 }
 
-THStorage* THStorage_(newWithSize1)(real data0)
+THStorage* THStorage_(newWithSize1)(scalar_t data0)
 {
   THStorage *self = THStorage_(newWithSize)(1);
-  real *data = THStorage_(data)(self);
+  scalar_t *data = THStorage_(data)(self);
   data[0] = data0;
   return self;
 }
 
-THStorage* THStorage_(newWithSize2)(real data0, real data1)
+THStorage* THStorage_(newWithSize2)(scalar_t data0, scalar_t data1)
 {
   THStorage *self = THStorage_(newWithSize)(2);
-  real *data = THStorage_(data)(self);
+  scalar_t *data = THStorage_(data)(self);
   data[0] = data0;
   data[1] = data1;
   return self;
 }
 
-THStorage* THStorage_(newWithSize3)(real data0, real data1, real data2)
+THStorage* THStorage_(newWithSize3)(scalar_t data0, scalar_t data1, scalar_t data2)
 {
   THStorage *self = THStorage_(newWithSize)(3);
-  real *data = THStorage_(data)(self);
+  scalar_t *data = THStorage_(data)(self);
   data[0] = data0;
   data[1] = data1;
   data[2] = data2;
   return self;
 }
 
-THStorage* THStorage_(newWithSize4)(real data0, real data1, real data2, real data3)
+THStorage* THStorage_(newWithSize4)(scalar_t data0, scalar_t data1, scalar_t data2, scalar_t data3)
 {
   THStorage *self = THStorage_(newWithSize)(4);
-  real *data = THStorage_(data)(self);
+  scalar_t *data = THStorage_(data)(self);
   data[0] = data0;
   data[1] = data1;
   data[2] = data2;
@@ -116,7 +116,7 @@ void THStorage_(free)(THStorage *storage)
 THStorage* THStorage_(newWithDataAndAllocator)(at::DataPtr&& data, ptrdiff_t size,
                                                at::Allocator* allocator) {
   THStorage* storage = c10::make_intrusive<at::StorageImpl>(
-      at::scalarTypeToDataType(at::CTypeToScalarType<real>::to()),
+      at::scalarTypeToDataType(at::CTypeToScalarType<scalar_t>::to()),
       size,
       std::move(data),
       allocator,
@@ -129,20 +129,20 @@ void THStorage_(resize)(THStorage *storage, ptrdiff_t size)
   return THStorage_resize(storage, size);
 }
 
-void THStorage_(fill)(THStorage *storage, real value)
+void THStorage_(fill)(THStorage *storage, scalar_t value)
 {
   ptrdiff_t i;
   for(i = 0; i < storage->numel(); i++)
     THStorage_(data)(storage)[i] = value;
 }
 
-void THStorage_(set)(THStorage *self, ptrdiff_t idx, real value)
+void THStorage_(set)(THStorage *self, ptrdiff_t idx, scalar_t value)
 {
   THArgCheck((idx >= 0) && (idx < self->numel()), 2, "out of bounds");
   THStorage_(data)(self)[idx] = value;
 }
 
-real THStorage_(get)(const THStorage *self, ptrdiff_t idx)
+scalar_t THStorage_(get)(const THStorage *self, ptrdiff_t idx)
 {
   THArgCheck((idx >= 0) && (idx < self->numel()), 2, "out of bounds");
   return THStorage_(data)(self)[idx];
diff --git a/aten/src/TH/generic/THStorage.h b/aten/src/TH/generic/THStorage.h
index 74cfd3e12c14cd..e088db9184fa75 100644
--- a/aten/src/TH/generic/THStorage.h
+++ b/aten/src/TH/generic/THStorage.h
@@ -41,20 +41,20 @@ typedef struct at_Storage_Impl at_Storage_Impl;
 #define THIntStorage THStorage
 #define THLongStorage THStorage
 
-TH_API real* THStorage_(data)(const THStorage*);
+TH_API scalar_t* THStorage_(data)(const THStorage*);
 TH_API ptrdiff_t THStorage_(size)(const THStorage*);
 TH_API size_t THStorage_(elementSize)(void);
 
 /* slow access -- checks everything */
-TH_API void THStorage_(set)(THStorage*, ptrdiff_t, real);
-TH_API real THStorage_(get)(const THStorage*, ptrdiff_t);
+TH_API void THStorage_(set)(THStorage*, ptrdiff_t, scalar_t);
+TH_API scalar_t THStorage_(get)(const THStorage*, ptrdiff_t);
 
 TH_API THStorage* THStorage_(new)(void);
 TH_API THStorage* THStorage_(newWithSize)(ptrdiff_t size);
-TH_API THStorage* THStorage_(newWithSize1)(real);
-TH_API THStorage* THStorage_(newWithSize2)(real, real);
-TH_API THStorage* THStorage_(newWithSize3)(real, real, real);
-TH_API THStorage* THStorage_(newWithSize4)(real, real, real, real);
+TH_API THStorage* THStorage_(newWithSize1)(scalar_t);
+TH_API THStorage* THStorage_(newWithSize2)(scalar_t, scalar_t);
+TH_API THStorage* THStorage_(newWithSize3)(scalar_t, scalar_t, scalar_t);
+TH_API THStorage* THStorage_(newWithSize4)(scalar_t, scalar_t, scalar_t, scalar_t);
 TH_API THStorage* THStorage_(newWithMapping)(const char *filename, ptrdiff_t size, int flags);
 
 TH_API THStorage* THStorage_(newWithAllocator)(ptrdiff_t size,
@@ -73,6 +73,6 @@ TH_API void THStorage_(swap)(THStorage *storage1, THStorage *storage2);
 /* might differ with other API (like CUDA) */
 TH_API void THStorage_(free)(THStorage *storage);
 TH_API void THStorage_(resize)(THStorage *storage, ptrdiff_t size);
-TH_API void THStorage_(fill)(THStorage *storage, real value);
+TH_API void THStorage_(fill)(THStorage *storage, scalar_t value);
 
 #endif
diff --git a/aten/src/TH/generic/THStorageCopy.cpp b/aten/src/TH/generic/THStorageCopy.cpp
index 442f7dbde2925d..1de588bbd2d75b 100644
--- a/aten/src/TH/generic/THStorageCopy.cpp
+++ b/aten/src/TH/generic/THStorageCopy.cpp
@@ -2,10 +2,10 @@
 #define TH_GENERIC_FILE "generic/THStorageCopy.cpp"
 #else
 
-void THStorage_(rawCopy)(THStorage *storage, real *src)
+void THStorage_(rawCopy)(THStorage *storage, scalar_t *src)
 {
   ptrdiff_t i;
-  real *data = THStorage_(data)(storage);
+  scalar_t *data = THStorage_(data)(storage);
   for(i = 0; i < storage->numel(); i++)
     data[i] = src[i];
 }
@@ -26,7 +26,7 @@ void THStorage_(copy##TYPENAMESRC)(THStorage *storage, TH##TYPENAMESRC##Storage
   auto data = THStorage_(data)(storage); \
   auto src_data = TH##TYPENAMESRC##Storage_data(src); \
   for(i = 0; i < storage->numel(); i++) \
-    data[i] = static_cast<real>(src_data[i]); \
+    data[i] = static_cast<scalar_t>(src_data[i]); \
 }
 
 #define IMPLEMENT_THStorage_COPY_FROM_HALF(TYPENAMESRC) \
@@ -37,7 +37,7 @@ void THStorage_(copy##TYPENAMESRC)(THStorage *storage, TH##TYPENAMESRC##Storage
   auto data = THStorage_(data)(storage); \
   auto src_data = TH##TYPENAMESRC##Storage_data(src); \
   for(i = 0; i < storage->numel(); i++) \
-    data[i] = (real)TH_half2float(src_data[i]); \
+    data[i] = (scalar_t)TH_half2float(src_data[i]); \
 }
 
 #define IMPLEMENT_THStorage_COPY_TO_HALF(TYPENAMESRC) \
@@ -59,7 +59,7 @@ void THStorage_(copy##TYPENAMESRC)(THStorage *storage, TH##TYPENAMESRC##Storage
   auto data = THStorage_(data)(storage); \
   auto src_data = TH##TYPENAMESRC##Storage_data(src); \
   for(i = 0; i < storage->numel(); i++) \
-    data[i] = static_cast<real>(src_data[i]); \
+    data[i] = static_cast<scalar_t>(src_data[i]); \
 }
 
 #ifndef TH_REAL_IS_HALF
diff --git a/aten/src/TH/generic/THStorageCopy.h b/aten/src/TH/generic/THStorageCopy.h
index ce8a2a690d5a37..cee1899e7af5e5 100644
--- a/aten/src/TH/generic/THStorageCopy.h
+++ b/aten/src/TH/generic/THStorageCopy.h
@@ -4,7 +4,7 @@
 
 /* Support for copy between different Storage types */
 
-TH_API void THStorage_(rawCopy)(THStorage *storage, real *src);
+TH_API void THStorage_(rawCopy)(THStorage *storage, scalar_t *src);
 TH_API void THStorage_(copy)(THStorage *storage, THStorage *src);
 TH_API void THStorage_(copyByte)(THStorage *storage, struct THByteStorage *src);
 TH_API void THStorage_(copyChar)(THStorage *storage, struct THCharStorage *src);
diff --git a/aten/src/TH/generic/THTensor.cpp b/aten/src/TH/generic/THTensor.cpp
index b4cb32e81e618d..3f373ee2119c26 100644
--- a/aten/src/TH/generic/THTensor.cpp
+++ b/aten/src/TH/generic/THTensor.cpp
@@ -45,8 +45,8 @@ int64_t THTensor_(stride)(const THTensor *self, int dim)
   return self->stride(dim);
 }
 
-real *THTensor_(data)(const THTensor *self) {
-  return self->data<real>();
+scalar_t *THTensor_(data)(const THTensor *self) {
+  return self->data<scalar_t>();
 }
 
 /**** creation methods ****/
@@ -578,56 +578,56 @@ void THTensor_(resizeNd)(THTensor *self, int nDimension, const int64_t *size, co
   return THTensor_resizeNd(self, nDimension, size, stride);
 }
 
-void THTensor_(set1d)(THTensor *tensor, int64_t x0, real value)
+void THTensor_(set1d)(THTensor *tensor, int64_t x0, scalar_t value)
 {
   THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
   THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
   THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0), value);
 }
 
-real THTensor_(get1d)(const THTensor *tensor, int64_t x0)
+scalar_t THTensor_(get1d)(const THTensor *tensor, int64_t x0)
 {
   THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
   THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
   return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0));
 }
 
-void THTensor_(set2d)(THTensor *tensor, int64_t x0, int64_t x1, real value)
+void THTensor_(set2d)(THTensor *tensor, int64_t x0, int64_t x1, scalar_t value)
 {
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 2, 1, "tensor must have two dimensions");
   THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
   THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1), value);
 }
 
-real THTensor_(get2d)(const THTensor *tensor, int64_t x0, int64_t x1)
+scalar_t THTensor_(get2d)(const THTensor *tensor, int64_t x0, int64_t x1)
 {
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 2, 1, "tensor must have two dimensions");
   THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
   return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1));
 }
 
-void THTensor_(set3d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, real value)
+void THTensor_(set3d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, scalar_t value)
 {
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 3, 1, "tensor must have three dimensions");
   THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
   THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2), value);
 }
 
-real THTensor_(get3d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2)
+scalar_t THTensor_(get3d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2)
 {
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 3, 1, "tensor must have three dimensions");
   THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
   return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2));
 }
 
-void THTensor_(set4d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, real value)
+void THTensor_(set4d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value)
 {
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 4, 1, "tensor must have four dimensions");
   THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
   THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3), value);
 }
 
-real THTensor_(get4d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3)
+scalar_t THTensor_(get4d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3)
 {
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 4, 1, "tensor must have four dimensions");
   THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
diff --git a/aten/src/TH/generic/THTensor.h b/aten/src/TH/generic/THTensor.h
index 2c32323327918a..d5316919d4e31e 100644
--- a/aten/src/TH/generic/THTensor.h
+++ b/aten/src/TH/generic/THTensor.h
@@ -36,7 +36,7 @@ TH_API int THTensor_(nDimensionLegacyNoScalars)(const THTensor *self);
 TH_API int THTensor_(nDimensionLegacyAll)(const THTensor *self);
 TH_API int64_t THTensor_(size)(const THTensor *self, int dim);
 TH_API int64_t THTensor_(stride)(const THTensor *self, int dim);
-TH_API real *THTensor_(data)(const THTensor *self);
+TH_API scalar_t *THTensor_(data)(const THTensor *self);
 
 /**** creation methods ****/
 
@@ -119,15 +119,15 @@ TH_API void THTensor_(free)(THTensor *self);
 TH_API void THTensor_(freeCopyTo)(THTensor *self, THTensor *dst);
 
 /* Slow access methods [check everything] */
-TH_API void THTensor_(set1d)(THTensor *tensor, int64_t x0, real value);
-TH_API void THTensor_(set2d)(THTensor *tensor, int64_t x0, int64_t x1, real value);
-TH_API void THTensor_(set3d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, real value);
-TH_API void THTensor_(set4d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, real value);
-
-TH_API real THTensor_(get1d)(const THTensor *tensor, int64_t x0);
-TH_API real THTensor_(get2d)(const THTensor *tensor, int64_t x0, int64_t x1);
-TH_API real THTensor_(get3d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2);
-TH_API real THTensor_(get4d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3);
+TH_API void THTensor_(set1d)(THTensor *tensor, int64_t x0, scalar_t value);
+TH_API void THTensor_(set2d)(THTensor *tensor, int64_t x0, int64_t x1, scalar_t value);
+TH_API void THTensor_(set3d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, scalar_t value);
+TH_API void THTensor_(set4d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value);
+
+TH_API scalar_t THTensor_(get1d)(const THTensor *tensor, int64_t x0);
+TH_API scalar_t THTensor_(get2d)(const THTensor *tensor, int64_t x0, int64_t x1);
+TH_API scalar_t THTensor_(get3d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2);
+TH_API scalar_t THTensor_(get4d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3);
 
 /* Debug methods */
 TH_API THDescBuff THTensor_(desc)(const THTensor *tensor);
diff --git a/aten/src/TH/generic/THTensorApply.hpp b/aten/src/TH/generic/THTensorApply.hpp
index 8df73cefa08be5..77ca49d4aa5dac 100644
--- a/aten/src/TH/generic/THTensorApply.hpp
+++ b/aten/src/TH/generic/THTensorApply.hpp
@@ -31,14 +31,14 @@
       ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
                                 TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
       ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \
-      TYPE *TENSOR##_data = TENSOR->data<real>() + TH_TENSOR_offset; \
+      TYPE *TENSOR##_data = TENSOR->data<scalar_t>() + TH_TENSOR_offset; \
      CODE \
    } \
 }
 #else
 #define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
 { \
-  TYPE *TENSOR##_data = TENSOR->data<real>(); \
+  TYPE *TENSOR##_data = TENSOR->data<scalar_t>(); \
   ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \
   CODE \
 }
@@ -57,16 +57,16 @@
       ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
                                 TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
       ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
-      TYPE1 *TENSOR1##_data = TENSOR1->data<real>() + TH_TENSOR_offset; \
-      TYPE2 *TENSOR2##_data = TENSOR2->data<real>() + TH_TENSOR_offset; \
+      TYPE1 *TENSOR1##_data = TENSOR1->data<scalar_t>() + TH_TENSOR_offset; \
+      TYPE2 *TENSOR2##_data = TENSOR2->data<scalar_t>() + TH_TENSOR_offset; \
      CODE \
    } \
 }
 #else
 #define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
 { \
-  TYPE1 *TENSOR1##_data = TENSOR1->data<real>(); \
-  TYPE2 *TENSOR2##_data = TENSOR2->data<real>(); \
+  TYPE1 *TENSOR1##_data = TENSOR1->data<scalar_t>(); \
+  TYPE2 *TENSOR2##_data = TENSOR2->data<scalar_t>(); \
   ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
   CODE \
 }
@@ -85,18 +85,18 @@
       ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
                                 TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
       ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
-      TYPE1 *TENSOR1##_data = TENSOR1->data<real>() + TH_TENSOR_offset; \
-      TYPE2 *TENSOR2##_data = TENSOR2->data<real>() + TH_TENSOR_offset; \
-      TYPE3 *TENSOR3##_data = TENSOR3->data<real>() + TH_TENSOR_offset; \
+      TYPE1 *TENSOR1##_data = TENSOR1->data<scalar_t>() + TH_TENSOR_offset; \
+      TYPE2 *TENSOR2##_data = TENSOR2->data<scalar_t>() + TH_TENSOR_offset; \
+      TYPE3 *TENSOR3##_data = TENSOR3->data<scalar_t>() + TH_TENSOR_offset; \
      CODE \
    } \
 }
 #else
 #define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
 { \
-  TYPE1 *TENSOR1##_data = TENSOR1->data<real>(); \
-  TYPE2 *TENSOR2##_data = TENSOR2->data<real>(); \
-  TYPE3 *TENSOR3##_data = TENSOR3->data<real>(); \
+  TYPE1 *TENSOR1##_data = TENSOR1->data<scalar_t>(); \
+  TYPE2 *TENSOR2##_data = TENSOR2->data<scalar_t>(); \
+  TYPE3 *TENSOR3##_data = TENSOR3->data<scalar_t>(); \
   ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
   CODE \
 }
@@ -154,7 +154,7 @@
 if (std::isnan(val)) break;
 #define th_isnan_break(val)
 #endif
-static inline real THTensor_(powOne)(real x, real y) {
+static inline scalar_t THTensor_(powOne)(scalar_t x, scalar_t y) {
 #if defined(TH_REAL_IS_FLOAT)
   return powf(x, y);
 #elif defined(TH_REAL_IS_DOUBLE)
@@ -162,7 +162,7 @@ static inline real THTensor_(powOne)(real x, real y) {
 #else
   THArgCheck(y >= 0, 1,
     "Integers to negative integer powers are not allowed");
-  real result = 1;
+  scalar_t result = 1;
   while (y) {
     if (y & 1) {
        result *= x;
diff --git a/aten/src/TH/generic/THTensorConv.cpp b/aten/src/TH/generic/THTensorConv.cpp
index 58fefad6c10e37..dc56eef6be8074 100644
--- a/aten/src/TH/generic/THTensorConv.cpp
+++ b/aten/src/TH/generic/THTensorConv.cpp
@@ -5,10 +5,10 @@
 
 /*
   2D Input, 2D kernel : convolve given image with the given kernel.
 */
-void THTensor_(validXCorr2Dptr)(real *r_,
-                                real alpha,
-                                real *t_, int64_t ir, int64_t ic,
-                                real *k_, int64_t kr, int64_t kc,
+void THTensor_(validXCorr2Dptr)(scalar_t *r_,
+                                scalar_t alpha,
+                                scalar_t *t_, int64_t ir, int64_t ic,
+                                scalar_t *k_, int64_t kr, int64_t kc,
                                 int64_t sr, int64_t sc)
 {
   int64_t or_ = (ir - kr) / sr + 1;
@@ -21,9 +21,9 @@ void THTensor_(validXCorr2Dptr)(real *r_,
   for(yy = 0; yy < or_; yy++) {
     for(xx = 0; xx < oc; xx++) {
       /* Dot product in two dimensions... (between input image and the mask) */
-      real *pi_ = t_ + yy*sr*ic + xx*sc;
-      real *pw_ = k_;
-      real sum = 0;
+      scalar_t *pi_ = t_ + yy*sr*ic + xx*sc;
+      scalar_t *pw_ = k_;
+      scalar_t sum = 0;
       for(ky = 0; ky < kr; ky++) {
         for(kx = 0; kx < kc; kx++) {
           sum += pi_[kx]*pw_[kx];
@@ -39,10 +39,10 @@ void THTensor_(validXCorr2Dptr)(real *r_,
   } else {
     /* SSE-based convolution */
     for(yy = 0; yy < or_; yy++) {
-      real *pi_ = t_ + yy*sr*ic;
-      real *pw_ = k_;
+      scalar_t *pi_ = t_ + yy*sr*ic;
+      scalar_t *pw_ = k_;
       for (ky = 0; ky < kr; ky++) {
-        real *pis_ = pi_;
+        scalar_t *pis_ = pi_;
         for (kx = 0; kx < kc; kx++) {
           THVector_(cadd)(r_, r_, pis_, alpha*pw_[kx], oc);
           pis_++;
@@ -58,10 +58,10 @@ void THTensor_(validXCorr2Dptr)(real *r_,
 
 /*
   2D Input, 2D kernel : convolve given image with the given kernel.
 */
-void THTensor_(validConv2Dptr)(real *r_,
-                               real alpha,
-                               real *t_, int64_t ir, int64_t ic,
-                               real *k_, int64_t kr, int64_t kc,
+void THTensor_(validConv2Dptr)(scalar_t *r_,
+                               scalar_t alpha,
+                               scalar_t *t_, int64_t ir, int64_t ic,
+                               scalar_t *k_, int64_t kr, int64_t kc,
                                int64_t sr, int64_t sc)
 {
   int64_t or_ = (ir - kr) / sr + 1;
@@ -74,9 +74,9 @@ void THTensor_(validConv2Dptr)(real *r_,
   for(yy = 0; yy < or_; yy++) {
     for(xx = 0; xx < oc; xx++) {
       /* Dot product in two dimensions... (between input image and the mask) */
-      real *pi_ = t_ + yy*sr*ic + xx*sc;
-      real *pw_ = k_ + kr*kc - 1;
-      real sum = 0;
+      scalar_t *pi_ = t_ + yy*sr*ic + xx*sc;
+      scalar_t *pw_ = k_ + kr*kc - 1;
+      scalar_t sum = 0;
       for(ky = 0; ky < kr; ky++) {
         for(kx = 0; kx < kc; kx++) {
           sum += pi_[kx]*pw_[-kx];
@@ -92,10 +92,10 @@ void THTensor_(validConv2Dptr)(real *r_,
   } else {
     /* SSE-based convolution */
     for(yy = 0; yy < or_; yy++) {
-      real *pw_ = k_ + kr*kc - 1;
-      real *pi_ = t_ + yy*sr*ic;
+      scalar_t *pw_ = k_ + kr*kc - 1;
+      scalar_t *pi_ = t_ + yy*sr*ic;
       for (ky = 0; ky < kr; ky++) {
-        real *pis_ = pi_;
+        scalar_t *pis_ = pi_;
         for (kx = 0; kx < kc; kx++) {
           THVector_(cadd)(r_, r_, pis_, alpha*pw_[-kx], oc);
           pis_++;
@@ -111,10 +111,10 @@ void THTensor_(validConv2Dptr)(real *r_,
 
 /*
   2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
 */
-void THTensor_(fullConv2Dptr)(real *r_,
-                              real alpha,
-                              real *t_, int64_t ir, int64_t ic,
-                              real *k_, int64_t kr, int64_t kc,
+void THTensor_(fullConv2Dptr)(scalar_t *r_,
+                              scalar_t alpha,
+                              scalar_t *t_, int64_t ir, int64_t ic,
+                              scalar_t *k_, int64_t kr, int64_t kc,
                               int64_t sr, int64_t sc)
 {
   int64_t oc = (ic - 1) * sc + kc;
@@ -126,11 +126,11 @@ void THTensor_(fullConv2Dptr)(real *r_,
   for(yy = 0; yy < ir; yy++) {
     for(xx = 0; xx < ic; xx++) {
       /* Outer product in two dimensions... (between input image and the mask) */
-      real *po_ = r_ + yy*sr*oc + xx*sc;
-      real *pw_ = k_;
+      scalar_t *po_ = r_ + yy*sr*oc + xx*sc;
+      scalar_t *pw_ = k_;
       for(ky = 0; ky < kr; ky++)
       {
-        real z = *t_ * alpha;
+        scalar_t z = *t_ * alpha;
         for(kx = 0; kx < kc; kx++) {
           po_[kx] += z * pw_[kx];
         }
@@ -144,10 +144,10 @@ void THTensor_(fullConv2Dptr)(real *r_,
   } else {
     /* SSE-based convolution */
     for(yy = 0; yy < ir; yy++) {
-      real *po_ = r_ + yy*sr*oc;
-      real *pw_ = k_;
+      scalar_t *po_ = r_ + yy*sr*oc;
+      scalar_t *pw_ = k_;
       for (ky = 0; ky < kr; ky++) {
-        real *pos_ = po_;
+        scalar_t *pos_ = po_;
         for (kx = 0; kx < kc; kx++) {
           THVector_(cadd)(pos_, pos_, t_, alpha*pw_[kx], ic);
           pos_++;
@@ -163,10 +163,10 @@ void THTensor_(fullConv2Dptr)(real *r_,
 
 /*
   2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
 */
-void THTensor_(fullXCorr2Dptr)(real *r_,
-                               real alpha,
-                               real *t_, int64_t ir, int64_t ic,
-                               real *k_, int64_t kr, int64_t kc,
+void THTensor_(fullXCorr2Dptr)(scalar_t *r_,
+                               scalar_t alpha,
+                               scalar_t *t_, int64_t ir, int64_t ic,
+                               scalar_t *k_, int64_t kr, int64_t kc,
                                int64_t sr, int64_t sc)
 {
   int64_t oc = (ic - 1) * sc + kc;
@@ -178,12 +178,12 @@ void THTensor_(fullXCorr2Dptr)(real *r_,
   for(yy = 0; yy < ir; yy++) {
     for(xx = 0; xx < ic; xx++) {
      /* Outer product in two dimensions... (between input image and the mask) */
-      real *po_ = r_ + yy*sr*oc + xx*sc;
-      real *pw_ = k_ + kr*kc -1;
+      scalar_t *po_ = r_ + yy*sr*oc + xx*sc;
+      scalar_t *pw_ = k_ + kr*kc -1;
       int64_t kx, ky;
       for(ky = 0; ky < kr; ky++)
       {
-        real z = *t_ * alpha;
+        scalar_t z = *t_ * alpha;
         for(kx = 0; kx < kc; kx++) {
           po_[kx] += z * pw_[-kx];
         }
@@ -197,10 +197,10 @@ void THTensor_(fullXCorr2Dptr)(real *r_,
   } else {
     /* SSE-based convolution */
     for(yy = 0; yy < ir; yy++) {
-      real *po_ = r_ + yy*sr*oc;
-      real *pw_ = k_ + kr*kc -1;
+      scalar_t *po_ = r_ + yy*sr*oc;
+      scalar_t *pw_ = k_ + kr*kc -1;
       for (ky = 0; ky < kr; ky++) {
-        real *pos_ = po_;
+        scalar_t *pos_ = po_;
         for (kx = 0; kx < kc; kx++) {
           THVector_(cadd)(pos_, pos_, t_, pw_[-kx]*alpha, ic);
           pos_++;
@@ -218,10 +218,10 @@ void THTensor_(fullXCorr2Dptr)(real *r_,
   for sr,sc=1 this is equivalent to validXCorr2Dptr, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
 */
-void THTensor_(validXCorr2DRevptr)(real *r_,
-                                   real alpha,
-                                   real *t_, int64_t ir, int64_t ic,
-                                   real *k_, int64_t kr, int64_t kc,
+void THTensor_(validXCorr2DRevptr)(scalar_t *r_,
+                                   scalar_t alpha,
+                                   scalar_t *t_, int64_t ir, int64_t ic,
+                                   scalar_t *k_, int64_t kr, int64_t kc,
                                    int64_t sr, int64_t sc)
 {
   int64_t or_ = ir - (kr - 1) * sr;
@@ -233,9 +233,9 @@ void THTensor_(validXCorr2DRevptr)(real *r_,
   /* regular convolution */
   for(yy = 0; yy < kr; yy++) {
     for(xx = 0; xx < kc; xx++) {
-      real *po_ = r_;
-      real *pi_ = t_ + yy*sr*ic + xx*sc;
-      real z = *k_++ * alpha;
+      scalar_t *po_ = r_;
+      scalar_t *pi_ = t_ + yy*sr*ic + xx*sc;
+      scalar_t z = *k_++ * alpha;
 
       for(ky = 0; ky < or_; ky++) {
         for(kx = 0; kx < oc; kx++)
@@ -250,9 +250,9 @@ void THTensor_(validXCorr2DRevptr)(real *r_,
     /* SSE-based convolution */
     for(yy = 0; yy < kr; yy++) {
       for(xx = 0; xx < kc; xx++) {
-        real *po_ = r_;
-        real *pi_ = t_ + yy*sr*ic + xx*sc;
-        real z = *k_++ * alpha;
+        scalar_t *po_ = r_;
+        scalar_t *pi_ = t_ + yy*sr*ic + xx*sc;
+        scalar_t z = *k_++ * alpha;
 
         for(ky = 0; ky < or_; ky++) {
           THVector_(cadd)(po_, po_, pi_, z, oc);
@@ -266,10 +266,10 @@ void THTensor_(validXCorr2DRevptr)(real *r_,
 
 /*
   3D Input, 3D kernel : convolve given volume with the given kernel.
 */
-void THTensor_(validXCorr3Dptr)(real *r_,
-                                real alpha,
-                                real *t_, int64_t it, int64_t ir, int64_t ic,
-                                real *k_, int64_t kt, int64_t kr, int64_t kc,
+void THTensor_(validXCorr3Dptr)(scalar_t *r_,
+                                scalar_t alpha,
+                                scalar_t *t_, int64_t it, int64_t ir, int64_t ic,
+                                scalar_t *k_, int64_t kt, int64_t kr, int64_t kc,
                                 int64_t st, int64_t sr, int64_t sc)
 {
   int64_t ot = (it - kt) / st + 1;
@@ -285,9 +285,9 @@ void THTensor_(validXCorr3Dptr)(real *r_,
       for(xx = 0; xx < oc; xx++) {
         /* Dot product in two dimensions... (between input image and the mask) */
-        real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
-        real *pw_ = k_;
-        real sum = 0;
+        scalar_t *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
+        scalar_t *pw_ = k_;
+        scalar_t sum = 0;
         int64_t kz, kx, ky;
 
         for(kz = 0; kz < kt; kz++) {
@@ -311,10 +311,10 @@ void THTensor_(validXCorr3Dptr)(real *r_,
 
 /*
   3D Input, 3D kernel : convolve given volume with the given kernel.
 */
-void THTensor_(validConv3Dptr)(real *r_,
-                               real alpha,
-                               real *t_, int64_t it, int64_t ir, int64_t ic,
-                               real *k_, int64_t kt, int64_t kr, int64_t kc,
+void THTensor_(validConv3Dptr)(scalar_t *r_,
+                               scalar_t alpha,
+                               scalar_t *t_, int64_t it, int64_t ir, int64_t ic,
+                               scalar_t *k_, int64_t kt, int64_t kr, int64_t kc,
                                int64_t st, int64_t sr, int64_t sc)
 {
   int64_t ot = (it - kt) / st + 1;
@@ -330,9 +330,9 @@ void THTensor_(validConv3Dptr)(real *r_,
       for(xx = 0; xx < oc; xx++) {
         /* Dot product in two dimensions... (between input image and the mask) */
-        real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
-        real *pw_ = k_ + kt*kr*kc - 1;
-        real sum = 0;
+        scalar_t *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
+        scalar_t *pw_ = k_ + kt*kr*kc - 1;
+        scalar_t sum = 0;
         int64_t kz, kx, ky;
 
         for(kz = 0; kz < kt; kz++) {
@@ -357,10 +357,10 @@ void THTensor_(validConv3Dptr)(real *r_,
 
 /*
   3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
 */
-void THTensor_(fullConv3Dptr)(real *r_,
-                              real alpha,
-                              real *t_, int64_t it, int64_t ir, int64_t ic,
-                              real *k_, int64_t kt, int64_t kr, int64_t kc,
+void THTensor_(fullConv3Dptr)(scalar_t *r_,
+                              scalar_t alpha,
+                              scalar_t *t_, int64_t it, int64_t ir, int64_t ic,
+                              scalar_t *k_, int64_t kt, int64_t kr, int64_t kc,
                               int64_t st, int64_t sr, int64_t sc)
 {
   int64_t or_ = (ir - 1) * sr + kr;
@@ -375,15 +375,15 @@ void THTensor_(fullConv3Dptr)(real *r_,
       for(xx = 0; xx < ic; xx++) {
         /* Outer product in two dimensions... (between input image and the mask) */
-        real *po_ = r_ + zz*st*or_*oc + yy*sr*oc + xx*sc;
-        real *pw_ = k_;
+        scalar_t *po_ = r_ + zz*st*or_*oc + yy*sr*oc + xx*sc;
+        scalar_t *pw_ = k_;
         int64_t kz, kx, ky;
         /* printf("Output Plane : %ld,%ld,%ld, input val=%g\n",zz,yy,xx,*t_); */
         for(kz = 0; kz < kt; kz++)
         {
           for(ky = 0; ky < kr; ky++)
           {
-            real z = *t_ * alpha;
+            scalar_t z = *t_ * alpha;
             for(kx = 0; kx < kc; kx++) {
               /* printf("o=%g,k=%g," , po_[kx],pw_[kx]); */
               po_[kx] += z * pw_[kx];
@@ -405,10 +405,10 @@ void THTensor_(fullConv3Dptr)(real *r_,
 
 /*
   3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
 */
-void THTensor_(fullXCorr3Dptr)(real *r_,
-                               real alpha,
-                               real *t_, int64_t it, int64_t ir, int64_t ic,
-                               real *k_, int64_t kt, int64_t kr, int64_t kc,
+void THTensor_(fullXCorr3Dptr)(scalar_t *r_,
+                               scalar_t alpha,
+                               scalar_t *t_, int64_t it, int64_t ir, int64_t ic,
+                               scalar_t *k_, int64_t kt, int64_t kr, int64_t kc,
                                int64_t st, int64_t sr, int64_t sc)
 {
   int64_t or_ = (ir - 1) * sr + kr;
@@ -423,14 +423,14 @@ void THTensor_(fullXCorr3Dptr)(real *r_,
      for(xx = 0; xx < ic; xx++) {
        /* Outer product in two dimensions... (between input image and the mask) */
-        real *po_ = r_ + zz*st*or_*oc + yy*sr*oc + xx*sc;
-        real *pw_ = k_ + kt*kr*kc -1;
+        scalar_t *po_ = r_ + zz*st*or_*oc + yy*sr*oc + xx*sc;
+        scalar_t *pw_ = k_ + kt*kr*kc -1;
         int64_t kz, kx, ky;
         for(kz = 0; kz < kt; kz++)
         {
           for(ky = 0; ky < kr; ky++)
           {
-            real z = *t_ * alpha;
+            scalar_t z = *t_ * alpha;
             for(kx = 0; kx < kc; kx++) {
               po_[kx] += z * pw_[-kx];
             }
@@ -450,10 +450,10 @@ void THTensor_(fullXCorr3Dptr)(real *r_,
   for sr,sc=1 this is equivalent to validXCorr3Dptr, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
 */
-void THTensor_(validXCorr3DRevptr)(real *r_,
-                                   real alpha,
-                                   real *t_, int64_t it, int64_t ir, int64_t ic,
-                                   real *k_, int64_t kt, int64_t kr, int64_t kc,
+void THTensor_(validXCorr3DRevptr)(scalar_t *r_,
+                                   scalar_t alpha,
+                                   scalar_t *t_, int64_t it, int64_t ir, int64_t ic,
+                                   scalar_t *k_, int64_t kt, int64_t kr, int64_t kc,
                                    int64_t st, int64_t sr, int64_t sc)
 {
   int64_t ot = it - (kt - 1) * st;
@@ -467,9 +467,9 @@ void THTensor_(validXCorr3DRevptr)(real *r_,
     {
       for(xx = 0; xx < kc; xx++)
      {
-        real *po_ = r_;
-        real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
-        real z = *k_++ * alpha;
+        scalar_t *po_ = r_;
+        scalar_t *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
+        scalar_t z = *k_++ * alpha;
         int64_t kz, kx, ky;
 
         for(kz = 0; kz < ot; kz++)
         {
@@ -487,10 +487,10 @@ void THTensor_(validXCorr3DRevptr)(real *r_,
   }
 }
 
-void THTensor_(conv2d)(real* output_data,
-                       real alpha,
-                       real* ptr_input, int64_t nInputRows, int64_t nInputCols,
-                       real* ptr_weight, int64_t nKernelRows, int64_t nKernelCols,
+void THTensor_(conv2d)(scalar_t* output_data,
+                       scalar_t alpha,
+                       scalar_t* ptr_input, int64_t nInputRows, int64_t nInputCols,
+                       scalar_t* ptr_weight, int64_t nKernelRows, int64_t nKernelCols,
                        int64_t srow, int64_t scol,
                        const char *vf, const char *xc)
 {
@@ -524,10 +524,10 @@ void THTensor_(conv2d)(real* output_data,
                                   srow, scol);
 }
 
-void THTensor_(conv3d)(real* output_data,
-                       real alpha,
-                       real* ptr_input, int64_t nInputDepth, int64_t nInputRows, int64_t nInputCols,
-                       real* ptr_weight, int64_t nKernelDepth, int64_t nKernelRows, int64_t nKernelCols,
+void THTensor_(conv3d)(scalar_t* output_data,
+                       scalar_t alpha,
+                       scalar_t* ptr_input, int64_t nInputDepth, int64_t nInputRows, int64_t nInputCols,
+                       scalar_t* ptr_weight, int64_t nKernelDepth, int64_t nKernelRows, int64_t nKernelCols,
                        int64_t sdepth, int64_t srow, int64_t scol,
                        const char *vf, const char *xc)
 {
@@ -578,7 +578,7 @@ int64_t THTensor_(convsize)(int64_t x, int64_t k, int64_t s, const char* vf)
   for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
 */
-void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol)
+void THTensor_(conv2DRevger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol)
 {
   int64_t nInputPlane, nInputRows, nInputCols;
   int64_t nKernelPlane, nKernelRows, nKernelCols;
@@ -586,9 +586,9 @@ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
   int64_t istride0, kstride0;
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k;
@@ -618,9 +618,9 @@ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
   nelem = THTensor_(nElement)(r_);
   THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
   {
@@ -629,7 +629,7 @@ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0)*r_->size(1); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] = 0.0;
@@ -641,7 +641,7 @@ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0)*r_->size(1); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] *= beta;
@@ -653,14 +653,14 @@ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
   {
     int64_t i;
     /* get kernel */
-    real *ptr_weight = weight_data+k*kstride0;
+    scalar_t *ptr_weight = weight_data+k*kstride0;
 
     for(i = 0; i < nInputPlane; i++)
     {
       /* get output */
-      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
+      scalar_t *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
       /* get input */
-      real *ptr_input = input_data+i*istride0;
+      scalar_t *ptr_input = input_data+i*istride0;
 
       /* do image, kernel convolution */
       THTensor_(validXCorr2DRevptr)(ptr_output,
@@ -684,7 +684,7 @@ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
   for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
 */
-void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol)
+void THTensor_(conv2DRevgerm)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol)
 {
   int64_t nbatch, nInputPlane, nInputRows, nInputCols;
   int64_t nKernelPlane, nKernelRows, nKernelCols;
@@ -692,9 +692,9 @@ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_,
   int64_t istride0, kstride0, istride1, kstride1;
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k;
@@ -728,9 +728,9 @@ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_,
   nelem = THTensor_(nElement)(r_);
   THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
   {
@@ -739,7 +739,7 @@ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_,
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0)*r_->size(1); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] = 0.0;
@@ -751,7 +751,7 @@ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_,
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0)*r_->size(1); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] *= beta;
@@ -768,11 +768,11 @@ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_,
       for(p = 0; p < nbatch; p++)
       {
         /* get kernel */
-        real *ptr_weight = weight_data + p*kstride0 + k*kstride1;
+        scalar_t *ptr_weight = weight_data + p*kstride0 + k*kstride1;
         /* get output */
-        real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
+        scalar_t *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
         /* get input */
-        real *ptr_input = input_data + p*istride0 + i*istride1;
+        scalar_t *ptr_input = input_data + p*istride0 + i*istride1;
 
         /* do image, kernel convolution */
         THTensor_(validXCorr2DRevptr)(ptr_output,
@@ -795,7 +795,7 @@ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_,
   like rank1 update
   A <- xx' + beta*A
 */
-void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
+void THTensor_(conv2Dger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputRows, nInputCols;
   int64_t nKernelPlane, nKernelRows, nKernelCols;
@@ -804,9 +804,9 @@ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k;
@@ -843,9 +843,9 @@ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   nelem = THTensor_(nElement)(r_);
   THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
   {
@@ -853,7 +853,7 @@ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0)*r_->size(1); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] = 0.0;
@@ -865,7 +865,7 @@ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0)*r_->size(1); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] *= beta;
@@ -877,14 +877,14 @@ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   {
     int64_t i;
     /* get kernel */
-    real *ptr_weight = weight_data+k*kstride0;
+    scalar_t *ptr_weight = weight_data+k*kstride0;
 
     for(i = 0; i < nInputPlane; i++)
     {
      /* get output */
-      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
+      scalar_t *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
      /* get input */
-      real *ptr_input = input_data+i*istride0;
+      scalar_t *ptr_input = input_data+i*istride0;
 
      /* do image, kernel convolution */
      if (*vf == 'F')
@@ -927,7 +927,7 @@ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   matrix vector product like
   y <- Ax + beta*y
 */
-void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
+void THTensor_(conv2Dmv)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputRows, nInputCols;
   int64_t nKernelRows, nKernelCols;
@@ -935,9 +935,9 @@ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   int64_t istride0, kstride0, kstride1;
   THTensor *input;
   THTensor* kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k;
@@ -981,9 +981,9 @@ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   nelem = THTensor_(nElement)(r_);
   THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
   {
@@ -991,7 +991,7 @@ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] = 0.0;
@@ -1003,7 +1003,7 @@ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
 #pragma omp parallel for private(k)
     for (k = 0; k < r_->size(0); k++)
     {
-      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
+      scalar_t* ptr_output = output_data + k*nOutputCols*nOutputRows;
       int64_t l;
       for (l = 0; l < nOutputRows*nOutputCols; l++)
         ptr_output[l] *= beta;
@@ -1015,13 +1015,13 @@ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   {
     int64_t i;
     /* get output */
-    real *ptr_output = output_data + k*nOutputCols*nOutputRows;
+    scalar_t *ptr_output = output_data + k*nOutputCols*nOutputRows;
 
     for(i = 0; i < nInputPlane; i++)
     {
       /* get kernel */
-      real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
+      scalar_t *ptr_weight = weight_data + k*kstride0 + i*kstride1;
       /* get input */
-      real *ptr_input = input_data + i*istride0;
+      scalar_t *ptr_input = input_data + i*istride0;
 
       /* do image, kernel convolution */
       if (*vf == 'F')
@@ -1064,7 +1064,7 @@ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   matrix vector product like
   y <- Ax + beta*y
 */
-void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
+void THTensor_(conv2Dmm)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputRows, nInputCols;
   int64_t nKernelRows, nKernelCols;
@@ -1074,9 +1074,9 @@ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   THTensor* kernel;
   int64_t nbatch;
   ptrdiff_t nelem;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   int64_t p;
 
   AT_CHECK(!t_->is_empty() && t_->dim() == 4, "input: non-empty 4D Tensor expected, got size: ", t_->sizes());
@@ -1119,9 +1119,9 @@ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   nelem = THTensor_(nElement)(r_);
   THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
   {
@@ -1132,7 +1132,7 @@ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
       int64_t k;
       for (k = 0; k < r_->size(1); k++)
       {
-        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
+        scalar_t* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
         int64_t l;
         for (l = 0; l < nOutputRows*nOutputCols; l++)
           ptr_output[l] = 0.0;
@@ -1148,7 +1148,7 @@ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
       int64_t k;
       for (k = 0; k < r_->size(1); k++)
      {
-        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
+        scalar_t* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
         int64_t l;
         for (l = 0; l < nOutputRows*nOutputCols; l++)
           ptr_output[l] *= beta;
@@ -1164,13 +1164,13 @@ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
     {
       int64_t i;
       /* get output */
-      real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows;
+      scalar_t *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows;
 
       for(i = 0; i < nInputPlane; i++)
      {
        /* get kernel */
-        real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
+        scalar_t *ptr_weight = weight_data + k*kstride0 + i*kstride1;
        /* get input */
-        real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols;
+        scalar_t *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols;
 
        /* do image, kernel convolution */
        if (*vf == 'F')
@@ -1214,7 +1214,7 @@ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   scalar multiplication like
   y <- x*y + beta*y
 */
-void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
+void THTensor_(conv2Dmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   THTensor *input;
   THTensor* kernel;
@@ -1223,9 +1223,9 @@ void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   int64_t nKernelRows;
   int64_t nKernelCols;
   int64_t nOutputRows, nOutputCols;
-  real *ptr_input;
-  real *ptr_weight;
-  real *output_data;
+  scalar_t *ptr_input;
+  scalar_t *ptr_weight;
+  scalar_t *output_data;
   ptrdiff_t nelem;
 
   AT_CHECK(!t_->is_empty() && t_->dim() == 2, "input: non-empty 2D Tensor expected, got size: ", t_->sizes());
@@ -1253,9 +1253,9 @@ void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  ptr_input = input->data<real>();
-  ptr_weight = kernel->data<real>();
-  output_data = r_->data<real>();
+  ptr_input = input->data<scalar_t>();
+  ptr_weight = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   /* do image, kernel convolution */
@@ -1273,7 +1273,7 @@ void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   component wise multiplication like
   y <- y.*x + beta*y
 */
-void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
+void THTensor_(conv2Dcmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputRows, nInputCols;
   int64_t nKernelRows, nKernelCols;
@@ -1281,9 +1281,9 @@ void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, TH
   int64_t istride0, kstride0;
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k;
@@ -1321,16 +1321,16 @@ void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, TH
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   for(k = 0; k < nOutputPlane; k++)
   {
     /* get kernel */
-    real *ptr_weight = weight_data + k*kstride0;
+    scalar_t *ptr_weight = weight_data + k*kstride0;
     /* get input */
-    real *ptr_input = input_data + k*istride0;
+    scalar_t *ptr_input = input_data + k*istride0;
 
     /* do image, kernel convolution */
     THTensor_(conv2d)(output_data,
@@ -1350,7 +1350,7 @@ void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, TH
   component wise multiplication like with a permutation map
   y <- y.*x + beta*y
 */
-void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, int64_t srow, int64_t scol, const char *vf, const char *xc)
+void THTensor_(conv2Dmap)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, THTensor *map, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputRows, nInputCols;
   int64_t nKernelRows, nKernelCols;
@@ -1358,9 +1358,9 @@ void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   int64_t istride0, kstride0;
   THTensor *input;
   THTensor* kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   int64_t nmaps;
   ptrdiff_t nelem;
   int64_t k;
@@ -1401,9 +1401,9 @@ void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   nmaps = map->size(0);
 
@@ -1414,11 +1414,11 @@ void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
     int64_t to = (int64_t)THTensor_(get2d)(map,k,1)-1;
 
     /* get kernel */
-    real *ptr_weight = weight_data + k*kstride0;
+    scalar_t *ptr_weight = weight_data + k*kstride0;
     /* get input */
-    real *ptr_input = input_data + from*istride0;
+    scalar_t *ptr_input = input_data + from*istride0;
     /* get output */
-    real *ptr_output = output_data + to*nOutputRows*nOutputCols;
+    scalar_t *ptr_output = output_data + to*nOutputRows*nOutputCols;
 
     /* do image, kernel convolution */
     THTensor_(conv2d)(ptr_output,
@@ -1438,7 +1438,7 @@ void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   for sr,sc=1 this is equivalent to xcorr2Dger, but otherwise it is useful for
   calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
 */
-void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
+void THTensor_(conv3DRevger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_,
                              int64_t sdepth, int64_t srow, int64_t scol)
 {
   int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
@@ -1447,9 +1447,9 @@ void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
   int64_t istride0, kstride0;
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k, i;
@@ -1490,19 +1490,19 @@ void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   for(k = 0; k < nKernelPlane; k++)
   {
     /* get kernel */
-    real *ptr_weight = weight_data+k*kstride0;
+    scalar_t *ptr_weight = weight_data+k*kstride0;
 
     for(i = 0; i < nInputPlane; i++)
     {
      /* get input */
-      real *ptr_input = input_data+i*istride0;
+      scalar_t *ptr_input = input_data+i*istride0;
 
       /* do image, kernel convolution */
       THTensor_(validXCorr3DRevptr)(output_data,
@@ -1524,7 +1524,7 @@ void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_,
   like rank1 update
   A <- xx' + beta*A
 */
-void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
+void THTensor_(conv3Dger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_,
                           int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
@@ -1533,9 +1533,9 @@ void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   int64_t istride0, kstride0;
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k, i;
@@ -1581,19 +1581,19 @@ void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   for(k = 0; k < nKernelPlane; k++)
   {
     /* get kernel */
-    real *ptr_weight = weight_data+k*kstride0;
+    scalar_t *ptr_weight = weight_data+k*kstride0;
 
     for(i = 0; i < nInputPlane; i++)
     {
       /* get input */
-      real *ptr_input = input_data+i*istride0;
+      scalar_t *ptr_input = input_data+i*istride0;
 
       /* do image, kernel convolution */
       THTensor_(conv3d)(output_data,
@@ -1615,7 +1615,7 @@ void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   matrix vector product like
   y <- Ax + beta*y
 */
-void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
+void THTensor_(conv3Dmv)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_,
                          int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
@@ -1624,9 +1624,9 @@ void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   int64_t istride0, kstride0, kstride1;
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k, i;
@@ -1676,18 +1676,18 @@ void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   for(k = 0; k < nOutputPlane; k++)
   {
     for(i = 0; i < nInputPlane; i++)
     {
      /* get kernel */
-      real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
+      scalar_t *ptr_weight = weight_data + k*kstride0 + i*kstride1;
      /* get input */
-      real *ptr_input = input_data + i*istride0;
+      scalar_t *ptr_input = input_data + i*istride0;
 
      /* do image, kernel convolution */
      THTensor_(conv3d)(output_data,
@@ -1708,7 +1708,7 @@ void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTe
   scalar multiplication like
   y <- x*y + beta*y
 */
-void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
+void THTensor_(conv3Dmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_,
                           int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   THTensor *input;
@@ -1720,9 +1720,9 @@ void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   int64_t nKernelRows;
   int64_t nKernelCols;
   int64_t nOutputDepth, nOutputRows, nOutputCols;
-  real *ptr_input;
-  real *ptr_weight;
-  real *output_data;
+  scalar_t *ptr_input;
+  scalar_t *ptr_weight;
+  scalar_t *output_data;
   ptrdiff_t nelem;
 
   AT_CHECK(!t_->is_empty() && t_->dim() == 3, "input: non-empty 3D Tensor expected, got size: ", t_->sizes());
@@ -1756,9 +1756,9 @@ void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  ptr_input = input->data<real>();
-  ptr_weight = kernel->data<real>();
-  output_data = r_->data<real>();
+  ptr_input = input->data<scalar_t>();
+  ptr_weight = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   /* do image, kernel convolution */
@@ -1776,7 +1776,7 @@ void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   component wise multiplication like
   y <- y.*x + beta*y
 */
-void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
+void THTensor_(conv3Dcmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_,
                            int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
@@ -1786,9 +1786,9 @@ void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, TH
   THTensor *input;
   THTensor *kernel;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   ptrdiff_t nelem;
   int64_t k;
@@ -1831,16 +1831,16 @@ void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, TH
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   for(k = 0; k < nOutputPlane; k++)
   {
     /* get kernel */
-    real *ptr_weight = weight_data + k*kstride0;
+    scalar_t *ptr_weight = weight_data + k*kstride0;
     /* get input */
-    real *ptr_input = input_data + k*istride0;
+    scalar_t *ptr_input = input_data + k*istride0;
 
     /* do image, kernel convolution */
     THTensor_(conv3d)(output_data,
@@ -1861,7 +1861,7 @@ void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, TH
   component wise multiplication like with a permutation map
   y <- y.*x + beta*y
 */
-void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map,
+void THTensor_(conv3Dmap)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, THTensor *map,
                           int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc)
 {
   int64_t nInputPlane, nInputDepth, nInputRows, nInputCols;
@@ -1872,9 +1872,9 @@ void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   THTensor *input;
   THTensor *kernel;
   ptrdiff_t nelem;
-  real *input_data;
-  real *weight_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *weight_data;
+  scalar_t *output_data;
   int64_t nmaps;
   int64_t k;
@@ -1921,9 +1921,9 @@ void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
   else if (beta != 1)
     THTensor_(mul)(r_, r_, beta);
 
-  input_data = input->data<real>();
-  weight_data = kernel->data<real>();
-  output_data = r_->data<real>();
+  input_data = input->data<scalar_t>();
+  weight_data = kernel->data<scalar_t>();
+  output_data = r_->data<scalar_t>();
 
   nmaps = map->size(0);
 
@@ -1934,11 +1934,11 @@ void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THT
     int64_t to = (int64_t)THTensor_(get2d)(map,k,1)-1;
 
     /* get kernel */
-    real *ptr_weight = weight_data + k*kstride0;
+    scalar_t *ptr_weight = weight_data + k*kstride0;
     /* get input */
-    real *ptr_input = input_data + from*istride0;
+    scalar_t *ptr_input = input_data + from*istride0;
     /* get output */
-    real *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols;
+    scalar_t *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols;
 
     /* do image, kernel convolution */
     THTensor_(conv3d)(ptr_output,
diff --git a/aten/src/TH/generic/THTensorConv.h b/aten/src/TH/generic/THTensorConv.h
index 279ece63628118..d62ae8579408a9 100644
--- a/aten/src/TH/generic/THTensorConv.h
+++ b/aten/src/TH/generic/THTensorConv.h
@@ -2,78 +2,78 @@
 #define TH_GENERIC_FILE "generic/THTensorConv.h"
 #else
 
-TH_API void THTensor_(validXCorr2Dptr)(real *r_,
-                                       real alpha,
-                                       real *t_, int64_t ir, int64_t ic,
-                                       real *k_, int64_t kr, int64_t kc,
+TH_API void THTensor_(validXCorr2Dptr)(scalar_t *r_,
+                                       scalar_t alpha,
+                                       scalar_t *t_, int64_t ir, int64_t ic,
+                                       scalar_t *k_, int64_t kr, int64_t kc,
                                        int64_t sr, int64_t sc);
 
-TH_API void THTensor_(validConv2Dptr)(real *r_,
-                                      real alpha,
-                                      real *t_, int64_t ir, int64_t ic,
-                                      real *k_, int64_t kr, int64_t kc,
+TH_API void THTensor_(validConv2Dptr)(scalar_t *r_,
+                                      scalar_t alpha,
+                                      scalar_t *t_, int64_t ir, int64_t ic,
+                                      scalar_t *k_, int64_t kr, int64_t kc,
                                       int64_t sr, int64_t sc);
 
-TH_API void THTensor_(fullXCorr2Dptr)(real *r_,
-                                      real alpha,
-                                      real *t_, int64_t ir, int64_t ic,
-                                      real *k_, int64_t kr, int64_t kc,
+TH_API void THTensor_(fullXCorr2Dptr)(scalar_t *r_,
+                                      scalar_t alpha,
+                                      scalar_t *t_, int64_t ir, int64_t ic,
+                                      scalar_t *k_, int64_t kr, int64_t kc,
                                       int64_t sr, int64_t sc);
 
-TH_API void
THTensor_(fullConv2Dptr)(real *r_, - real alpha, - real *t_, int64_t ir, int64_t ic, - real *k_, int64_t kr, int64_t kc, +TH_API void THTensor_(fullConv2Dptr)(scalar_t *r_, + scalar_t alpha, + scalar_t *t_, int64_t ir, int64_t ic, + scalar_t *k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc); -TH_API void THTensor_(validXCorr2DRevptr)(real *r_, - real alpha, - real *t_, int64_t ir, int64_t ic, - real *k_, int64_t kr, int64_t kc, +TH_API void THTensor_(validXCorr2DRevptr)(scalar_t *r_, + scalar_t alpha, + scalar_t *t_, int64_t ir, int64_t ic, + scalar_t *k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc); -TH_API void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol); -TH_API void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol); -TH_API void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv2DRevger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol); +TH_API void THTensor_(conv2DRevgerm)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol); +TH_API void THTensor_(conv2Dger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv2Dmv)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv2Dmm)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv2Dmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv2Dcmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(validXCorr3Dptr)(real *r_, - real alpha, - real *t_, int64_t it, int64_t ir, int64_t ic, - real *k_, int64_t kt, int64_t kr, int64_t kc, +TH_API void THTensor_(validXCorr3Dptr)(scalar_t *r_, + scalar_t alpha, + scalar_t *t_, int64_t it, int64_t ir, int64_t ic, + scalar_t *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc); -TH_API void THTensor_(validConv3Dptr)(real *r_, - real alpha, - real *t_, int64_t it, int64_t ir, int64_t ic, - real *k_, int64_t kt, int64_t kr, int64_t kc, +TH_API void THTensor_(validConv3Dptr)(scalar_t *r_, + scalar_t alpha, + scalar_t *t_, int64_t it, int64_t ir, int64_t ic, + scalar_t *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc); -TH_API void 
THTensor_(fullXCorr3Dptr)(real *r_, - real alpha, - real *t_, int64_t it, int64_t ir, int64_t ic, - real *k_, int64_t kt, int64_t kr, int64_t kc, +TH_API void THTensor_(fullXCorr3Dptr)(scalar_t *r_, + scalar_t alpha, + scalar_t *t_, int64_t it, int64_t ir, int64_t ic, + scalar_t *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc); -TH_API void THTensor_(fullConv3Dptr)(real *r_, - real alpha, - real *t_, int64_t it, int64_t ir, int64_t ic, - real *k_, int64_t kt, int64_t kr, int64_t kc, +TH_API void THTensor_(fullConv3Dptr)(scalar_t *r_, + scalar_t alpha, + scalar_t *t_, int64_t it, int64_t ir, int64_t ic, + scalar_t *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc); -TH_API void THTensor_(validXCorr3DRevptr)(real *r_, - real alpha, - real *t_, int64_t it, int64_t ir, int64_t ic, - real *k_, int64_t kt, int64_t kr, int64_t kc, +TH_API void THTensor_(validXCorr3DRevptr)(scalar_t *r_, + scalar_t alpha, + scalar_t *t_, int64_t it, int64_t ir, int64_t ic, + scalar_t *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc); -TH_API void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol); -TH_API void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); -TH_API void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv3DRevger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol); +TH_API void THTensor_(conv3Dger)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv3Dmv)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv3Dmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); +TH_API void THTensor_(conv3Dcmul)(THTensor *r_, scalar_t beta, scalar_t alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc); #endif diff --git a/aten/src/TH/generic/THTensorCopy.cpp b/aten/src/TH/generic/THTensorCopy.cpp index f0cd26582968f5..a9e0564fb574c8 100644 --- a/aten/src/TH/generic/THTensorCopy.cpp +++ b/aten/src/TH/generic/THTensorCopy.cpp @@ -36,24 +36,24 @@ void THTensor_(copyTranspose)(THTensor *tensor, THTensor *src) { #endif THTensor *buf = THTensor_(newWithSize2d)(BLOCK_SZ, BLOCK_SZ); - real *sp = src->data(); - real *rp = tensor->data(); - real *bp = buf->data(); + scalar_t *sp = src->data(); + scalar_t *rp = tensor->data(); + scalar_t *bp = buf->data(); int64_t NR = THTensor_(size)(src, 0); int64_t NC = THTensor_(size)(src, 1); for (int64_t R = 0; R < NR; R += BLOCK_SZ) { for (int64_t C = 0; C < NC; C 
< NC; C += BLOCK_SZ) {
-      real *spo = sp + R + C * NR;
-      real *rpo = rp + C + R * NC;
+      scalar_t *spo = sp + R + C * NR;
+      scalar_t *rpo = rp + C + R * NC;
       int nr = MIN(NR - R, BLOCK_SZ);
       int nc = MIN(NC - C, BLOCK_SZ);
       // 1. copy columns from src to buf
       for (int c = 0; c < nc; c++) {
-        memcpy(bp + c * BLOCK_SZ, spo + c * NR, nr * sizeof(real));
+        memcpy(bp + c * BLOCK_SZ, spo + c * NR, nr * sizeof(scalar_t));
       }
       // 2. transpose buf in place
@@ -62,7 +62,7 @@ void THTensor_(copyTranspose)(THTensor *tensor, THTensor *src) {
       for (int r = 0; r < rc_max; r++) {
         int end = MIN(r, rc_min);
         for (int c = 0; c < end; c++) {
-          real tmp = bp[r + BLOCK_SZ * c];
+          scalar_t tmp = bp[r + BLOCK_SZ * c];
           bp[r + BLOCK_SZ * c] = bp[r * BLOCK_SZ + c];
           bp[r * BLOCK_SZ + c] = tmp;
         }
@@ -70,7 +70,7 @@ void THTensor_(copyTranspose)(THTensor *tensor, THTensor *src) {
       // 3. copy rows from buf to dst
       for (int r = 0; r < nr; r++) {
-        memcpy(rpo + r * NC, bp + r * BLOCK_SZ, nc * sizeof(real));
+        memcpy(rpo + r * NC, bp + r * BLOCK_SZ, nc * sizeof(scalar_t));
       }
     }
   }
@@ -93,8 +93,8 @@ void THTensor_(copy)(THTensor *tensor, THTensor *src)
 #endif
   if (tensorSize == srcSize) {
     if ( tensorContig && srcContig) {
-      real *sp = src->data<real>();
-      real *rp = tensor->data<real>();
+      scalar_t *sp = src->data<scalar_t>();
+      scalar_t *rp = tensor->data<scalar_t>();
 #ifndef TH_REAL_IS_HALF
 #ifdef _OPENMP
 #pragma omp parallel if ( (tensorSize > TH_OMP_OVERHEAD_THRESHOLD_COPY) && (!inOMP) )
@@ -104,8 +104,8 @@ void THTensor_(copy)(THTensor *tensor, THTensor *src)
     ptrdiff_t offset = tid * (tensorSize / num_threads);
     ptrdiff_t end = (tid == num_threads - 1) ? tensorSize : offset + tensorSize / num_threads;
     ptrdiff_t len = end - offset;
-    real *tensorData = rp + offset;
-    real *srcData = sp + offset;
+    scalar_t *tensorData = rp + offset;
+    scalar_t *srcData = sp + offset;
     THVector_(copy)(tensorData, srcData, len);
   }
 #else
@@ -122,10 +122,10 @@ void THTensor_(copy)(THTensor *tensor, THTensor *src)
       rp[i] = sp[i];
     }
   } else {
-    memcpy(rp, sp, srcSize * sizeof(real));
+    memcpy(rp, sp, srcSize * sizeof(scalar_t));
   }
 #else
-    memcpy(rp, sp, srcSize * sizeof(real));
+    memcpy(rp, sp, srcSize * sizeof(scalar_t));
 #endif
 #endif
@@ -139,7 +139,7 @@ void THTensor_(copy)(THTensor *tensor, THTensor *src)
     if (inOMP) {
       serial_path = 1;
     } else {
-      TH_TENSOR_APPLY2_OMP(srcSize, tensorContig, srcContig, real, tensor, real, src, *tensor_data = *src_data;, TH_OMP_OVERHEAD_THRESHOLD_COPY)
+      TH_TENSOR_APPLY2_OMP(srcSize, tensorContig, srcContig, scalar_t, tensor, scalar_t, src, *tensor_data = *src_data;, TH_OMP_OVERHEAD_THRESHOLD_COPY)
     }
 #else
   serial_path = 1;
@@ -150,7 +150,7 @@
   }
   if (serial_path) {
-    TH_TENSOR_APPLY2(real, tensor, real, src, *tensor_data = *src_data;)
+    TH_TENSOR_APPLY2(scalar_t, tensor, scalar_t, src, *tensor_data = *src_data;)
   }
 }
@@ -198,30 +198,30 @@ using inter_copy_type_t = typename inter_copy_type<T>::type;
 #define IMPLEMENT_THTensor_COPY(TYPENAMESRC, TYPE_SRC) \
 void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \
 { \
-  TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, \
-                   *tensor_data = static_cast<real>( \
-                       static_cast<inter_copy_type_t<real>>(*src_data));) \
+  TH_TENSOR_APPLY2(scalar_t, tensor, TYPE_SRC, src, \
+                   *tensor_data = static_cast<scalar_t>( \
+                       static_cast<inter_copy_type_t<scalar_t>>(*src_data));) \
 }
 #define IMPLEMENT_THTensor_COPY_TO_HALF(TYPENAMESRC, TYPE_SRC) \
 void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \
 { \
-  TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, *tensor_data = TH_float2half((float)*src_data);) \
+  TH_TENSOR_APPLY2(scalar_t, tensor, TYPE_SRC, src, *tensor_data = TH_float2half((float)*src_data);) \
 }
 #define IMPLEMENT_THTensor_COPY_FROM_HALF(TYPENAMESRC, TYPE_SRC) \
 void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \
 { \
-  TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, \
-                   *tensor_data = static_cast<real>( \
-                       static_cast<inter_copy_type_t<real>>( \
+  TH_TENSOR_APPLY2(scalar_t, tensor, TYPE_SRC, src, \
+                   *tensor_data = static_cast<scalar_t>( \
+                       static_cast<inter_copy_type_t<scalar_t>>( \
                         TH_half2float(*src_data)));) \
 }
 #define IMPLEMENT_THTensor_COPY_TO_FROM_HALF(TYPENAMESRC, TYPE_SRC) \
 void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \
 { \
-  TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, *tensor_data = *src_data;) \
+  TH_TENSOR_APPLY2(scalar_t, tensor, TYPE_SRC, src, *tensor_data = *src_data;) \
 }
 #ifndef TH_REAL_IS_HALF
diff --git a/aten/src/TH/generic/THTensorEvenMoreMath.cpp b/aten/src/TH/generic/THTensorEvenMoreMath.cpp
index c698bf2e093a7f..f301d9808df3db 100644
--- a/aten/src/TH/generic/THTensorEvenMoreMath.cpp
+++ b/aten/src/TH/generic/THTensorEvenMoreMath.cpp
@@ -4,12 +4,12 @@
 #include
-void THTensor_(fill)(THTensor *r_, real value)
+void THTensor_(fill)(THTensor *r_, scalar_t value)
 {
   if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) {
-    TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len););
+    TH_TENSOR_APPLY_CONTIG(scalar_t, r_, THVector_(fill)(r__data, value, r__len););
   } else {
-    TH_TENSOR_APPLY(real, r_,
+    TH_TENSOR_APPLY(scalar_t, r_,
       if (r__stride == 1) {
         THVector_(fill)(r__data, value, r__size);
         r__i = r__size;
@@ -27,9 +27,9 @@ void THTensor_(zero)(THTensor *r_)
   THTensor_(fill)(r_, 0);
 }
-void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
+void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, scalar_t value)
 {
-  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
+  TH_TENSOR_APPLY2(scalar_t, tensor, unsigned char, mask,
     if (*mask_data > 1)
     {
       THFree(mask_counter);
@@ -45,7 +45,7 @@ void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
 void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
 {
   THTensor *srct = THTensor_(newContiguous)(src);
-  real *src_data = srct->data<real>();
+  scalar_t *src_data = srct->data<scalar_t>();
   ptrdiff_t cntr = 0;
   ptrdiff_t nelem = THTensor_(nElement)(srct);
   if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
@@ -53,7 +53,7 @@ void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
     c10::raw::intrusive_ptr::decref(srct);
     THError("Number of elements of destination tensor != Number of elements in mask");
   }
-  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
+  TH_TENSOR_APPLY2(scalar_t, tensor, unsigned char, mask,
     if (*mask_data > 1)
     {
       c10::raw::intrusive_ptr::decref(srct);
@@ -80,14 +80,14 @@ void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
 void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
 {
   ptrdiff_t numel = THByteTensor_sumall(mask);
-  real *tensor_data;
+  scalar_t *tensor_data;
 #ifdef DEBUG
   THAssert(numel <= LONG_MAX);
 #endif
   THTensor_(resize1d)(tensor,numel);
-  tensor_data = tensor->data<real>();
-  TH_TENSOR_APPLY2(real, src, unsigned char, mask,
+  tensor_data = tensor->data<scalar_t>();
+  TH_TENSOR_APPLY2(scalar_t, src, unsigned char, mask,
    if (*mask_data > 1)
    {
      THFree(mask_counter);
@@ -116,7 +116,7 @@ void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
 #endif
   /* First Pass to determine size of subscripts */
-  TH_TENSOR_APPLY(real,
tensor, + TH_TENSOR_APPLY(scalar_t, tensor, if IS_NONZERO(*tensor_data) { ++numel; }); @@ -127,7 +127,7 @@ void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor) /* Second pass populates subscripts */ subscript_data = THLongTensor_data(subscript); - TH_TENSOR_APPLY(real, tensor, + TH_TENSOR_APPLY(scalar_t, tensor, if IS_NONZERO(*tensor_data) { div = 1; @@ -146,7 +146,7 @@ void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTens ptrdiff_t i, numel; THTensor *tSlice, *sSlice; int64_t *index_data; - real *tensor_data, *src_data; + scalar_t *tensor_data, *src_data; THArgCheck(THTensor_nDimensionLegacyNoScalars(index) == 1, 3, "Index is supposed to be 1-dimensional"); THArgCheck(dim < THTensor_nDimensionLegacyNoScalars(src), 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); @@ -165,8 +165,8 @@ void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTens if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor)) { - tensor_data = tensor->data(); - src_data = src->data(); + tensor_data = tensor->data(); + src_data = src->data(); auto src_size0 = THTensor_sizeLegacyNoScalars(src, 0); ptrdiff_t rowsize = src_size0 == 0 ? 1: THTensor_(nElement)(src) / src_size0; @@ -186,7 +186,7 @@ void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTens } else { #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idim() <= 1) @@ -277,8 +277,8 @@ void THTensor_(take)(THTensor *r_, THTensor *src, THLongTensor *index) index = THLongTensor_newContiguous(index); int64_t* index_data = THLongTensor_data(index); ptrdiff_t srcElements = THTensor_(nElement)(src); - real* src_data = src->data(); - real* dst_data = dst->data(); + scalar_t* src_data = src->data(); + scalar_t* dst_data = dst->data(); ptrdiff_t nIndices = THLongTensor_nElement(index); int isContiguous = THTensor_(isContiguous)(src); @@ -319,11 +319,11 @@ void THTensor_(put)(THTensor *tensor, THLongTensor *index, THTensor *src, int ac index = THLongTensor_newContiguous(index); src = THTensor_(newContiguous)(src); - real* data = tensor->data(); + scalar_t* data = tensor->data(); ptrdiff_t numel = THTensor_(nElement)(tensor); int is_contiguous = THTensor_(isContiguous)(tensor); - TH_TENSOR_APPLY2(int64_t, index, real, src, + TH_TENSOR_APPLY2(int64_t, index, scalar_t, src, THTensor_(checkLinearIndex)(*index_data, numel); int64_t linearIndex = THTensor_(wrapLinearIndex)(*index_data, numel); int64_t dataOffset = is_contiguous ? 
linearIndex : THTensor_(dataOffset)(tensor, linearIndex); @@ -379,7 +379,7 @@ void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTenso THLongTensor_free(index); } -void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val) +void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, scalar_t val) { ptrdiff_t i, numel; THTensor *tSlice; @@ -422,7 +422,7 @@ void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *i elems_per_row = THTensor_sizeLegacyNoScalars(index, dim); - TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, + TH_TENSOR_DIM_APPLY3(scalar_t, tensor, scalar_t, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, for (i = 0; i < elems_per_row; ++i) { @@ -448,7 +448,7 @@ void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor elems_per_row = THTensor_sizeLegacyNoScalars(index, dim); - TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, + TH_TENSOR_DIM_APPLY3(scalar_t, tensor, scalar_t, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_SCATTER, for (i = 0; i < elems_per_row; ++i) { @@ -474,7 +474,7 @@ void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTen elems_per_row = THTensor_sizeLegacyNoScalars(index, dim); - TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, + TH_TENSOR_DIM_APPLY3(scalar_t, tensor, scalar_t, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_SCATTER, for (i = 0; i < elems_per_row; ++i) { @@ -488,7 +488,7 @@ void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTen }) } -void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val) +void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, scalar_t val) { int64_t elems_per_row, i, idx; @@ -498,7 +498,7 @@ void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real elems_per_row = THTensor_sizeLegacyNoScalars(index, dim); - TH_TENSOR_DIM_APPLY2(real, tensor, int64_t, index, dim, + TH_TENSOR_DIM_APPLY2(scalar_t, tensor, int64_t, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); @@ -515,7 +515,7 @@ accreal THTensor_(dot)(THTensor *tensor, THTensor *src) { accreal sum = 0; /* we use a trick here. careful with that. */ - TH_TENSOR_APPLY2(real, tensor, real, src, + TH_TENSOR_APPLY2(scalar_t, tensor, scalar_t, src, int64_t sz = (tensor_size-tensor_i < src_size-src_i ? 
tensor_size-tensor_i : src_size-src_i);
     sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
     tensor_i += sz;
@@ -526,14 +526,14 @@ accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
   return sum;
 }
-real THTensor_(minall)(THTensor *tensor)
+scalar_t THTensor_(minall)(THTensor *tensor)
 {
-  real theMin;
-  real value;
+  scalar_t theMin;
+  scalar_t value;
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) > 0, 1, "tensor must have one dimension");
-  theMin = tensor->data<real>()[0];
-  TH_TENSOR_APPLY(real, tensor,
+  theMin = tensor->data<scalar_t>()[0];
+  TH_TENSOR_APPLY(scalar_t, tensor,
     value = *tensor_data;
     /* This is not the same as value<theMin in the case of NaNs */
     if(!(value >= theMin))
@@ -544,14 +544,14 @@ real THTensor_(minall)(THTensor *tensor)
   return theMin;
 }
-real THTensor_(maxall)(THTensor *tensor)
+scalar_t THTensor_(maxall)(THTensor *tensor)
 {
-  real theMax;
-  real value;
+  scalar_t theMax;
+  scalar_t value;
   THArgCheck(THTensor_nDimensionLegacyAll(tensor) > 0, 1, "tensor must have one dimension");
-  theMax = tensor->data<real>()[0];
-  TH_TENSOR_APPLY(real, tensor,
+  theMax = tensor->data<scalar_t>()[0];
+  TH_TENSOR_APPLY(scalar_t, tensor,
     value = *tensor_data;
     /* This is not the same as value>theMax in the case of NaNs */
     if(!(value <= theMax))
@@ -571,13 +571,13 @@ accreal THTensor_(sumall)(THTensor *tensor)
   if(inOMP) {
     serial_path = 1;
   } else {
-    TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, +:sum, sum += *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD);
+    TH_TENSOR_APPLY_REDUCTION_OMP(scalar_t, tensor, +:sum, sum += *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD);
   }
 #else
   serial_path = 1;
 #endif
   if (serial_path) {
-    TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
+    TH_TENSOR_APPLY(scalar_t, tensor, sum += *tensor_data;);
   }
   return sum;
 }
@@ -591,18 +591,18 @@ accreal THTensor_(prodall)(THTensor *tensor)
   if(inOMP) {
     serial_path = 1;
   } else {
-    TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, *:prod, prod *= *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD);
+    TH_TENSOR_APPLY_REDUCTION_OMP(scalar_t, tensor, *:prod, prod *= *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD);
   }
 #else
   serial_path = 1;
 #endif
   if (serial_path) {
-    TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
+    TH_TENSOR_APPLY(scalar_t, tensor, prod *= *tensor_data;);
   }
   return prod;
 }
-void THTensor_(add)(THTensor *r_, THTensor *t, real value)
+void THTensor_(add)(THTensor *r_, THTensor *t, scalar_t value)
 {
   THTensor_(resizeAs)(r_, t);
   int64_t r_Size = THTensor_(nElement)(r_);
@@ -610,14 +610,14 @@ void THTensor_(add)(THTensor *r_, THTensor *t, real value)
   int tContig = THTensor_(isContiguous)(t);
   int serial_path = 0;
   if (r_Contig && tContig) {
-    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len););
+    TH_TENSOR_APPLY2_CONTIG(scalar_t, r_, scalar_t, t, THVector_(adds)(r__data, t_data, value, r__len););
   } else {
 #ifdef _OPENMP
     int inOMP = omp_in_parallel();
     if (inOMP) {
       serial_path = 1;
     } else {
-      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data + value;, ORDIN_TH_OMP_OVERHEAD_THRESHOLD)
+      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = *t_data + value;, ORDIN_TH_OMP_OVERHEAD_THRESHOLD)
     }
 #else
     (void)r_Size;
@@ -625,26 +625,26 @@
 #endif
   }
   if (serial_path) {
-    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
+    TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = *t_data + value;);
   }
 }
-void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
+void
THTensor_(sub)(THTensor *r_, THTensor *t, scalar_t value) { THTensor_(add)(r_, t, -value); } -void THTensor_(add_scaled)(THTensor *r_, THTensor *t, real value, real alpha) +void THTensor_(add_scaled)(THTensor *r_, THTensor *t, scalar_t value, scalar_t alpha) { THTensor_(add)(r_, t, value * alpha); } -void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, real value, real alpha) +void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, scalar_t value, scalar_t alpha) { THTensor_(add)(r_, t, -value * alpha); } -void THTensor_(mul)(THTensor *r_, THTensor *t, real value) +void THTensor_(mul)(THTensor *r_, THTensor *t, scalar_t value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); @@ -652,14 +652,14 @@ void THTensor_(mul)(THTensor *r_, THTensor *t, real value) int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { - TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len);); + TH_TENSOR_APPLY2_CONTIG(scalar_t, r_, scalar_t, t, THVector_(muls)(r__data, t_data, value, r__len);); } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { - TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data * value;, ORDIN_TH_OMP_OVERHEAD_THRESHOLD) + TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = *t_data * value;, ORDIN_TH_OMP_OVERHEAD_THRESHOLD) } #else (void)r_Size; @@ -667,11 +667,11 @@ void THTensor_(mul)(THTensor *r_, THTensor *t, real value) #endif } if (serial_path) { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = *t_data * value;); } } -void THTensor_(div)(THTensor *r_, THTensor *t, real value) +void THTensor_(div)(THTensor *r_, THTensor *t, scalar_t value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); @@ -679,14 +679,14 @@ void THTensor_(div)(THTensor *r_, THTensor *t, real value) int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { - TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len);); + TH_TENSOR_APPLY2_CONTIG(scalar_t, r_, scalar_t, t, THVector_(divs)(r__data, t_data, value, r__len);); } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { - TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data / value;, ORDIN_TH_OMP_OVERHEAD_THRESHOLD) + TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = *t_data / value;, ORDIN_TH_OMP_OVERHEAD_THRESHOLD) } #else (void)r_Size; @@ -694,11 +694,11 @@ void THTensor_(div)(THTensor *r_, THTensor *t, real value) #endif } if (serial_path) { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = *t_data / value;); } } -void THTensor_(lshift)(THTensor *r_, THTensor *t, real value) +void THTensor_(lshift)(THTensor *r_, THTensor *t, scalar_t value) { #if defined(TH_REAL_IS_FLOAT) return THTensor_(mul)(r_, t, powf(2, value)); @@ -713,13 +713,13 @@ void THTensor_(lshift)(THTensor *r_, THTensor *t, real value) int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { - real *tp = t->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; idata(); - real *rp = r_->data(); 
+ scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i> value; + rp[i] = ((scalar_t) tp[i]) >> value; #else rp[i] = ((ureal) tp[i]) >> value; #endif @@ -783,9 +783,9 @@ void THTensor_(rshift)(THTensor *r_, THTensor *t, real value) serial_path = 1; } else { #if defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((real) *t_data) >> value);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = (((scalar_t) *t_data) >> value);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); #else - TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((ureal) *t_data) >> value);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = (((ureal) *t_data) >> value);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); #endif } #else @@ -794,15 +794,15 @@ void THTensor_(rshift)(THTensor *r_, THTensor *t, real value) } if (serial_path) { #if defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value);); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = (((scalar_t) *t_data) >> value);); #else - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) >> value);); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = (((ureal) *t_data) >> value);); #endif } #endif } -void THTensor_(fmod)(THTensor *r_, THTensor *t, real value) +void THTensor_(fmod)(THTensor *r_, THTensor *t, scalar_t value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); @@ -810,8 +810,8 @@ void THTensor_(fmod)(THTensor *r_, THTensor *t, real value) int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { - real *tp = t->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; istorage_offset())[(x0)*THTensor_strideLegacyNoScalars(self, 0)]; } -static inline real THTensor_(fastGet1d)(THTensor *self, int64_t x0) { +static inline scalar_t THTensor_(fastGet1d)(THTensor *self, int64_t x0) { return (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)]; } -static inline real THTensor_(fastGet2d)(THTensor *self, int64_t x0, int64_t x1) { +static inline scalar_t THTensor_(fastGet2d)(THTensor *self, int64_t x0, int64_t x1) { return (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)]; } -static inline real THTensor_(fastGet3d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2) { +static inline scalar_t THTensor_(fastGet3d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2) { return (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)+(x2)*self->stride(2)]; } -static inline real THTensor_(fastGet4d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3) { +static inline 
scalar_t THTensor_(fastGet4d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3) { return (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)+(x2)*self->stride(2)+(x3)*self->stride(3)]; } -static inline real THTensor_(fastGet5d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3, int64_t x4) { +static inline scalar_t THTensor_(fastGet5d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3, int64_t x4) { return (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)+(x2)*self->stride(2)+(x3)*self->stride(3)+(x4)*self->stride(4)]; } -static inline void THTensor_(fastSet1d)(THTensor *self, int64_t x0, real value) { +static inline void THTensor_(fastSet1d)(THTensor *self, int64_t x0, scalar_t value) { (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)] = value; } -static inline void THTensor_(fastSet2d)(THTensor *self, int64_t x0, int64_t x1, real value) { +static inline void THTensor_(fastSet2d)(THTensor *self, int64_t x0, int64_t x1, scalar_t value) { (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)] = value; } -static inline void THTensor_(fastSet3d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, real value) { +static inline void THTensor_(fastSet3d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, scalar_t value) { (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)+(x2)*self->stride(2)] = value; } -static inline void THTensor_(fastSet4d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3, real value) { +static inline void THTensor_(fastSet4d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value) { (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)+(x2)*self->stride(2)+(x3)*self->stride(3)] = value; } -static inline void THTensor_(fastSet5d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3, int64_t x4, real value) { +static inline void THTensor_(fastSet5d)(THTensor *self, int64_t x0, int64_t x1, int64_t x2, int64_t x3, int64_t x4, scalar_t value) { (THStorage_(data)(THTensor_getStoragePtr(self))+self->storage_offset())[(x0)*self->stride(0)+(x1)*self->stride(1)+(x2)*self->stride(2)+(x3)*self->stride(3)+(x4)*self->stride(4)] = value; } diff --git a/aten/src/TH/generic/THTensorLapack.cpp b/aten/src/TH/generic/THTensorLapack.cpp index 1c1e32f3da1a2b..7269a5081ca13d 100644 --- a/aten/src/TH/generic/THTensorLapack.cpp +++ b/aten/src/TH/generic/THTensorLapack.cpp @@ -138,8 +138,8 @@ void THTensor_(gesv)(THTensor *rb_, THTensor *ra_, THTensor *b, THTensor *a) ipiv = THIntTensor_newWithSize1d((int64_t)n); THLapack_(gesv)(n, nrhs, - ra__->data(), lda, THIntTensor_data(ipiv), - rb__->data(), ldb, &info); + ra__->data(), lda, THIntTensor_data(ipiv), + rb__->data(), ldb, &info); THLapackCheckWithCleanup("Lapack Error in %s : U(%d,%d) is zero, singular U.", THCleanup( @@ -189,8 +189,8 @@ void THTensor_(trtrs)(THTensor *rb_, THTensor *ra_, THTensor *b, THTensor *a, ldb = n; THLapack_(trtrs)(uplo[0], trans[0], diag[0], n, nrhs, - ra__->data(), lda, - rb__->data(), ldb, &info); + ra__->data(), lda, + rb__->data(), ldb, &info); THLapackCheckWithCleanup("Lapack Error in %s : A(%d,%d) is zero, singular A", @@ -228,7 +228,7 @@ void THTensor_(gels)(THTensor *rb_, THTensor 
*ra_, THTensor *b, THTensor *a) int m, n, nrhs, lda, ldb, info, lwork; THTensor *work = NULL; - real wkopt = 0; + scalar_t wkopt = 0; THTensor *ra__ = NULL; // working version of A matrix to be passed into lapack GELS THTensor *rb__ = NULL; // working version of B matrix to be passed into lapack GELS @@ -247,14 +247,14 @@ void THTensor_(gels)(THTensor *rb_, THTensor *ra_, THTensor *b, THTensor *a) /* get optimal workspace size */ - THLapack_(gels)('N', m, n, nrhs, ra__->data(), lda, - rb__->data(), ldb, + THLapack_(gels)('N', m, n, nrhs, ra__->data(), lda, + rb__->data(), ldb, &wkopt, -1, &info); lwork = (int)wkopt; work = THTensor_(newWithSize1d)(lwork); - THLapack_(gels)('N', m, n, nrhs, ra__->data(), lda, - rb__->data(), ldb, - work->data(), lwork, &info); + THLapack_(gels)('N', m, n, nrhs, ra__->data(), lda, + rb__->data(), ldb, + work->data(), lwork, &info); THLapackCheckWithCleanup("Lapack Error in %s : The %d-th diagonal element of the triangular factor of A is zero", THCleanup(c10::raw::intrusive_ptr::decref(ra__); @@ -282,8 +282,8 @@ void THTensor_(geev)(THTensor *re_, THTensor *rv_, THTensor *a_, const char *job { int n, lda, lwork, info, ldvr; THTensor *work=nullptr, *wi, *wr, *a; - real wkopt; - real *rv_data; + scalar_t wkopt; + scalar_t *rv_data; int64_t i; THTensor *re__ = NULL; @@ -308,7 +308,7 @@ void THTensor_(geev)(THTensor *re_, THTensor *rv_, THTensor *a_, const char *job THTensor_(resize2d)(rv_,n,n); /* guard against someone passing a correct size, but wrong stride */ rv__ = THTensor_(newTransposedContiguous)(rv_); - rv_data = rv__->data(); + rv_data = rv__->data(); ldvr = n; } THTensor_(resize2d)(re_,n,2); @@ -316,14 +316,14 @@ void THTensor_(geev)(THTensor *re_, THTensor *rv_, THTensor *a_, const char *job if (n > 0) { // lapack doesn't work with size 0 /* get optimal workspace size */ - THLapack_(geev)('N', jobvr[0], n, a->data(), lda, wr->data(), wi->data(), + THLapack_(geev)('N', jobvr[0], n, a->data(), lda, wr->data(), wi->data(), NULL, 1, rv_data, ldvr, &wkopt, -1, &info); lwork = (int)wkopt; work = THTensor_(newWithSize1d)(lwork); - THLapack_(geev)('N', jobvr[0], n, a->data(), lda, wr->data(), wi->data(), - NULL, 1, rv_data, ldvr, work->data(), lwork, &info); + THLapack_(geev)('N', jobvr[0], n, a->data(), lda, wr->data(), wi->data(), + NULL, 1, rv_data, ldvr, work->data(), lwork, &info); THLapackCheckWithCleanup(" Lapack Error in %s : %d off-diagonal elements of an didn't converge to zero", THCleanup(c10::raw::intrusive_ptr::decref(re__); @@ -336,9 +336,9 @@ void THTensor_(geev)(THTensor *re_, THTensor *rv_, THTensor *a_, const char *job } { - real *re_data = re__->data(); - real *wi_data = wi->data(); - real *wr_data = wr->data(); + scalar_t *re_data = re__->data(); + scalar_t *wi_data = wi->data(); + scalar_t *wr_data = wr->data(); for (i=0; idata(), lda, - re_->data(), &wkopt, -1, &info); + THLapack_(syev)(jobz[0], uplo[0], n, rv__->data(), lda, + re_->data(), &wkopt, -1, &info); lwork = (int)wkopt; work = THTensor_(newWithSize1d)(lwork); - THLapack_(syev)(jobz[0], uplo[0], n, rv__->data(), lda, - re_->data(), work->data(), lwork, &info); + THLapack_(syev)(jobz[0], uplo[0], n, rv__->data(), lda, + re_->data(), work->data(), lwork, &info); THLapackCheckWithCleanup("Lapack Error %s : %d off-diagonal elements didn't converge to zero", THCleanup(c10::raw::intrusive_ptr::decref(rv__); @@ -421,7 +421,7 @@ void THTensor_(gesvd2)(THTensor *ru_, THTensor *rs_, THTensor *rv_, THTensor *ra int k,m, n, lda, ldu, ldvt, lwork, info; THTensor *work; THTensor *rvf_ = 
THTensor_(new)(); - real wkopt; + scalar_t wkopt; THTensor *ra__ = NULL; THTensor *ru__ = NULL; @@ -453,21 +453,21 @@ void THTensor_(gesvd2)(THTensor *ru_, THTensor *rs_, THTensor *rv_, THTensor *ra rv__ = THTensor_(newContiguous)(rvf_); THLapack_(gesvd)(jobu[0],jobu[0], - m,n,ra__->data(),lda, - rs__->data(), - ru__->data(), + m,n,ra__->data(),lda, + rs__->data(), + ru__->data(), ldu, - rv__->data(), ldvt, + rv__->data(), ldvt, &wkopt, -1, &info); lwork = (int)wkopt; work = THTensor_(newWithSize1d)(lwork); THLapack_(gesvd)(jobu[0],jobu[0], - m,n,ra__->data(),lda, - rs__->data(), - ru__->data(), + m,n,ra__->data(),lda, + rs__->data(), + ru__->data(), ldu, - rv__->data(), ldvt, - work->data(),lwork, &info); + rv__->data(), ldvt, + work->data(),lwork, &info); THLapackCheckWithCleanup("Lapack Error %s : %d superdiagonals failed to converge.", THCleanup( @@ -502,7 +502,7 @@ void THTensor_(getri)(THTensor *ra_, THTensor *a) THArgCheck(a->size(0) == a->size(1), 1, "A should be square"); int m, n, lda, info, lwork; - real wkopt; + scalar_t wkopt; THIntTensor *ipiv; THTensor *work; THTensor *ra__ = NULL; @@ -515,7 +515,7 @@ void THTensor_(getri)(THTensor *ra_, THTensor *a) ipiv = THIntTensor_newWithSize1d((int64_t)m); /* Run LU */ - THLapack_(getrf)(n, n, ra__->data(), lda, THIntTensor_data(ipiv), &info); + THLapack_(getrf)(n, n, ra__->data(), lda, THIntTensor_data(ipiv), &info); THLapackCheckWithCleanup("Lapack Error %s : U(%d,%d) is 0, U is singular", THCleanup( c10::raw::intrusive_ptr::decref(ra__); @@ -523,10 +523,10 @@ void THTensor_(getri)(THTensor *ra_, THTensor *a) "getrf", info, info); /* Run inverse */ - THLapack_(getri)(n, ra__->data(), lda, THIntTensor_data(ipiv), &wkopt, -1, &info); + THLapack_(getri)(n, ra__->data(), lda, THIntTensor_data(ipiv), &wkopt, -1, &info); lwork = (int)wkopt; work = THTensor_(newWithSize1d)(lwork); - THLapack_(getri)(n, ra__->data(), lda, THIntTensor_data(ipiv), work->data(), lwork, &info); + THLapack_(getri)(n, ra__->data(), lda, THIntTensor_data(ipiv), work->data(), lwork, &info); THLapackCheckWithCleanup("Lapack Error %s : U(%d,%d) is 0, U is singular", THCleanup( c10::raw::intrusive_ptr::decref(ra__); @@ -547,7 +547,7 @@ void THTensor_(clearUpLoTriangle)(THTensor *a, const char *uplo) int n = a->size(0); /* Build full matrix */ - real *p = a->data(); + scalar_t *p = a->data(); int64_t i, j; /* Upper Triangular Case */ @@ -580,7 +580,7 @@ void THTensor_(copyUpLoTriangle)(THTensor *a, const char *uplo) int n = a->size(0); /* Build full matrix */ - real *p = a->data(); + scalar_t *p = a->data(); int64_t i, j; /* Upper Triangular Case */ @@ -620,7 +620,7 @@ void THTensor_(potrf)(THTensor *ra_, THTensor *a, const char *uplo) lda = n; /* Run Factorization */ - THLapack_(potrf)(uplo[0], n, ra__->data(), lda, &info); + THLapack_(potrf)(uplo[0], n, ra__->data(), lda, &info); THLapackCheckWithCleanup("Lapack Error in %s : the leading minor of order %d is not positive definite", THCleanup(c10::raw::intrusive_ptr::decref(ra__);), "potrf", info, ""); @@ -661,8 +661,8 @@ void THTensor_(potrs)(THTensor *rb_, THTensor *b, THTensor *a, const char *uplo) lda = n; ldb = n; - THLapack_(potrs)(uplo[0], n, nrhs, ra__->data(), - lda, rb__->data(), ldb, &info); + THLapack_(potrs)(uplo[0], n, nrhs, ra__->data(), + lda, rb__->data(), ldb, &info); THLapackCheckWithCleanup("Lapack Error in %s : A(%d,%d) is zero, singular A", @@ -692,7 +692,7 @@ void THTensor_(potri)(THTensor *ra_, THTensor *a, const char *uplo) lda = n; /* Run inverse */ - THLapack_(potri)(uplo[0], n, ra__->data(), 
lda, &info); + THLapack_(potri)(uplo[0], n, ra__->data(), lda, &info); THLapackCheckWithCleanup("Lapack Error %s : A(%d,%d) is 0, A cannot be factorized", THCleanup(c10::raw::intrusive_ptr::decref(ra__);), "potri", info, info); @@ -717,7 +717,7 @@ void THTensor_(potri)(THTensor *ra_, THTensor *a, const char *uplo) * `tol` - double; user defined tolerance, or < 0 for automatic choice. The algorithm terminates when the pivot <= tol. */ -void THTensor_(pstrf)(THTensor *ra_, THIntTensor *rpiv_, THTensor *a, const char *uplo, real tol) { +void THTensor_(pstrf)(THTensor *ra_, THIntTensor *rpiv_, THTensor *a, const char *uplo, scalar_t tol) { THArgCheck(THTensor_nDimensionLegacyAll(a) == 2, 1, "A should be 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 1, "A should be square"); @@ -733,9 +733,9 @@ void THTensor_(pstrf)(THTensor *ra_, THIntTensor *rpiv_, THTensor *a, const char int lda = n; int rank, info; - THLapack_(pstrf)(uplo[0], n, ra__->data(), lda, + THLapack_(pstrf)(uplo[0], n, ra__->data(), lda, THIntTensor_data(rpiv_), &rank, tol, - work->data(), &info); + work->data(), &info); THLapackCheckWithCleanup("Lapack Error %s : matrix is rank deficient or not positive semidefinite", THCleanup( @@ -820,17 +820,17 @@ void THTensor_(geqrf)(THTensor *ra_, THTensor *rtau_, THTensor *a) /* Dry-run to query the suggested size of the workspace. */ int info = 0; - real wkopt = 0; - THLapack_(geqrf)(m, n, ra__->data(), lda, - rtau_->data(), + scalar_t wkopt = 0; + THLapack_(geqrf)(m, n, ra__->data(), lda, + rtau_->data(), &wkopt, -1, &info); /* Allocate the workspace and call LAPACK to do the real work. */ int lwork = (int)wkopt; THTensor *work = THTensor_(newWithSize1d)(lwork); - THLapack_(geqrf)(m, n, ra__->data(), lda, - rtau_->data(), - work->data(), lwork, &info); + THLapack_(geqrf)(m, n, ra__->data(), lda, + rtau_->data(), + work->data(), lwork, &info); THLapackCheckWithCleanup("Lapack Error %s : unknown Lapack error. info = %i", THCleanup( @@ -872,17 +872,17 @@ void THTensor_(orgqr)(THTensor *ra_, THTensor *a, THTensor *tau) /* Dry-run to query the suggested size of the workspace. */ int info = 0; - real wkopt = 0; - THLapack_(orgqr)(m, k, k, ra__->data(), lda, - tau->data(), + scalar_t wkopt = 0; + THLapack_(orgqr)(m, k, k, ra__->data(), lda, + tau->data(), &wkopt, -1, &info); /* Allocate the workspace and call LAPACK to do the real work. */ int lwork = (int)wkopt; THTensor *work = THTensor_(newWithSize1d)(lwork); - THLapack_(orgqr)(m, k, k, ra__->data(), lda, - tau->data(), - work->data(), lwork, &info); + THLapack_(orgqr)(m, k, k, ra__->data(), lda, + tau->data(), + work->data(), lwork, &info); THLapackCheckWithCleanup(" Lapack Error %s : unknown Lapack error. info = %i", THCleanup( @@ -935,17 +935,17 @@ void THTensor_(ormqr)(THTensor *ra_, THTensor *a, THTensor *tau, THTensor *c, co /* Dry-run to query the suggested size of the workspace. */ int info = 0; - real wkopt = 0; - THLapack_(ormqr)(side[0], trans[0], m, n, k, a->data(), lda, - tau->data(), ra__->data(), ldc, + scalar_t wkopt = 0; + THLapack_(ormqr)(side[0], trans[0], m, n, k, a->data(), lda, + tau->data(), ra__->data(), ldc, &wkopt, -1, &info); /* Allocate the workspace and call LAPACK to do the real work. 
*/ int lwork = (int)wkopt; THTensor *work = THTensor_(newWithSize1d)(lwork); - THLapack_(ormqr)(side[0], trans[0], m, n, k, a->data(), lda, - tau->data(), ra__->data(), ldc, - work->data(), lwork, &info); + THLapack_(ormqr)(side[0], trans[0], m, n, k, a->data(), lda, + tau->data(), ra__->data(), ldc, + work->data(), lwork, &info); THLapackCheckWithCleanup(" Lapack Error %s : unknown Lapack error. info = %i", THCleanup( @@ -1009,7 +1009,7 @@ void THTensor_(btrifact)(THTensor *ra_, THIntTensor *rpivots_, THIntTensor *rinf THTensor_(select)(rai, ra__, 0, batch); THIntTensor_select(rpivoti, rpivots_, 0, batch); - THLapack_(getrf)(n, n, rai->data(), lda, + THLapack_(getrf)(n, n, rai->data(), lda, THIntTensor_data(rpivoti), info_ptr); if (rinfo_) { info_ptr++; @@ -1112,8 +1112,8 @@ void THTensor_(btrisolve)(THTensor *rb_, THTensor *b, THTensor *atf, THIntTensor #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) int info; - THLapack_(getrs)('N', n, nrhs, ai->data(), lda, - THIntTensor_data(pivoti), rbi->data(), + THLapack_(getrs)('N', n, nrhs, ai->data(), lda, + THIntTensor_data(pivoti), rbi->data(), ldb, &info); if (info != 0) { THError("Error: Nonzero info."); diff --git a/aten/src/TH/generic/THTensorLapack.h b/aten/src/TH/generic/THTensorLapack.h index 8785943485078d..3c2fdf97d60183 100644 --- a/aten/src/TH/generic/THTensorLapack.h +++ b/aten/src/TH/generic/THTensorLapack.h @@ -17,7 +17,7 @@ TH_API void THTensor_(qr)(THTensor *rq_, THTensor *rr_, THTensor *a); TH_API void THTensor_(geqrf)(THTensor *ra_, THTensor *rtau_, THTensor *a); TH_API void THTensor_(orgqr)(THTensor *ra_, THTensor *a, THTensor *tau); TH_API void THTensor_(ormqr)(THTensor *ra_, THTensor *a, THTensor *tau, THTensor *c, const char *side, const char *trans); -TH_API void THTensor_(pstrf)(THTensor *ra_, THIntTensor *rpiv_, THTensor*a, const char* uplo, real tol); +TH_API void THTensor_(pstrf)(THTensor *ra_, THIntTensor *rpiv_, THTensor*a, const char* uplo, scalar_t tol); TH_API void THTensor_(btrifact)(THTensor *ra_, THIntTensor *rpivots_, THIntTensor *rinfo_, int pivot, THTensor *a); TH_API void THTensor_(btrisolve)(THTensor *rb_, THTensor *b, THTensor *atf, THIntTensor *pivots); diff --git a/aten/src/TH/generic/THTensorMath.cpp b/aten/src/TH/generic/THTensorMath.cpp index 7bd80fe26127a3..1a71e97ce0d02c 100644 --- a/aten/src/TH/generic/THTensorMath.cpp +++ b/aten/src/TH/generic/THTensorMath.cpp @@ -23,11 +23,11 @@ // Should wrap if the value (a) has a different sign than the divisor (b), but is not 0. 
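(Aside for context, not part of the patch: `modulo_wrap`, whose signature changes just below, is the predicate THTensorMath.cpp uses to turn C's truncating `%` into a floor-style modulo in the remainder-style ops. A minimal self-contained sketch of that behavior, with `long` standing in for the generated `scalar_t`:)

    #include <cstdio>

    typedef long scalar_t;  // stand-in: TH #defines scalar_t per dtype via its generic headers

    // Wrap when the truncated remainder (a) is nonzero and has a different
    // sign than the divisor (b) -- the same predicate as in the hunk below.
    static inline bool modulo_wrap(scalar_t a, scalar_t b) {
      return (a != 0) && ((a < 0) != (b < 0));
    }

    int main() {
      scalar_t t = -7, divisor = 3;
      scalar_t r = t % divisor;                    // truncated remainder: -1
      if (modulo_wrap(r, divisor)) r += divisor;   // floor-style result: 2
      printf("%ld\n", (long)r);
      return 0;
    }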
-static inline bool modulo_wrap(real a, real b) { +static inline bool modulo_wrap(scalar_t a, scalar_t b) { return (a != 0) && (a < 0) != (b < 0); } -void THTensor_(bitor)(THTensor *r_, THTensor *t, real value) +void THTensor_(bitor)(THTensor *r_, THTensor *t, scalar_t value) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) (void)r_; @@ -41,8 +41,8 @@ void THTensor_(bitor)(THTensor *r_, THTensor *t, real value) int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { - real *tp = t->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; idata(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; idata(); - real *rp = r_->data(); - /* real t_val; */ + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); + /* scalar_t t_val; */ int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i max_value ? max_value : *t_data);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); } #else serial_path = 1; #endif } if (serial_path) { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data);); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data);); } } -void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) +void THTensor_(cadd)(THTensor *r_, THTensor *t, scalar_t value, THTensor *src) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); @@ -149,9 +149,9 @@ void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { if(r_ == t) { - THBlas_(axpy)(THTensor_(nElement)(t), value, src->data(), 1, r_->data(), 1); + THBlas_(axpy)(THTensor_(nElement)(t), value, src->data(), 1, r_->data(), 1); } else { - TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len);); + TH_TENSOR_APPLY3_CONTIG(scalar_t, r_, scalar_t, t, scalar_t, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len);); } } else { #if _OPENMP @@ -159,7 +159,7 @@ void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) if (inOMP) { serial_path = 1; } else { - TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data + value * *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); } #else serial_path = 1; @@ -169,11 +169,11 @@ void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) serial_path = 1; } if (serial_path) { - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data + value * *src_data;); } } -void THTensor_(csub)(THTensor *r_, THTensor *t, real value, THTensor *src) +void 
THTensor_(csub)(THTensor *r_, THTensor *t, scalar_t value, THTensor *src) { THTensor_(cadd)(r_, t, -value, src); } @@ -189,14 +189,14 @@ void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src) int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { - TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len);); + TH_TENSOR_APPLY3_CONTIG(scalar_t, r_, scalar_t, t, scalar_t, src, THVector_(cmul)(r__data, t_data, src_data, r__len);); } else { #if _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { - TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data * *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); } #else serial_path = 1; @@ -206,11 +206,11 @@ void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src) serial_path = 1; } if (serial_path) { - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data * *src_data;); } } -void THTensor_(pow)(THTensor *r_, THTensor *t, real value) +void THTensor_(pow)(THTensor *r_, THTensor *t, scalar_t value) { THTensor_(resizeAs)(r_, t); if(value == 1){ @@ -220,7 +220,7 @@ void THTensor_(pow)(THTensor *r_, THTensor *t, real value) THTensor_(cmul)(r_, t, t); } else if(value == 3){ - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * *t_data * *t_data;); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = *t_data * *t_data * *t_data;); } #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #if defined (TH_REAL_IS_FLOAT) @@ -238,15 +238,15 @@ void THTensor_(pow)(THTensor *r_, THTensor *t, real value) THTensor_(cinv)(r_, t); } else if(value == -2){ - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data);); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data);); } else{ - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(pow)(*t_data, value);); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = TH_MATH_NAME(pow)(*t_data, value);); } #undef TH_MATH_NAME #else else { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = THTensor_(powOne)(*t_data, value);); + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = THTensor_(powOne)(*t_data, value);); } #endif } @@ -262,9 +262,9 @@ void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src) int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { - real *tp = t->data(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i> sp[i]; + rp[i] = ((scalar_t) tp[i]) >> sp[i]; #else rp[i] = ((ureal) 
tp[i]) >> sp[i]; #endif @@ -424,13 +424,13 @@ void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src) serial_path = 1; } else { #if defined(TH_REAL_IS_FLOAT) - TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data / powf(2, *src_data);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); #elif defined(TH_REAL_IS_DOUBLE) - TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data / pow(2, *src_data);, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); #elif defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = ((scalar_t)*t_data) >> *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); #else - TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = ((ureal)*t_data) >> *src_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); #endif } #else @@ -442,13 +442,13 @@ void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src) } if (serial_path) { #if defined(TH_REAL_IS_FLOAT) - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data);); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data / powf(2, *src_data);); #elif defined(TH_REAL_IS_DOUBLE) - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data);); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = *t_data / pow(2, *src_data);); #elif defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = ((scalar_t)*t_data) >> *src_data;); #else - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, t, scalar_t, src, *r__data = ((ureal)*t_data) >> *src_data;); #endif } } @@ -464,9 +464,9 @@ void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src) int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { - real *tp = t->data(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > 
TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *sp = src->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *sp = src->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idata(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; idim() != 2) || (THTensor_nDimensionLegacyNoScalars(vec) != 1) ) THError("matrix and vector expected, got %dD, %dD", @@ -838,25 +838,25 @@ void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor if(mat->stride(0) == 1 && LDA_COND(mat->size(0), mat->size(1), mat->stride(1))) { THBlas_(gemv)('n', mat->size(0), mat->size(1), - alpha, mat->data(), mat->stride(1), - vec->data(), THTensor_strideLegacyNoScalars(vec, 0), - beta, r_->data(), r_stride); + alpha, mat->data(), mat->stride(1), + vec->data(), THTensor_strideLegacyNoScalars(vec, 0), + beta, r_->data(), r_stride); } else if(mat->stride(1) == 1 && LDA_COND(mat->size(1), mat->size(0), mat->stride(0))) { THBlas_(gemv)('t', mat->size(1), mat->size(0), - alpha, mat->data(), mat->stride(0), - vec->data(), THTensor_strideLegacyNoScalars(vec, 0), - beta, r_->data(), r_stride); + alpha, mat->data(), mat->stride(0), + vec->data(), THTensor_strideLegacyNoScalars(vec, 0), + beta, r_->data(), r_stride); } else { THTensor *cmat = THTensor_(newContiguous)(mat); THBlas_(gemv)('t', mat->size(1), mat->size(0), - alpha, cmat->data(), cmat->stride(0), - vec->data(), THTensor_strideLegacyNoScalars(vec, 0), - beta, r_->data(), r_stride); + alpha, cmat->data(), cmat->stride(0), + vec->data(), THTensor_strideLegacyNoScalars(vec, 0), + beta, r_->data(), r_stride); c10::raw::intrusive_ptr::decref(cmat); } @@ -874,14 +874,14 @@ void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor #undef LDA_COND } -void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain) +void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, scalar_t gain) { int64_t N1 = m1->size(0); int64_t N2 = m2->size(0); int64_t dim; - real *m1_p; - real *m2_p; - real *r_p; + scalar_t *m1_p; + scalar_t *m2_p; + scalar_t *r_p; int64_t i; THTensor_(resize2d)(r_, N1, N2); @@ -895,17 +895,17 @@ void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain) dim = m1->size(1); THArgCheck(m1->size(1) == m2->size(1), 3, "m1 and m2 must have the same inner vector dim"); - m1_p = m1->data(); - m2_p = m2->data(); - r_p = r_->data(); + m1_p = m1->data(); + m2_p = m2->data(); + r_p = r_->data(); #pragma omp parallel for private(i) for (i=0; idata(), + m1_->data(), ldm1_, - m2_->data(), + m2_->data(), ldm2_, beta, - r__->data(), + r__->data(), ldr__); /* free intermediate variables */ @@ -1058,7 +1058,7 @@ void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor THTensor_(freeCopyTo)(r__, r_); } -void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2) +void THTensor_(addr)(THTensor *r_, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *vec1, THTensor *vec2) { if( (THTensor_nDimensionLegacyNoScalars(vec1) != 1) || 
(THTensor_nDimensionLegacyNoScalars(vec2) != 1) ) THError("vector and vector expected, got %dD, %dD tensors", @@ -1097,25 +1097,25 @@ void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor if(r_->stride(0) == 1 && LDA_COND(vec1_size, vec2_size, r_->stride(1))) { THBlas_(ger)(vec1_size, vec2_size, - alpha, vec1->data(), vec1_stride, - vec2->data(), vec2_stride, - r_->data(), r_->stride(1)); + alpha, vec1->data(), vec1_stride, + vec2->data(), vec2_stride, + r_->data(), r_->stride(1)); } else if(r_->stride(1) == 1 && LDA_COND(vec2_size, vec1_size, r_->stride(0))) { THBlas_(ger)(vec2_size, vec1_size, - alpha, vec2->data(), vec2_stride, - vec1->data(), vec1_stride, - r_->data(), r_->stride(0)); + alpha, vec2->data(), vec2_stride, + vec1->data(), vec1_stride, + r_->data(), r_->stride(0)); } else { THTensor *cr = THTensor_(newClone)(r_); THBlas_(ger)(vec2_size, vec1_size, - alpha, vec2->data(), vec2_stride, - vec1->data(), vec1_stride, - cr->data(), cr->stride(0)); + alpha, vec2->data(), vec2_stride, + vec1->data(), vec1_stride, + cr->data(), cr->stride(0)); THTensor_(freeCopyTo)(cr, r_); } @@ -1123,7 +1123,7 @@ void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor #undef LDA_COND } -void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) +void THTensor_(addbmm)(THTensor *result, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *batch1, THTensor *batch2) { int64_t batch; diff --git a/aten/src/TH/generic/THTensorMath.h b/aten/src/TH/generic/THTensorMath.h index 08f3f1594afcd0..84412b8e76a3ca 100644 --- a/aten/src/TH/generic/THTensorMath.h +++ b/aten/src/TH/generic/THTensorMath.h @@ -2,10 +2,10 @@ #define TH_GENERIC_FILE "generic/THTensorMath.h" #else -TH_API void THTensor_(fill)(THTensor *r_, real value); +TH_API void THTensor_(fill)(THTensor *r_, scalar_t value); TH_API void THTensor_(zero)(THTensor *r_); -TH_API void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value); +TH_API void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, scalar_t value); TH_API void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src); TH_API void THTensor_(maskedSelect)(THTensor *tensor, THTensor* src, THByteTensor *mask); @@ -14,43 +14,43 @@ TH_API void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor); TH_API void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index); TH_API void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); TH_API void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); -TH_API void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val); +TH_API void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, scalar_t val); TH_API void THTensor_(take)(THTensor *tensor, THTensor *src, THLongTensor *index); TH_API void THTensor_(put)(THTensor *tensor, THLongTensor *index, THTensor *src, int accumulate); TH_API void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index); TH_API void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); TH_API void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); -TH_API void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val); +TH_API void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, scalar_t val); TH_API accreal 
THTensor_(dot)(THTensor *t, THTensor *src); -TH_API real THTensor_(minall)(THTensor *t); -TH_API real THTensor_(maxall)(THTensor *t); -TH_API real THTensor_(medianall)(THTensor *t); +TH_API scalar_t THTensor_(minall)(THTensor *t); +TH_API scalar_t THTensor_(maxall)(THTensor *t); +TH_API scalar_t THTensor_(medianall)(THTensor *t); TH_API accreal THTensor_(sumall)(THTensor *t); TH_API accreal THTensor_(prodall)(THTensor *t); TH_API void THTensor_(neg)(THTensor *self, THTensor *src); TH_API void THTensor_(cinv)(THTensor *self, THTensor *src); -TH_API void THTensor_(add)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(sub)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(add_scaled)(THTensor *r_, THTensor *t, real value, real alpha); -TH_API void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, real value, real alpha); -TH_API void THTensor_(mul)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(div)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(lshift)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(rshift)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(fmod)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(remainder)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value); -TH_API void THTensor_(bitand)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(bitor)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value); - -TH_API void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src); -TH_API void THTensor_(csub)(THTensor *self, THTensor *src1, real value, THTensor *src2); +TH_API void THTensor_(add)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(sub)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(add_scaled)(THTensor *r_, THTensor *t, scalar_t value, scalar_t alpha); +TH_API void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, scalar_t value, scalar_t alpha); +TH_API void THTensor_(mul)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(div)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(lshift)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(rshift)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(fmod)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(remainder)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(clamp)(THTensor *r_, THTensor *t, scalar_t min_value, scalar_t max_value); +TH_API void THTensor_(bitand)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(bitor)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(bitxor)(THTensor *r_, THTensor *t, scalar_t value); + +TH_API void THTensor_(cadd)(THTensor *r_, THTensor *t, scalar_t value, THTensor *src); +TH_API void THTensor_(csub)(THTensor *self, THTensor *src1, scalar_t value, THTensor *src2); TH_API void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src); TH_API void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src); TH_API void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src); @@ -62,17 +62,17 @@ TH_API void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src); TH_API void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src); TH_API void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(addcmul)(THTensor *r_, THTensor *t, 
real value, THTensor *src1, THTensor *src2); -TH_API void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2); +TH_API void THTensor_(addcmul)(THTensor *r_, THTensor *t, scalar_t value, THTensor *src1, THTensor *src2); +TH_API void THTensor_(addcdiv)(THTensor *r_, THTensor *t, scalar_t value, THTensor *src1, THTensor *src2); -TH_API void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec); -TH_API void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat1, THTensor *mat2); -TH_API void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2); +TH_API void THTensor_(addmv)(THTensor *r_, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *mat, THTensor *vec); +TH_API void THTensor_(addmm)(THTensor *r_, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *mat1, THTensor *mat2); +TH_API void THTensor_(addr)(THTensor *r_, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *vec1, THTensor *vec2); -TH_API void THTensor_(addbmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2); -TH_API void THTensor_(baddbmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2); +TH_API void THTensor_(addbmm)(THTensor *r_, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *batch1, THTensor *batch2); +TH_API void THTensor_(baddbmm)(THTensor *r_, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *batch1, THTensor *batch2); -TH_API void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain); +TH_API void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, scalar_t gain); TH_API ptrdiff_t THTensor_(numel)(THTensor *t); void THTensor_(preserveReduceDimSemantics)(THTensor *r_, int in_dims, int reduce_dimension, int keepdim); @@ -91,8 +91,8 @@ TH_API void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimensi TH_API void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src); TH_API void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src); -TH_API void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value); -TH_API void THTensor_(cminValue)(THTensor *r, THTensor *t, real value); +TH_API void THTensor_(cmaxValue)(THTensor *r, THTensor *t, scalar_t value); +TH_API void THTensor_(cminValue)(THTensor *r, THTensor *t, scalar_t value); TH_API void THTensor_(zerosLike)(THTensor *r_, THTensor *input); TH_API void THTensor_(onesLike)(THTensor *r_, THTensor *input); @@ -111,19 +111,19 @@ TH_API void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInpu TH_API int THTensor_(equal)(THTensor *ta, THTensor *tb); -TH_API void THTensor_(ltValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(leValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(gtValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(geValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(neValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(eqValue)(THByteTensor *r_, THTensor* t, real value); +TH_API void THTensor_(ltValue)(THByteTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(leValue)(THByteTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(gtValue)(THByteTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(geValue)(THByteTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(neValue)(THByteTensor *r_, THTensor* t, scalar_t 
value); +TH_API void THTensor_(eqValue)(THByteTensor *r_, THTensor* t, scalar_t value); -TH_API void THTensor_(ltValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(leValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(gtValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(geValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(neValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(eqValueT)(THTensor *r_, THTensor* t, real value); +TH_API void THTensor_(ltValueT)(THTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(leValueT)(THTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(gtValueT)(THTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(geValueT)(THTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(neValueT)(THTensor *r_, THTensor* t, scalar_t value); +TH_API void THTensor_(eqValueT)(THTensor *r_, THTensor* t, scalar_t value); TH_API void THTensor_(ltTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); TH_API void THTensor_(leTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); @@ -139,8 +139,8 @@ TH_API void THTensor_(geTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); TH_API void THTensor_(neTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); TH_API void THTensor_(eqTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(pow)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(tpow)(THTensor *r_, real value, THTensor *t); +TH_API void THTensor_(pow)(THTensor *r_, THTensor *t, scalar_t value); +TH_API void THTensor_(tpow)(THTensor *r_, scalar_t value, THTensor *t); #if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) || defined(TH_REAL_IS_LONG) TH_API void THTensor_(abs)(THTensor *r_, THTensor *t); @@ -180,24 +180,24 @@ TH_API void THTensor_(round)(THTensor *r_, THTensor *t); TH_API void THTensor_(abs)(THTensor *r_, THTensor *t); TH_API void THTensor_(trunc)(THTensor *r_, THTensor *t); TH_API void THTensor_(frac)(THTensor *r_, THTensor *t); -TH_API void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight); +TH_API void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, scalar_t weight); TH_API void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim); TH_API void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim); TH_API void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim); -TH_API void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim); -TH_API void THTensor_(renorm)(THTensor *r_, THTensor *t, real value, int dimension, real maxnorm); -TH_API accreal THTensor_(dist)(THTensor *a, THTensor *b, real value); -TH_API void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue); -TH_API void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue); +TH_API void THTensor_(norm)(THTensor *r_, THTensor *t, scalar_t value, int dimension, int keepdim); +TH_API void THTensor_(renorm)(THTensor *r_, THTensor *t, scalar_t value, int dimension, scalar_t maxnorm); +TH_API accreal THTensor_(dist)(THTensor *a, THTensor *b, scalar_t value); +TH_API void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, scalar_t minvalue, scalar_t maxvalue); +TH_API void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, scalar_t minvalue, scalar_t maxvalue); TH_API accreal 
THTensor_(meanall)(THTensor *self); TH_API accreal THTensor_(varall)(THTensor *self, int biased); TH_API accreal THTensor_(stdall)(THTensor *self, int biased); -TH_API accreal THTensor_(normall)(THTensor *t, real value); +TH_API accreal THTensor_(normall)(THTensor *t, scalar_t value); -TH_API void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n); -TH_API void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n); +TH_API void THTensor_(linspace)(THTensor *r_, scalar_t a, scalar_t b, int64_t n); +TH_API void THTensor_(logspace)(THTensor *r_, scalar_t a, scalar_t b, int64_t n); TH_API void THTensor_(dirichlet_grad)(THTensor *self, THTensor *x, THTensor *alpha, THTensor *total); #endif diff --git a/aten/src/TH/generic/THTensorMoreMath.cpp b/aten/src/TH/generic/THTensorMoreMath.cpp index 177248bd39a84a..05916baad097b9 100644 --- a/aten/src/TH/generic/THTensorMoreMath.cpp +++ b/aten/src/TH/generic/THTensorMoreMath.cpp @@ -4,7 +4,7 @@ #include -void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) +void THTensor_(baddbmm)(THTensor *result, scalar_t beta, THTensor *t, scalar_t alpha, THTensor *batch1, THTensor *batch2) { int64_t batch; @@ -86,11 +86,11 @@ void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int // two implementations optimized for data locality if (THTensor_strideLegacyNoScalars(t, dimension) == 1) { - real theMax; - real value; + scalar_t theMax; + scalar_t value; int64_t theIndex; int64_t i; - TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, + TH_TENSOR_DIM_APPLY3(scalar_t, t, scalar_t, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, theMax = t_data[0]; theIndex = 0; @@ -136,7 +136,7 @@ void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int tempIndices_->set_size(dimension,THTensor_sizeLegacyNoScalars(t, dimension)); tempIndices_->set_stride(dimension, 0); - TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension, + TH_TENSOR_APPLY3_D(scalar_t, t, scalar_t, tempValues_, int64_t, tempIndices_, dimension, if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) { *tempValues__data = *t_data; *tempIndices__data = *tempIndices__dimOffset; @@ -167,11 +167,11 @@ void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int // two implementations optimized for data locality if (THTensor_strideLegacyNoScalars(t, dimension) == 1) { - real theMax; - real value; + scalar_t theMax; + scalar_t value; int64_t theIndex; int64_t i; - TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, + TH_TENSOR_DIM_APPLY3(scalar_t, t, scalar_t, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, theMax = t_data[0]; theIndex = 0; @@ -217,7 +217,7 @@ void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int tempIndices_->set_size(dimension,THTensor_sizeLegacyNoScalars(t, dimension)); tempIndices_->set_stride(dimension, 0); - TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension, + TH_TENSOR_APPLY3_D(scalar_t, t, scalar_t, tempValues_, int64_t, tempIndices_, dimension, if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) { *tempValues__data = *t_data; *tempIndices__data = *tempIndices__dimOffset; @@ -250,8 +250,8 @@ void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim) serial_path = 1; } else { int r_Contig = THTensor_(isContiguous)(r_); - real 
*tp = t->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); if(r_Contig && (tp != rp)){ ptrdiff_t iter = 0; ptrdiff_t r_Size = THTensor_(nElement)(r_); @@ -270,8 +270,8 @@ void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim) tBasicIndex += quot*t->stride(j); } } - real *t_data = tp+tBasicIndex; - real *r__data = rp+iter; + scalar_t *t_data = tp+tBasicIndex; + scalar_t *r__data = rp+iter; *r__data = 0; for(j=0; j < THTensor_sizeLegacyNoScalars(t, dimension); ++j) { *r__data += *(t_data + j*THTensor_strideLegacyNoScalars(t, dimension)); @@ -287,12 +287,12 @@ void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim) if (serial_path) { // two implementations optimized for data locality if (THTensor_strideLegacyNoScalars(t, dimension) == 1) { - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, accreal sum = 0; int64_t i; for(i = 0; i < t_size; i++) sum += t_data[i*t_stride]; - *r__data = (real)sum;); + *r__data = (scalar_t)sum;); } else { THTensor_(zero)(r_); THTensor *temp_ = THTensor_(newWithTensor)(r_); @@ -300,7 +300,7 @@ void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim) temp_->set_size(dimension,THTensor_sizeLegacyNoScalars(t, dimension)); temp_->set_stride(dimension, 0); - TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;); + TH_TENSOR_APPLY2(scalar_t, temp_, scalar_t, t, *temp__data = *temp__data + *t_data;); c10::raw::intrusive_ptr::decref(temp_); } } @@ -327,8 +327,8 @@ void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim) serial_path = 1; } else { int r_Contig = THTensor_(isContiguous)(r_); - real *tp = t->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); if(r_Contig && (tp != rp)){ ptrdiff_t iter = 0; ptrdiff_t r_Size = THTensor_(nElement)(r_); @@ -347,8 +347,8 @@ void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim) tBasicIndex += quot*t->stride(j); } } - real *t_data = tp+tBasicIndex; - real *r__data = rp+iter; + scalar_t *t_data = tp+tBasicIndex; + scalar_t *r__data = rp+iter; *r__data = 1; for(j=0; j < THTensor_sizeLegacyNoScalars(t, dimension); ++j) { *r__data *= *(t_data + j*THTensor_strideLegacyNoScalars(t, dimension)); @@ -365,12 +365,12 @@ void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim) if(serial_path) { // two implementations optimized for data locality if (THTensor_strideLegacyNoScalars(t, dimension) == 1) { - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, accreal prod = 1; int64_t i; for(i = 0; i < t_size; i++) prod *= t_data[i*t_stride]; - *r__data = (real)prod;); + *r__data = (scalar_t)prod;); } else { THTensor_(fill)(r_, 1); THTensor *temp_ = THTensor_(newWithTensor)(r_); @@ -378,7 +378,7 @@ void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim) temp_->set_size(dimension,THTensor_sizeLegacyNoScalars(t, dimension)); temp_->set_stride(dimension, 0); - TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;); + TH_TENSOR_APPLY2(scalar_t, temp_, scalar_t, t, *temp__data = *temp__data * *t_data;); c10::raw::intrusive_ptr::decref(temp_); } } @@ -394,13 +394,13 @@ void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension) THTensor_(resizeAs)(r_, t); - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, accreal 
cumsum = 0; int64_t i; for(i = 0; i < t_size; i++) { cumsum += t_data[i*t_stride]; - r__data[i*r__stride] = (real)cumsum; + r__data[i*r__stride] = (scalar_t)cumsum; }); } @@ -411,13 +411,13 @@ void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension) THTensor_(resizeAs)(r_, t); - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, accreal cumprod = 1; int64_t i; for(i = 0; i < t_size; i++) { cumprod *= t_data[i*t_stride]; - r__data[i*r__stride] = (real)cumprod; + r__data[i*r__stride] = (scalar_t)cumprod; }); } @@ -427,11 +427,11 @@ void THTensor_(sign)(THTensor *r_, THTensor *t) THTensor_(resizeAs)(r_, t); #if defined (TH_REAL_IS_BYTE) - TH_TENSOR_APPLY2(real, r_, real, t, + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, if (*t_data > 0) *r__data = 1; else *r__data = 0;); #else - TH_TENSOR_APPLY2(real, r_, real, t, + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, if (*t_data > 0) *r__data = 1; else if (*t_data < 0) *r__data = -1; else *r__data = 0;); @@ -441,7 +441,7 @@ void THTensor_(sign)(THTensor *r_, THTensor *t) accreal THTensor_(trace)(THTensor *t) { - real *t_data = t->data(); + scalar_t *t_data = t->data(); accreal sum = 0; int64_t i = 0; int64_t t_stride_0, t_stride_1, t_diag_size; @@ -500,7 +500,7 @@ void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension) THTensor_(resizeAs)(r_, a); - TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension, + TH_TENSOR_DIM_APPLY3(scalar_t, a, scalar_t, b, scalar_t, r_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride]; r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride]; @@ -509,25 +509,25 @@ void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension) void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY3(real, r, real, t, real, src, + TH_TENSOR_APPLY3(scalar_t, r, scalar_t, t, scalar_t, src, *r_data = *t_data > *src_data ? *t_data : *src_data;); } void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY3(real, r, real, t, real, src, + TH_TENSOR_APPLY3(scalar_t, r, scalar_t, t, scalar_t, src, *r_data = *t_data < *src_data ? *t_data : *src_data;); } -void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) { +void THTensor_(cmaxValue)(THTensor *r, THTensor *t, scalar_t value) { THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY2(real, r, real, t, + TH_TENSOR_APPLY2(scalar_t, r, scalar_t, t, *r_data = *t_data < value ? value : *t_data;); // this order propagates NaN } -void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) { +void THTensor_(cminValue)(THTensor *r, THTensor *t, scalar_t value) { THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY2(real, r, real, t, + TH_TENSOR_APPLY2(scalar_t, r, scalar_t, t, *r_data = *t_data > value ? value : *t_data;); // this order propagates NaN } @@ -549,18 +549,18 @@ void THTensor_(diag)(THTensor *r_, THTensor *t, int k) if(THTensor_(nDimensionLegacyNoScalars)(t) == 1) { - real *t_data = t->data(); + scalar_t *t_data = t->data(); int64_t t_stride_0 = THTensor_strideLegacyNoScalars(t, 0); int64_t t_size = THTensor_sizeLegacyNoScalars(t, 0); int64_t sz = t_size + (k >= 0 ? 
k : -k); - real *r__data; + scalar_t *r__data; int64_t r__stride_0; int64_t r__stride_1; int64_t i; THTensor_(resize2d)(r_, sz, sz); THTensor_(zero)(r_); - r__data = r_->data(); + r__data = r_->data(); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0); @@ -570,11 +570,11 @@ void THTensor_(diag)(THTensor *r_, THTensor *t, int k) } else { - real *t_data = t->data(); + scalar_t *t_data = t->data(); int64_t t_stride_0 = THTensor_(stride)(t, 0); int64_t t_stride_1 = THTensor_(stride)(t, 1); int64_t sz; - real *r__data; + scalar_t *r__data; int64_t r__stride_0; int64_t i; @@ -583,7 +583,7 @@ void THTensor_(diag)(THTensor *r_, THTensor *t, int k) else sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1)); THTensor_(resize1d)(r_, sz); - r__data = r_->data(); + r__data = r_->data(); r__stride_0 = THTensor_(stride)(r_, 0); t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0); @@ -594,7 +594,7 @@ void THTensor_(diag)(THTensor *r_, THTensor *t, int k) void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m) { - real *r__data; + scalar_t *r__data; int64_t i, sz; THArgCheck(n > 0, 1, "invalid argument"); @@ -606,7 +606,7 @@ void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m) THTensor_(zero)(r_); i = 0; - r__data = r_->data(); + r__data = r_->data(); sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1)); for(i = 0; i < sz; i++) r__data[i*(r_->stride(0)+r_->stride(1))] = 1; @@ -616,7 +616,7 @@ void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m) void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { ptrdiff_t size; - real i = 0; + scalar_t i = 0; THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) @@ -628,12 +628,12 @@ void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step) THTensor_(resize1d)(r_, size); } - TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;); + TH_TENSOR_APPLY(scalar_t, r_, *r__data = xmin + (i++)*step;); } void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { ptrdiff_t size; - real i = 0; + scalar_t i = 0; THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) @@ -645,28 +645,28 @@ void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { THTensor_(resize1d)(r_, size); } - TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;); + TH_TENSOR_APPLY(scalar_t, r_, *r__data = xmin + (i++)*step;); } void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n) { - real *r__data; + scalar_t *r__data; int64_t r__stride_0; int64_t i; THArgCheck(n > 0, 1, "must be strictly positive"); THTensor_(resize1d)(r_, n); - r__data = r_->data(); + r__data = r_->data(); r__stride_0 = THTensor_(stride)(r_,0); for(i = 0; i < n; i++) - r__data[i*r__stride_0] = (real)(i); + r__data[i*r__stride_0] = (scalar_t)(i); for(i = 0; i < n-1; i++) { int64_t z = THRandom_random(_generator) % (n-i); - real sav = r__data[i*r__stride_0]; + scalar_t sav = r__data[i*r__stride_0]; r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0]; r__data[(z+i)*r__stride_0] = sav; } @@ -702,10 +702,10 @@ void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n) REAL_SWAP(ARR(III), ARR(JJJ)); \ LONG_SWAP(IDX(III), IDX(JJJ)) -static void THTensor_(quicksortascend)(real *arr, int64_t *idx, int64_t elements, int64_t stride) +static void 
THTensor_(quicksortascend)(scalar_t *arr, int64_t *idx, int64_t elements, int64_t stride) { int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; - real rswap, piv; + scalar_t rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ @@ -791,10 +791,10 @@ static void THTensor_(quicksortascend)(real *arr, int64_t *idx, int64_t elements } } -static void THTensor_(quicksortdescend)(real *arr, int64_t *idx, int64_t elements, int64_t stride) +static void THTensor_(quicksortdescend)(scalar_t *arr, int64_t *idx, int64_t elements, int64_t stride) { int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; - real rswap, piv; + scalar_t rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ @@ -894,7 +894,7 @@ void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimensio if(descendingOrder) { - TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, rt_, int64_t, ri_, dimension, int64_t i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; @@ -902,7 +902,7 @@ void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimensio } else { - TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, rt_, int64_t, ri_, dimension, int64_t i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; @@ -914,10 +914,10 @@ void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimensio public domain implementation at http://ndevilla.free.fr/median/median/ Adapted similarly to the above Quicksort algorithm. This version does not produce indices along with values. */ -static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride) +static void THTensor_(quickselectnoidx)(scalar_t *arr, int64_t k, int64_t elements, int64_t stride) { int64_t P, L, R, i, j; - real rswap, piv; + scalar_t rswap, piv; L = 0; R = elements-1; @@ -960,10 +960,10 @@ static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, /* Implementation of the Quickselect algorithm, based on Nicolas Devillard's public domain implementation at http://ndevilla.free.fr/median/median/ Adapted similarly to the above Quicksort algorithm. 
*/ -static void THTensor_(quickselect)(real *arr, int64_t *idx, int64_t k, int64_t elements, int64_t stride) +static void THTensor_(quickselect)(scalar_t *arr, int64_t *idx, int64_t k, int64_t elements, int64_t stride) { int64_t P, L, R, i, j, swap; - real rswap, piv; + scalar_t rswap, piv; L = 0; R = elements-1; @@ -1009,21 +1009,21 @@ static void THTensor_(quickselect)(real *arr, int64_t *idx, int64_t k, int64_t e #undef REAL_SWAP #undef BOTH_SWAP -real THTensor_(medianall)(THTensor *tensor) +scalar_t THTensor_(medianall)(THTensor *tensor) { THArgCheck(THTensor_nDimensionLegacyAll(tensor) > 0, 1, "tensor must have one dimension"); - real theMedian; + scalar_t theMedian; ptrdiff_t numel; int64_t k; THTensor *temp_; - real *temp__data; + scalar_t *temp__data; numel = THTensor_(nElement)(tensor); k = (numel-1) >> 1; temp_ = THTensor_(newClone)(tensor); - temp__data = temp_->data(); + temp__data = temp_->data(); THTensor_(quickselectnoidx)(temp__data, k, numel, 1); @@ -1038,7 +1038,7 @@ void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int { THTensor *temp_; THLongTensor *tempi_; - real *temp__data; + scalar_t *temp__data; int64_t *tempi__data; int64_t t_size_dim; @@ -1056,16 +1056,16 @@ void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); - temp__data = temp_->data(); + temp__data = temp_->data(); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); - TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, + TH_TENSOR_DIM_APPLY3(scalar_t, t, scalar_t, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; - real mode = 0; + scalar_t mode = 0; int64_t modei = 0; int64_t temp_freq = 0; int64_t max_freq = 0; @@ -1104,7 +1104,7 @@ void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, { THTensor *temp_; THLongTensor *tempi_; - real *temp__data; + scalar_t *temp__data; int64_t *tempi__data; int64_t t_size_dim; @@ -1123,13 +1123,13 @@ void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); - temp__data = temp_->data(); + temp__data = temp_->data(); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); - TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, + TH_TENSOR_DIM_APPLY3(scalar_t, t, scalar_t, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < t_size_dim; i++) @@ -1170,7 +1170,7 @@ void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int64_t k, i THTensor *tmpResults = THTensor_(new)(); THTensor_(resize1d)(tmpResults, sliceSize); - real *tmp__data = tmpResults->data(); + scalar_t *tmp__data = tmpResults->data(); THLongTensor *tmpIndices = THLongTensor_new(); THLongTensor_resize1d(tmpIndices, sliceSize); @@ -1186,7 +1186,7 @@ void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int64_t k, i if (dir) { /* k largest elements, descending order (optional: see sorted) */ int64_t K = sliceSize - k; - TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim, + TH_TENSOR_DIM_APPLY3(scalar_t, t, scalar_t, rt_, int64_t, ri_, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < sliceSize; i++) @@ -1206,7 +1206,7 @@ void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, 
THTensor *t, int64_t k, i } else { /* k smallest elements, ascending order (optional: see sorted) */ - TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim, + TH_TENSOR_DIM_APPLY3(scalar_t, t, scalar_t, rt_, int64_t, ri_, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < sliceSize; i++) @@ -1233,7 +1233,7 @@ void THTensor_(tril)(THTensor *r_, THTensor *t, int64_t k) int64_t t_size_0, t_size_1; int64_t t_stride_0, t_stride_1; int64_t r__stride_0, r__stride_1; - real *t_data, *r__data; + scalar_t *t_data, *r__data; int64_t r, c; THArgCheck(THTensor_(nDimensionLegacyAll)(t) == 2, 1, "expected a matrix"); @@ -1246,8 +1246,8 @@ void THTensor_(tril)(THTensor *r_, THTensor *t, int64_t k) t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); - r__data = r_->data(); - t_data = t->data(); + r__data = r_->data(); + t_data = t->data(); for(r = 0; r < t_size_0; r++) { @@ -1264,7 +1264,7 @@ void THTensor_(triu)(THTensor *r_, THTensor *t, int64_t k) int64_t t_size_0, t_size_1; int64_t t_stride_0, t_stride_1; int64_t r__stride_0, r__stride_1; - real *t_data, *r__data; + scalar_t *t_data, *r__data; int64_t r, c; THArgCheck(THTensor_(nDimensionLegacyAll)(t) == 2, 1, "expected a matrix"); @@ -1277,8 +1277,8 @@ void THTensor_(triu)(THTensor *r_, THTensor *t, int64_t k) t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); - r__data = r_->data(); - t_data = t->data(); + r__data = r_->data(); + t_data = t->data(); for(r = 0; r < t_size_0; r++) { @@ -1382,16 +1382,16 @@ void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int // Second path for non-contiguous int64_t offset; if (dimension == 0 && allContiguous) { - real* result_data = THStorage_(data)(THTensor_getStoragePtr(result)) + result->storage_offset(); + scalar_t* result_data = THStorage_(data)(THTensor_getStoragePtr(result)) + result->storage_offset(); offset = 0; for (int j = 0; j < numInputs; j++) { if (!should_skip(inputs[j])) { THTensor* input0 = inputs[j]; - real* input0_data = THStorage_(data)(THTensor_getStoragePtr(input0)) + input0->storage_offset(); + scalar_t* input0_data = THStorage_(data)(THTensor_getStoragePtr(input0)) + input0->storage_offset(); int64_t input0_size = THTensor_(nElement)(input0); // C standard says you can't pass nullptrs to memcpy, even if the size is 0; ubsan checks this. if (input0_size != 0) { - memcpy(result_data + offset, input0_data, input0_size*sizeof(real)); + memcpy(result_data + offset, input0_data, input0_size*sizeof(scalar_t)); } offset += input0_size; } @@ -1418,8 +1418,8 @@ int THTensor_(equal)(THTensor *ta, THTensor* tb) return 0; if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) { - real *tap = ta->data(); - real *tbp = tb->data(); + scalar_t *tap = ta->data(); + scalar_t *tbp = tb->data(); ptrdiff_t sz = THTensor_(nElement)(ta); ptrdiff_t i; for (i=0; idim(), THTensor_getSizePtr(t), NULL); \ - TH_TENSOR_APPLY2(unsigned char, r_, real, t, \ + TH_TENSOR_APPLY2(unsigned char, r_, scalar_t, t, \ *r__data = (*t_data OP value) ? 1 : 0;); \ } \ - void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \ + void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, scalar_t value) \ { \ THTensor_(resizeNd)(r_, t->dim(), THTensor_getSizePtr(t), NULL); \ - TH_TENSOR_APPLY2(real, r_, real, t, \ + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, \ *r__data = (*t_data OP value) ? 
1 : 0;); \ } \ void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \ { \ THByteTensor_resizeNd(r_, ta->dim(), THTensor_getSizePtr(ta), NULL); \ - TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \ + TH_TENSOR_APPLY3(unsigned char, r_, scalar_t, ta, scalar_t, tb, \ *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \ } \ void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \ { \ THTensor_(resizeNd)(r_, ta->dim(), THTensor_getSizePtr(ta), NULL); \ - TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \ + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, ta, scalar_t, tb, \ *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \ } \ @@ -1482,9 +1482,9 @@ TENSOR_IMPLEMENT_LOGICAL(ne,!=) int tContig = THTensor_(isContiguous)(t); \ int inOMP = omp_in_parallel(); \ if( !inOMP ){ \ - TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);, OMP_THRESHOLD); \ + TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = CFUNC(*t_data);, OMP_THRESHOLD); \ } else { \ - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \ + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = CFUNC(*t_data);); \ } \ } @@ -1499,14 +1499,14 @@ TENSOR_IMPLEMENT_LOGICAL(ne,!=) int r_Contig = THTensor_(isContiguous)(r_); \ int tContig = THTensor_(isContiguous)(t); \ if (r_Contig && tContig) { \ - TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \ + TH_TENSOR_APPLY2_CONTIG(scalar_t, r_, scalar_t, t, THVector_(NAME)(r__data, t_data, r__len);); \ } else { \ int inOMP = omp_in_parallel(); \ if( !inOMP ){ \ - TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);, OMP_THRESHOLD); \ + TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, scalar_t, r_, scalar_t, t, *r__data = CFUNC(*t_data);, OMP_THRESHOLD); \ } \ else { \ - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \ + TH_TENSOR_APPLY2(scalar_t, r_, scalar_t, t, *r__data = CFUNC(*t_data);); \ } \ } \ } @@ -1520,7 +1520,7 @@ TENSOR_IMPLEMENT_LOGICAL(ne,!=) void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ - TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \ + TH_TENSOR_APPLY2(scalar_t, t, scalar_t, r_, *r__data = CFUNC(*t_data);); \ } \ #define LAB_IMPLEMENT_BASIC_FUNCTION_3_ARGS(NAME, CFUNC, PSEUDO_OMP_THRESHOLD) \ @@ -1533,9 +1533,9 @@ TENSOR_IMPLEMENT_LOGICAL(ne,!=) int r_Contig = THTensor_(isContiguous)(r_); \ int tContig = THTensor_(isContiguous)(t); \ if (r_Contig && tContig) { \ - TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \ + TH_TENSOR_APPLY2_CONTIG(scalar_t, r_, scalar_t, t, THVector_(NAME)(r__data, t_data, r__len);); \ } else { \ - TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \ + TH_TENSOR_APPLY2(scalar_t, t, scalar_t, r_, *r__data = CFUNC(*t_data);); \ } \ } \ @@ -1585,40 +1585,40 @@ LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs) int THTensor_(logicalAndAll)(THTensor *tensor) { - real prod = 1; + scalar_t prod = 1; int serial_path = 0; #ifdef _OPENMP int inOMP = omp_in_parallel(); if(inOMP) { serial_path = 1; } else { - TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, &&:prod, prod = prod && *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY_REDUCTION_OMP(scalar_t, tensor, &&:prod, prod = prod && *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); } #else serial_path = 1; #endif if (serial_path) { - TH_TENSOR_APPLY(real, tensor, prod = prod && *tensor_data;); + 
TH_TENSOR_APPLY(scalar_t, tensor, prod = prod && *tensor_data;); } return prod; } int THTensor_(logicalAnyAll)(THTensor *tensor) { - real sum = 0; + scalar_t sum = 0; int serial_path = 0; #ifdef _OPENMP int inOMP = omp_in_parallel(); if(inOMP) { serial_path = 1; } else { - TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, ||:sum, sum = sum || *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); + TH_TENSOR_APPLY_REDUCTION_OMP(scalar_t, tensor, ||:sum, sum = sum || *tensor_data;, UNCERTAIN_TH_OMP_OVERHEAD_THRESHOLD); } #else serial_path = 1; #endif if (serial_path) { - TH_TENSOR_APPLY(real, tensor, sum = sum || *tensor_data;); + TH_TENSOR_APPLY(scalar_t, tensor, sum = sum || *tensor_data;); } return (bool)sum; } @@ -1640,8 +1640,8 @@ void THTensor_(logicalAnd)(THTensor *r_, THTensor *t, int dimension, int keepdim serial_path = 1; } else { int r_Contig = THTensor_(isContiguous)(r_); - real *tp = t->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); if(r_Contig && (tp != rp)){ ptrdiff_t iter = 0; ptrdiff_t r_Size = THTensor_(nElement)(r_); @@ -1660,8 +1660,8 @@ void THTensor_(logicalAnd)(THTensor *r_, THTensor *t, int dimension, int keepdim tBasicIndex += quot*t->stride(j); } } - real *t_data = tp+tBasicIndex; - real *r__data = rp+iter; + scalar_t *t_data = tp+tBasicIndex; + scalar_t *r__data = rp+iter; *r__data = 1; for(j=0; j < THTensor_sizeLegacyNoScalars(t, dimension); ++j) { *r__data = *r__data && *(t_data + j*THTensor_strideLegacyNoScalars(t, dimension)); @@ -1678,12 +1678,12 @@ void THTensor_(logicalAnd)(THTensor *r_, THTensor *t, int dimension, int keepdim if(serial_path) { // two implementations optimized for data locality if (THTensor_strideLegacyNoScalars(t, dimension) == 1) { - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, accreal prod = 1; int64_t i; for(i = 0; i < t_size; i++) prod = prod && t_data[i*t_stride]; - *r__data = (real)prod;); + *r__data = (scalar_t)prod;); } else { THTensor_(fill)(r_, 1); THTensor *temp_ = THTensor_(newWithTensor)(r_); @@ -1691,7 +1691,7 @@ void THTensor_(logicalAnd)(THTensor *r_, THTensor *t, int dimension, int keepdim temp_->set_size(dimension,THTensor_sizeLegacyNoScalars(t, dimension)); temp_->set_stride(dimension, 0); - TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data && *t_data;); + TH_TENSOR_APPLY2(scalar_t, temp_, scalar_t, t, *temp__data = *temp__data && *t_data;); c10::raw::intrusive_ptr::decref(temp_); } } @@ -1717,8 +1717,8 @@ void THTensor_(logicalAny)(THTensor *r_, THTensor *t, int dimension, int keepdim serial_path = 1; } else { int r_Contig = THTensor_(isContiguous)(r_); - real *tp = t->data(); - real *rp = r_->data(); + scalar_t *tp = t->data(); + scalar_t *rp = r_->data(); if(r_Contig && (tp != rp)){ ptrdiff_t iter = 0; ptrdiff_t r_Size = THTensor_(nElement)(r_); @@ -1737,8 +1737,8 @@ void THTensor_(logicalAny)(THTensor *r_, THTensor *t, int dimension, int keepdim tBasicIndex += quot*t->stride(j); } } - real *t_data = tp+tBasicIndex; - real *r__data = rp+iter; + scalar_t *t_data = tp+tBasicIndex; + scalar_t *r__data = rp+iter; *r__data = 0; for(j=0; j < THTensor_sizeLegacyNoScalars(t, dimension); ++j) { *r__data = *r__data || *(t_data + j*THTensor_strideLegacyNoScalars(t, dimension)); @@ -1754,12 +1754,12 @@ void THTensor_(logicalAny)(THTensor *r_, THTensor *t, int dimension, int keepdim if (serial_path) { // two implementations optimized for data locality if (THTensor_strideLegacyNoScalars(t, dimension) == 1) { - 
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, accreal sum = 0; int64_t i; for(i = 0; i < t_size; i++) sum = sum || t_data[i*t_stride]; - *r__data = (real)sum;); + *r__data = (scalar_t)sum;); } else { THTensor_(zero)(r_); THTensor *temp_ = THTensor_(newWithTensor)(r_); @@ -1767,7 +1767,7 @@ void THTensor_(logicalAny)(THTensor *r_, THTensor *t, int dimension, int keepdim temp_->set_size(dimension,THTensor_sizeLegacyNoScalars(t, dimension)); temp_->set_stride(dimension, 0); - TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data || *t_data;); + TH_TENSOR_APPLY2(scalar_t, temp_, scalar_t, t, *temp__data = *temp__data || *t_data;); c10::raw::intrusive_ptr::decref(temp_); } } @@ -1825,7 +1825,7 @@ LAB_IMPLEMENT_VECTORIZED_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid),HYPER_TH_OMP_ void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty) { THTensor_(resizeAs)(r_, tx); - TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data);); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, tx, scalar_t, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data);); } void THTensor_(polygamma)(THTensor *r_, int64_t n, THTensor *t) { @@ -1836,11 +1836,11 @@ void THTensor_(polygamma)(THTensor *r_, int64_t n, THTensor *t) { } } -void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight) +void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, scalar_t weight) { THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match"); THTensor_(resizeAs)(r_, a); - TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight);); + TH_TENSOR_APPLY3(scalar_t, r_, scalar_t, a, scalar_t, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight);); } void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim) @@ -1862,7 +1862,7 @@ void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int ke dim[dimension] = 1; THTensor_(resize)(r_, dim, {}); - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, // Uses Welford's algorithm for numeric stability accreal mean = 0; accreal M2 = 0; @@ -1870,10 +1870,10 @@ void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int ke int64_t i; for (i = 0; i < t_size; i++) { - real z = t_data[i*t_stride]; - real delta = z - mean; + scalar_t z = t_data[i*t_stride]; + scalar_t delta = z - mean; mean += delta / (i + 1); - real delta2 = z - mean; + scalar_t delta2 = z - mean; M2 += delta * delta2; } @@ -1903,7 +1903,7 @@ void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int ke dim[dimension] = 1; THTensor_(resize)(r_, dim, {}); - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, // Uses Welford's algorithm for numeric stability accreal mean = 0; accreal M2 = 0; @@ -1911,10 +1911,10 @@ void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int ke int64_t i; for (i = 0; i < t_size; i++) { - real z = t_data[i*t_stride]; - real delta = z - mean; + scalar_t z = t_data[i*t_stride]; + scalar_t delta = z - mean; mean += delta / (i + 1); - real delta2 = z - mean; + scalar_t delta2 = z - mean; M2 += delta * delta2; } @@ -1934,7 +1934,7 @@ void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int ke } } -void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int 
keepdim) +void THTensor_(norm)(THTensor *r_, THTensor *t, scalar_t value, int dimension, int keepdim) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimensionLegacyAll)(t), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); @@ -1945,7 +1945,7 @@ void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int k THTensor_(resize)(r_, dim, {}); #define DIM_REDUCE(reduce, transform) \ - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, \ + TH_TENSOR_DIM_APPLY2(scalar_t, t, scalar_t, r_, dimension, \ accreal sum = 0; \ int64_t i; \ for(i = 0; i < t_size; i++) { \ @@ -1979,31 +1979,31 @@ void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int k #undef DIM_REDUCE } -accreal THTensor_(normall)(THTensor *tensor, real value) +accreal THTensor_(normall)(THTensor *tensor, scalar_t value) { accreal sum = 0; if(value == 0) { - TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;); + TH_TENSOR_APPLY(scalar_t, tensor, sum += *tensor_data != 0.0;); return sum; } else if(value == 1) { - TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data);); + TH_TENSOR_APPLY(scalar_t, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data);); return sum; } else if(value == 2) { - TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;); + TH_TENSOR_APPLY(scalar_t, tensor, accreal z = *tensor_data; sum += z*z;); return sqrt(sum); } else if(value == 3) { - TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += std::abs(z*z*z);); + TH_TENSOR_APPLY(scalar_t, tensor, accreal z = *tensor_data; sum += std::abs(z*z*z);); return TH_MATH_NAME(pow)(sum, 1.0/3); } else if(value == INFINITY) { - TH_TENSOR_APPLY(real, tensor, sum = THMax(sum, TH_MATH_NAME(fabs)(*tensor_data));); + TH_TENSOR_APPLY(scalar_t, tensor, sum = THMax(sum, TH_MATH_NAME(fabs)(*tensor_data));); return sum; } else { - TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value);); + TH_TENSOR_APPLY(scalar_t, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value);); return TH_MATH_NAME(pow)(sum, 1.0/value); } } -void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm) +void THTensor_(renorm)(THTensor *res, THTensor *src, scalar_t value, int dimension, scalar_t maxnorm) { THTensor *rowR, *rowS; @@ -2020,19 +2020,19 @@ void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, for (int64_t i = 0; i < THTensor_sizeLegacyNoScalars(src, dimension); i++) { - real norm = 0; - real new_norm; + scalar_t norm = 0; + scalar_t new_norm; THTensor_(select)(rowS, src, dimension, i); THTensor_(select)(rowR, res, dimension, i); if (value == 1) { - TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data);); + TH_TENSOR_APPLY(scalar_t, rowS, norm += fabs(*rowS_data);); } else if (value == 2) { - TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;); + TH_TENSOR_APPLY(scalar_t, rowS, accreal z = *rowS_data; norm += z*z;); } else if (value == INFINITY) { - TH_TENSOR_APPLY(real, rowS, norm = THMax(norm, TH_MATH_NAME(fabs)(*rowS_data));); + TH_TENSOR_APPLY(scalar_t, rowS, norm = THMax(norm, TH_MATH_NAME(fabs)(*rowS_data));); } else { - TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value);); + TH_TENSOR_APPLY(scalar_t, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value);); } if (value != INFINITY) { @@ -2044,7 +2044,7 @@ void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, new_norm = maxnorm / (norm + 
1e-7); TH_TENSOR_APPLY2( - real, rowR, real, rowS, + scalar_t, rowR, scalar_t, rowS, *rowR_data = (*rowS_data) * new_norm; ) } @@ -2056,10 +2056,10 @@ void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, c10::raw::intrusive_ptr::decref(rowS); } -accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value) +accreal THTensor_(dist)(THTensor *tensor, THTensor *src, scalar_t value) { - real sum = 0; - TH_TENSOR_APPLY2(real, tensor, real, src, + scalar_t sum = 0; + TH_TENSOR_APPLY2(scalar_t, tensor, scalar_t, src, sum += TH_MATH_NAME(pow)( TH_MATH_NAME(fabs)(*tensor_data - *src_data), value);); return TH_MATH_NAME(pow)(sum, 1.0/value); @@ -2074,7 +2074,7 @@ accreal THTensor_(varall)(THTensor *tensor, int biased) { accreal mean = THTensor_(meanall)(tensor); accreal sum = 0; - TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean);); + TH_TENSOR_APPLY(scalar_t, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean);); sum /= std::max(0, THTensor_(nElement)(tensor) - (biased ? 0 : 1)); return sum; } @@ -2084,9 +2084,9 @@ accreal THTensor_(stdall)(THTensor *tensor, int biased) return sqrt(THTensor_(varall)(tensor, biased)); } -void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n) +void THTensor_(linspace)(THTensor *r_, scalar_t a, scalar_t b, int64_t n) { - real i = 0; + scalar_t i = 0; // NumPy allows you to pass different points even if n <= 1 -- should we? THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); @@ -2099,16 +2099,16 @@ void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n) } else if (n == 1) { THTensor_(set1d)(r_, 0, a); } else { - TH_TENSOR_APPLY(real, r_, - *r__data = a + (b-a)/((real)(n-1))*i; + TH_TENSOR_APPLY(scalar_t, r_, + *r__data = a + (b-a)/((scalar_t)(n-1))*i; i++; ); } } -void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n) +void THTensor_(logspace)(THTensor *r_, scalar_t a, scalar_t b, int64_t n) { - real i = 0; + scalar_t i = 0; // NumPy allows you to pass different points even if n <= 1 -- should we? 
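/* For reference, a minimal standalone sketch of the fill rule that
   THTensor_(linspace) and THTensor_(logspace) implement: n evenly spaced
   points with both endpoints included, and logspace(a, b, n) equal to
   10^linspace(a, b, n) elementwise. Illustrative code only, not part of this
   patch; like the THArgCheck below, it assumes n > 1.

     #include <cmath>
     #include <cstdint>
     #include <vector>

     std::vector<double> linspace(double a, double b, int64_t n) {
       std::vector<double> out(n);
       for (int64_t i = 0; i < n; ++i)
         out[i] = a + (b - a) / (double)(n - 1) * i;  // same formula as the TH_TENSOR_APPLY body
       return out;
     }

     std::vector<double> logspace(double a, double b, int64_t n) {
       std::vector<double> out = linspace(a, b, n);   // logspace = 10^linspace, pointwise
       for (double &v : out) v = std::pow(10.0, v);
       return out;
     }
*/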
THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); @@ -2121,18 +2121,18 @@ void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n) } else if (n == 1) { THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a)); } else { - TH_TENSOR_APPLY(real, r_, - *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1))); + TH_TENSOR_APPLY(scalar_t, r_, + *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((scalar_t)(n-1))); i++; ); } } -void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue) +void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, scalar_t minvalue, scalar_t maxvalue) { - real minval; - real maxval; - real *h_data; + scalar_t minval; + scalar_t maxval; + scalar_t *h_data; THTensor_(resize1d)(hist, nbins); THTensor_(zero)(hist); @@ -2149,9 +2149,9 @@ void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minv maxval = maxval + 1; } - h_data = hist->data(); + h_data = hist->data(); - TH_TENSOR_APPLY(real, tensor, + TH_TENSOR_APPLY(scalar_t, tensor, if (*tensor_data >= minval && *tensor_data <= maxval) { const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins); h_data[THMin(bin, nbins-1)] += 1; @@ -2159,7 +2159,7 @@ void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minv ); } -void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue) +void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, scalar_t minvalue, scalar_t maxvalue) { THArgCheck(THTensor_(nDimensionLegacyAll)(tensor) < 3, 2, "invalid dimension %d, the input must be a 2d tensor", THTensor_(nDimensionLegacyAll)(tensor)); @@ -2167,8 +2167,8 @@ void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real min THArgCheck(dimension >= 0 && dimension < THTensor_(nDimensionLegacyAll)(tensor), 2, "invalid dimension %d", dimension + TH_INDEX_BASE); - real minval; - real maxval; + scalar_t minval; + scalar_t maxval; THTensor_(resize2d)(hist, THTensor_sizeLegacyNoScalars(tensor, 0), nbins); THTensor_(zero)(hist); @@ -2186,7 +2186,7 @@ void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real min maxval = maxval + 1; } - TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, int64_t i; + TH_TENSOR_DIM_APPLY2(scalar_t, tensor, scalar_t, hist, dimension, int64_t i; for(i = 0; i < tensor_size; i++) { if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) { @@ -2199,41 +2199,41 @@ void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real min // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha. // Assumes x is close to zero and uses a Taylor expansion. 
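/* Background sketch for the three beta_grad_* helpers below (informal, not
   taken from this patch): the reparameterized gradient of a Beta sample x
   follows from implicitly differentiating the CDF F(x; alpha, beta) = u at
   fixed u,

     dx/dalpha = -(dF/dalpha) / (dF/dx),

   where dF/dx is just the Beta density at x. For x near zero the incomplete
   beta integral is expanded as a power series in x, which is what the
   factor/numer/series recurrence below evaluates term by term; the th_isnan
   guard at the end maps a non-finite partial sum to 0 instead of letting a
   NaN propagate into the backward pass. */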
-static inline real THTensor_(beta_grad_alpha_small)(real x, real alpha, real beta) { - const real factor = TH_MATH_NAME(TH_digamma)(alpha) - TH_MATH_NAME(TH_digamma)(alpha + beta) - TH_MATH_NAME(log)(x); - real numer = 1; - real series = numer / alpha * (factor + 1 / alpha); +static inline scalar_t THTensor_(beta_grad_alpha_small)(scalar_t x, scalar_t alpha, scalar_t beta) { + const scalar_t factor = TH_MATH_NAME(TH_digamma)(alpha) - TH_MATH_NAME(TH_digamma)(alpha + beta) - TH_MATH_NAME(log)(x); + scalar_t numer = 1; + scalar_t series = numer / alpha * (factor + 1 / alpha); for (int i = 1; i <= 10; ++i) { numer *= (i - beta) * x / i; - const real denom = alpha + i; + const scalar_t denom = alpha + i; series += numer / denom * (factor + 1 / denom); } - const real result = x * TH_MATH_NAME(pow)(1 - x, -beta) * series; + const scalar_t result = x * TH_MATH_NAME(pow)(1 - x, -beta) * series; return th_isnan(result) ? 0.0 : result; } // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta. // Assumes x is close to zero and uses a Taylor expansion. -static inline real THTensor_(beta_grad_beta_small)(real x, real alpha, real beta) { - const real factor = TH_MATH_NAME(TH_digamma)(alpha+beta) - TH_MATH_NAME(TH_digamma)(beta); - real numer = 1; - real betas = 1; - real dbetas = 0; - real series = factor / alpha; +static inline scalar_t THTensor_(beta_grad_beta_small)(scalar_t x, scalar_t alpha, scalar_t beta) { + const scalar_t factor = TH_MATH_NAME(TH_digamma)(alpha+beta) - TH_MATH_NAME(TH_digamma)(beta); + scalar_t numer = 1; + scalar_t betas = 1; + scalar_t dbetas = 0; + scalar_t series = factor / alpha; for (int i = 1; i <= 8; ++i) { numer *= -x / i; dbetas = dbetas * (beta - i) + betas; betas = betas * (beta - i); series += numer / (alpha + i) * (dbetas + factor * betas); } - const real result = -TH_MATH_NAME(pow)(1 - x, 1 - beta) * series; + const scalar_t result = -TH_MATH_NAME(pow)(1 - x, 1 - beta) * series; return th_isnan(result) ? 0.0 : result; } // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha. // Assumes alpha and beta are both large and uses a Rice saddle point expansion. // To ensure numerical stability, this computation is performed at higher precision. -static inline real THTensor_(beta_grad_alpha_mid)(double x, double alpha, double beta) { +static inline scalar_t THTensor_(beta_grad_alpha_mid)(double x, double alpha, double beta) { const double total = alpha + beta; const double mean = alpha / total; const double std = sqrt(alpha * beta / (total + 1)) / total; @@ -2272,9 +2272,9 @@ static inline real THTensor_(beta_grad_alpha_mid)(double x, double alpha, double // for random number x drawn from a Beta distribution Beta(alpha,beta). // This function inputs total=alpha+beta to make it easy to implement // Dirichlet reparameterized gradients in terms of Betas. -static inline real THTensor_(dirichlet_grad_one)(real x, real alpha, real total) { - const real beta = total - alpha; - const real boundary = total * x * (1 - x); +static inline scalar_t THTensor_(dirichlet_grad_one)(scalar_t x, scalar_t alpha, scalar_t total) { + const scalar_t beta = total - alpha; + const scalar_t boundary = total * x * (1 - x); // Use an asymptotic approximation for x close to 0. if (x <= 0.5f && boundary < 2.5f) { @@ -2292,7 +2292,7 @@ static inline real THTensor_(dirichlet_grad_one)(real x, real alpha, real total) } // Use a rational correction to an analytic approximation. 
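/* The correction below evaluates two polynomials p(u, a, b) and q(u, a, b)
   over the monomial basis u^i * a^j (i, j < 3), each term carrying a cubic in
   b evaluated by Horner's rule, then scales the analytic approximation by
   p/q. A standalone sketch of the same evaluation scheme (illustrative names,
   not part of this patch):

     static double eval_corr_poly(const double c[3][3][4],
                                  double u, double a, double b) {
       const double pow_u[3] = {1, u, u * u};
       const double pow_a[3] = {1, a, a * a};
       double acc = 0;
       for (int i = 0; i < 3; ++i)
         for (int j = 0; j < 3; ++j)
           acc += pow_u[i] * pow_a[j] *
                  (c[i][j][0] + b * (c[i][j][1] + b * (c[i][j][2] + b * c[i][j][3])));
       return acc;  // call once with c[0] for p and once with c[1] for q
     }
*/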
- static const real c[2][3][3][4] = { + static const scalar_t c[2][3][3][4] = { {{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863}, {0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033}, {-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}}, @@ -2312,21 +2312,21 @@ static inline real THTensor_(dirichlet_grad_one)(real x, real alpha, real total) {0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05}, {-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}}, }; - const real u = TH_MATH_NAME(log)(x); - const real a = TH_MATH_NAME(log)(alpha) - u; - const real b = TH_MATH_NAME(log)(total) - a; - const real pow_u[3] = {1, u, u * u}; - const real pow_a[3] = {1, a, a * a}; - real p = 0.0; - real q = 0.0; + const scalar_t u = TH_MATH_NAME(log)(x); + const scalar_t a = TH_MATH_NAME(log)(alpha) - u; + const scalar_t b = TH_MATH_NAME(log)(total) - a; + const scalar_t pow_u[3] = {1, u, u * u}; + const scalar_t pow_a[3] = {1, a, a * a}; + scalar_t p = 0.0; + scalar_t q = 0.0; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { - const real ua = pow_u[i] * pow_a[j]; + const scalar_t ua = pow_u[i] * pow_a[j]; p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3]))); q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3]))); } } - const real approx = x * (TH_MATH_NAME(TH_digamma)(total) - TH_MATH_NAME(TH_digamma)(alpha)) / beta; + const scalar_t approx = x * (TH_MATH_NAME(TH_digamma)(total) - TH_MATH_NAME(TH_digamma)(alpha)) / beta; return p / q * approx; } @@ -2340,10 +2340,10 @@ void THTensor_(dirichlet_grad)(THTensor *self, THTensor *x, THTensor *alpha, THT THTensor_(resizeAs)(self, x); THTensor* grad = THTensor_(newContiguous)(self); - real*const grad_data = grad->data(); - real*const x_data = x->data(); - real*const alpha_data = alpha->data(); - real*const total_data = total->data(); + scalar_t*const grad_data = grad->data(); + scalar_t*const x_data = x->data(); + scalar_t*const alpha_data = alpha->data(); + scalar_t*const total_data = total->data(); const int64_t numel = THTensor_(nElement)(x); int64_t i; #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i) diff --git a/aten/src/TH/generic/THTensorRandom.cpp b/aten/src/TH/generic/THTensorRandom.cpp index 911b0a7491f474..fd2c9d0341df8b 100644 --- a/aten/src/TH/generic/THTensorRandom.cpp +++ b/aten/src/TH/generic/THTensorRandom.cpp @@ -16,19 +16,19 @@ void THTensor_(random)(THTensor *self, THGenerator *_generator) { std::lock_guard lock(_generator->mutex); #if defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY(real, self, *self_data = (uint8_t)(THRandom_random(_generator) % (UINT8_MAX + 1));); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (uint8_t)(THRandom_random(_generator) % (UINT8_MAX + 1));); #elif defined(TH_REAL_IS_CHAR) - TH_TENSOR_APPLY(real, self, *self_data = (int8_t)(THRandom_random(_generator) % (INT8_MAX + 1));); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (int8_t)(THRandom_random(_generator) % (INT8_MAX + 1));); #elif defined(TH_REAL_IS_SHORT) - TH_TENSOR_APPLY(real, self, *self_data = (int16_t)(THRandom_random(_generator) % (INT16_MAX + 1));); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (int16_t)(THRandom_random(_generator) % (INT16_MAX + 1));); #elif defined(TH_REAL_IS_INT) - TH_TENSOR_APPLY(real, self, *self_data = (int32_t)(THRandom_random(_generator) % (INT32_MAX + 1UL));); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (int32_t)(THRandom_random(_generator) % (INT32_MAX + 
1UL));); #elif defined(TH_REAL_IS_LONG) - TH_TENSOR_APPLY(real, self, *self_data = (uint64_t)(THRandom_random64(_generator) % (LONG_MAX + 1ULL));); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (uint64_t)(THRandom_random64(_generator) % (LONG_MAX + 1ULL));); #elif defined(TH_REAL_IS_FLOAT) - TH_TENSOR_APPLY(real, self, *self_data = (float)(THRandom_random(_generator) % ((1ULL << FLT_MANT_DIG) + 1));); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (float)(THRandom_random(_generator) % ((1ULL << FLT_MANT_DIG) + 1));); #elif defined(TH_REAL_IS_DOUBLE) - TH_TENSOR_APPLY(real, self, *self_data = (double)(THRandom_random64(_generator) % ((1ULL << DBL_MANT_DIG) + 1));); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (double)(THRandom_random64(_generator) % ((1ULL << DBL_MANT_DIG) + 1));); #else #error "Unknown type" #endif @@ -41,11 +41,11 @@ void THTensor_(clampedRandom)(THTensor *self, THGenerator *_generator, int64_t m uint64_t range = max - min; #if defined(TH_REAL_IS_LONG) || defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) if (range >= 1ULL << 32) { - TH_TENSOR_APPLY(real, self, *self_data = static_cast(static_cast((THRandom_random64(_generator) % range) + min));) + TH_TENSOR_APPLY(scalar_t, self, *self_data = static_cast(static_cast((THRandom_random64(_generator) % range) + min));) return; } #endif - TH_TENSOR_APPLY(real, self, *self_data = static_cast(static_cast((THRandom_random(_generator) % range) + min));) + TH_TENSOR_APPLY(scalar_t, self, *self_data = static_cast(static_cast((THRandom_random(_generator) % range) + min));) } void THTensor_(cappedRandom)(THTensor *self, THGenerator *_generator, int64_t max) { @@ -56,7 +56,7 @@ void THTensor_(cappedRandom)(THTensor *self, THGenerator *_generator, int64_t ma void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p) { std::lock_guard lock(_generator->mutex); - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_geometric(_generator, p);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_geometric(_generator, p);); } #ifdef TH_BLAS_MKL @@ -107,7 +107,7 @@ void THTensor_(iBernoulli_generate_copy)(THTensor *self, THGenerator *_generator #ifndef TH_REAL_IS_INT if (contig) { - real* self_seg = self->data() + line_index_offset; + scalar_t* self_seg = self->data() + line_index_offset; int* tmp_seg = tmp + line_index_offset; THVector_(cvtFromInt)(self_seg, tmp_seg, line_seg_len); } @@ -121,9 +121,9 @@ void THTensor_(iBernoulli_generate_copy)(THTensor *self, THGenerator *_generator #endif } else { #ifdef _OPENMP - TH_TENSOR_APPLY2_OMP(n, 1, 0, int, intTensor, real, self, *self_data = *intTensor_data;, TH_OMP_OVERHEAD_THRESHOLD_COPY) + TH_TENSOR_APPLY2_OMP(n, 1, 0, int, intTensor, scalar_t, self, *self_data = *intTensor_data;, TH_OMP_OVERHEAD_THRESHOLD_COPY) #else - TH_TENSOR_APPLY2(int, intTensor, real, self, *self_data = *intTensor_data;) + TH_TENSOR_APPLY2(int, intTensor, scalar_t, self, *self_data = *intTensor_data;) #endif THIntTensor_free(intTensor); } @@ -140,24 +140,24 @@ void THTensor_(bernoulli)(THTensor *self, THGenerator *_generator, double p) THTensor_(iBernoulli_generate_copy)(self, _generator, p); } else { std::lock_guard lock(_generator->mutex); - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_bernoulli(_generator, p);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_bernoulli(_generator, p);); } #else std::lock_guard lock(_generator->mutex); - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_bernoulli(_generator, p);); + TH_TENSOR_APPLY(scalar_t, 
self, *self_data = (scalar_t)THRandom_bernoulli(_generator, p);); #endif } void THTensor_(bernoulli_FloatTensor)(THTensor *self, THGenerator *_generator, THFloatTensor *p) { std::lock_guard<std::mutex> lock(_generator->mutex); - TH_TENSOR_APPLY2(real, self, float, p, *self_data = (real)THRandom_bernoulli(_generator, (double)*p_data);); + TH_TENSOR_APPLY2(scalar_t, self, float, p, *self_data = (scalar_t)THRandom_bernoulli(_generator, (double)*p_data);); } void THTensor_(bernoulli_DoubleTensor)(THTensor *self, THGenerator *_generator, THDoubleTensor *p) { std::lock_guard<std::mutex> lock(_generator->mutex); - TH_TENSOR_APPLY2(real, self, double, p, *self_data = (real)THRandom_bernoulli(_generator, (double)*p_data);); + TH_TENSOR_APPLY2(scalar_t, self, double, p, *self_data = (scalar_t)THRandom_bernoulli(_generator, (double)*p_data);); } #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) @@ -181,11 +181,11 @@ void THTensor_(uniform)(THTensor *self, THGenerator *_generator, double a, doubl { std::lock_guard<std::mutex> lock(_generator->mutex); #if defined(TH_REAL_IS_FLOAT) - TH_TENSOR_APPLY(real, self, *self_data = - (real)THRandom_uniformFloat(_generator, (real)a, (real)b);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = + (scalar_t)THRandom_uniformFloat(_generator, (scalar_t)a, (scalar_t)b);); #else - TH_TENSOR_APPLY(real, self, *self_data = - (real)THRandom_uniform(_generator, a, b);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = + (scalar_t)THRandom_uniform(_generator, a, b);); #endif } @@ -196,7 +196,7 @@ void THTensor_(normal)(THTensor *self, THGenerator *_generator, double mean, dou if (size >= 16 && THTensor_(isContiguous)(self)) { THVector_(normal_fill)(THStorage_(data)(THTensor_getStoragePtr(self)) + self->storage_offset(), size, _generator, mean, stddev); } else { - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_normal(_generator, mean, stddev);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_normal(_generator, mean, stddev);); } } @@ -226,7 +226,7 @@ void THTensor_(normal_means_stddevs)(THTensor *self, THGenerator *gen, THTensor void THTensor_(exponential)(THTensor *self, THGenerator *_generator, double lambda) { std::lock_guard<std::mutex> lock(_generator->mutex); - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_exponential(_generator, lambda);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_exponential(_generator, lambda);); } #undef TH_REAL_MIN @@ -234,13 +234,13 @@ void THTensor_(exponential)(THTensor *self, THGenerator *_generator, double lamb void THTensor_(cauchy)(THTensor *self, THGenerator *_generator, double median, double sigma) { std::lock_guard<std::mutex> lock(_generator->mutex); - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_cauchy(_generator, median, sigma);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_cauchy(_generator, median, sigma);); } void THTensor_(logNormal)(THTensor *self, THGenerator *_generator, double mean, double stdv) { std::lock_guard<std::mutex> lock(_generator->mutex); - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_logNormal(_generator, mean, stdv);); + TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_logNormal(_generator, mean, stdv);); } void THTensor_(multinomialAliasSetup)(THTensor *probs, THLongTensor *J, THTensor *q) @@ -253,13 +253,13 @@ void THTensor_(multinomialAliasSetup)(THTensor *probs, THLongTensor *J, THTensor int64_t large_c = 0; THLongTensor_resize1d(J, inputsize); THTensor_(resize1d)(q, inputsize); - real *q_data = q->data<real>(); + scalar_t *q_data = q->data<scalar_t>(); int64_t *J_data = 
THLongTensor_data(J); for (i = 0; i < inputsize; i++) { THLongTensor_fastSet1d(J, i, 0L); - real val = THTensor_(fastGet1d)(probs, i); + scalar_t val = THTensor_(fastGet1d)(probs, i); THTensor_(fastSet1d)(q, i, inputsize*val); if (inputsize * val < 1.0) @@ -298,9 +298,9 @@ void THTensor_(multinomialAliasSetup)(THTensor *probs, THLongTensor *J, THTensor } } - real q_min = THTensor_(fastGet1d)(q, inputsize-1); - real q_max = q_min; - real q_temp; + scalar_t q_min = THTensor_(fastGet1d)(q, inputsize-1); + scalar_t q_max = q_min; + scalar_t q_temp; for (i=0; i < inputsize; i++) { q_temp = THTensor_(fastGet1d)(q, i); @@ -336,7 +336,7 @@ void THTensor_(multinomialAliasDraw)(THLongTensor *self, THGenerator *_generator int64_t K = THLongTensor_nElement(J); int64_t output_nelem = THLongTensor_nElement(self); int64_t i = 0, _mask=0; - real _q; + scalar_t _q; int64_t rand_ind, sample_idx, J_sample; for (i=0; i < output_nelem; i++) @@ -533,7 +533,7 @@ void THTensor_(getRNGState)(THGenerator *_generator, THTensor *self) THTensor_(resize1d)(self, size); THArgCheck(THTensor_(nElement)(self) == size, 1, "RNG state is wrong size"); THArgCheck(THTensor_(isContiguous)(self), 1, "RNG state needs to be contiguous"); - rng_state = (THGeneratorState *)self->data(); + rng_state = (THGeneratorState *)self->data(); THGeneratorState_copy(rng_state, &_generator->gen_state); } @@ -544,7 +544,7 @@ void THTensor_(setRNGState)(THGenerator *_generator, THTensor *self) THGeneratorState *rng_state; THArgCheck(THTensor_(nElement)(self) == size, 1, "RNG state is wrong size"); THArgCheck(THTensor_(isContiguous)(self), 1, "RNG state needs to be contiguous"); - rng_state = (THGeneratorState *)self->data(); + rng_state = (THGeneratorState *)self->data(); THArgCheck(THGeneratorState_isValid(rng_state), 1, "Invalid RNG state"); THGeneratorState_copy(&_generator->gen_state, rng_state); } diff --git a/aten/src/TH/generic/THVector.h b/aten/src/TH/generic/THVector.h index 1931700a750152..df92994e3ffaf1 100644 --- a/aten/src/TH/generic/THVector.h +++ b/aten/src/TH/generic/THVector.h @@ -5,63 +5,63 @@ // Opaque C++ struct struct THGenerator; -TH_API void THVector_(fill)(real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(cadd)(real *z, const real *x, const real *y, const real c, const ptrdiff_t n); -TH_API void THVector_(adds)(real *y, const real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(cmul)(real *z, const real *x, const real *y, const ptrdiff_t n); -TH_API void THVector_(muls)(real *y, const real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(cdiv)(real *z, const real *x, const real *y, const ptrdiff_t n); -TH_API void THVector_(divs)(real *y, const real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(copy)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(neg)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(normal_fill)(real *data, +TH_API void THVector_(fill)(scalar_t *x, const scalar_t c, const ptrdiff_t n); +TH_API void THVector_(cadd)(scalar_t *z, const scalar_t *x, const scalar_t *y, const scalar_t c, const ptrdiff_t n); +TH_API void THVector_(adds)(scalar_t *y, const scalar_t *x, const scalar_t c, const ptrdiff_t n); +TH_API void THVector_(cmul)(scalar_t *z, const scalar_t *x, const scalar_t *y, const ptrdiff_t n); +TH_API void THVector_(muls)(scalar_t *y, const scalar_t *x, const scalar_t c, const ptrdiff_t n); +TH_API void THVector_(cdiv)(scalar_t *z, const scalar_t *x, const scalar_t *y, const ptrdiff_t n); +TH_API void 
THVector_(divs)(scalar_t *y, const scalar_t *x, const scalar_t c, const ptrdiff_t n); +TH_API void THVector_(copy)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(neg)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(normal_fill)(scalar_t *data, const int64_t size, struct THGenerator *generator, - const real mean, - const real stddev); + const scalar_t mean, + const scalar_t stddev); #ifndef TH_REAL_IS_INT -TH_API void THVector_(cvtFromInt)(real *y, const int *x, const ptrdiff_t n); +TH_API void THVector_(cvtFromInt)(scalar_t *y, const int *x, const ptrdiff_t n); #endif #if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) || defined(TH_REAL_IS_LONG) -TH_API void THVector_(abs)(real *y, const real *x, const ptrdiff_t n); +TH_API void THVector_(abs)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); #endif /* floating point only now */ #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) -TH_API void THVector_(log)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(lgamma)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(digamma)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(trigamma)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(log10)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(log1p)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(log2)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(sigmoid)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(exp)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(expm1)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(erf)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(erfc)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(erfinv)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(cos)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(acos)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(cosh)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(sin)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(asin)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(sinh)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(tan)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(atan)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(tanh)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(pow)(real *y, const real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(sqrt)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(rsqrt)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(ceil)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(floor)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(round)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(abs)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(trunc)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(frac)(real *y, const real *x, const ptrdiff_t n); -TH_API void THVector_(cinv)(real *y, const real *x, const ptrdiff_t n); +TH_API void THVector_(log)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(lgamma)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(digamma)(scalar_t *y, const scalar_t *x, 
const ptrdiff_t n); +TH_API void THVector_(trigamma)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(log10)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(log1p)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(log2)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(sigmoid)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(exp)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(expm1)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(erf)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(erfc)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(erfinv)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(cos)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(acos)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(cosh)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(sin)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(asin)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(sinh)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(tan)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(atan)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(tanh)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(pow)(scalar_t *y, const scalar_t *x, const scalar_t c, const ptrdiff_t n); +TH_API void THVector_(sqrt)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(rsqrt)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(ceil)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(floor)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(round)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(abs)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(trunc)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(frac)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); +TH_API void THVector_(cinv)(scalar_t *y, const scalar_t *x, const ptrdiff_t n); #endif /* floating point only part */ diff --git a/aten/src/TH/generic/THVectorDefault.cpp b/aten/src/TH/generic/THVectorDefault.cpp index a32701a1a40220..dba380c6b84bb9 100644 --- a/aten/src/TH/generic/THVectorDefault.cpp +++ b/aten/src/TH/generic/THVectorDefault.cpp @@ -4,7 +4,7 @@ #include "../THRandom.h" -void THVector_(copy_DEFAULT)(real *x, const real *y, const ptrdiff_t n) { +void THVector_(copy_DEFAULT)(scalar_t *x, const scalar_t *y, const ptrdiff_t n) { ptrdiff_t i = 0; for(; i (0, 1] for log. - const real u2 = data[j + 8]; + const scalar_t u1 = 1 - data[j]; // [0, 1) -> (0, 1] for log. 
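/* The surrounding lines are the Box-Muller transform: a pair of independent
   uniforms (u1, u2) becomes a pair of independent standard normals via
   z0 = r*cos(t), z1 = r*sin(t) with r = sqrt(-2*ln(u1)) and t = 2*pi*u2,
   which the loop then scales by stddev and shifts by mean. A minimal
   standalone sketch (illustrative only; it assumes u1 in (0, 1], which is
   exactly why u1 is computed as 1 - data[j] above):

     #include <cmath>

     static void box_muller(double u1, double u2, double *z0, double *z1) {
       const double r = std::sqrt(-2.0 * std::log(u1));  // u1 must be > 0
       const double t = 2.0 * M_PI * u2;                 // M_PI as in the surrounding file
       *z0 = r * std::cos(t);
       *z1 = r * std::sin(t);
     }
*/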
+ const scalar_t u2 = data[j + 8]; - const real radius = sqrt(-2 * log(u1)); - const real theta = 2.0f * M_PI * u2; + const scalar_t radius = sqrt(-2 * log(u1)); + const scalar_t theta = 2.0f * M_PI * u2; data[j] = radius * cos(theta) * stddev + mean; data[j + 8] = radius * sin(theta) * stddev + mean; } } -void THVector_(normal_fill_DEFAULT)(real *data, +void THVector_(normal_fill_DEFAULT)(scalar_t *data, int64_t size, THGenerator *generator, - const real mean, - const real stddev) + const scalar_t mean, + const scalar_t stddev) { THAssert(size >= 16 && "Size must be >= 16 for normal fill"); @@ -201,7 +201,7 @@ void THVector_(normal_fill_DEFAULT)(real *data, } #define VECTOR_IMPLEMENT_FUNCTION(NAME, CFUNC) \ - void THVector_(NAME)(real *y, const real *x, const ptrdiff_t n) \ + void THVector_(NAME)(scalar_t *y, const scalar_t *x, const ptrdiff_t n) \ { \ ptrdiff_t i = 0; \ for(; i struct THCNumerics { }; -template -static inline __host__ __device__ scalar_t powi(scalar_t a, scalar_t b) { - assert(THCNumerics::ge(b, 0)); - scalar_t result = 1; +template +static inline __host__ __device__ T powi(T a, T b) { + assert(THCNumerics::ge(b, 0)); + T result = 1; while (b) { if (b & 1) { result *= a; diff --git a/aten/src/THC/THCTensorIndex.cu b/aten/src/THC/THCTensorIndex.cu index 68bea1b16d6f26..0ea5951d4ea734 100644 --- a/aten/src/THC/THCTensorIndex.cu +++ b/aten/src/THC/THCTensorIndex.cu @@ -451,30 +451,30 @@ struct TensorPutAccumulateOp { }; -template class Op, typename TensorType> +template class Op, typename TensorType> void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) { // These are only valid if index is contiguous auto start = THCudaLongTensor_data(state, index); auto end = start + THCudaLongTensor_numel(state, index); - auto aInfo = getTensorInfo(state, a); + auto aInfo = getTensorInfo(state, a); aInfo.collapseDims(); auto numel = THCTensor_nElement(state, a); if (aInfo.isContiguous()) { - auto op = Op(aInfo, numel, start, end); - THC_pointwiseApply2(state, b, index, op); + auto op = Op(aInfo, numel, start, end); + THC_pointwiseApply2(state, b, index, op); } else { - auto op = Op(aInfo, numel, start, end); - THC_pointwiseApply2(state, b, index, op); + auto op = Op(aInfo, numel, start, end); + THC_pointwiseApply2(state, b, index, op); } } -template class Op, typename TensorType> +template class Op, typename TensorType> void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) { if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) { - dispatchTakePutImpl(state, a, b, index); + dispatchTakePutImpl(state, a, b, index); } else { - dispatchTakePutImpl(state, a, b, index); + dispatchTakePutImpl(state, a, b, index); } } diff --git a/aten/src/THC/THCTensorMathPointwise.cuh b/aten/src/THC/THCTensorMathPointwise.cuh index 462c9fd001d9c7..fb15a05b155c0f 100644 --- a/aten/src/THC/THCTensorMathPointwise.cuh +++ b/aten/src/THC/THCTensorMathPointwise.cuh @@ -667,11 +667,11 @@ struct TensorBitXorOp { * Cephes Math Library Release 2.8: June, 2000 * Copyright 1984, 1987, 1992, 2000 by Stephen L. 
Moshier */ -template <typename real, typename accreal> +template <typename T, typename accreal> struct TensorDigammaOp { __device__ __forceinline__ void - operator()(real* out, real* in) { - using compute_type = typename std::conditional<std::is_same<real, half>::value, accreal, real>::type; + operator()(T* out, T* in) { + using compute_type = typename std::conditional<std::is_same<T, half>::value, accreal, T>::type; static const double PI_f64 = 3.14159265358979323846; static const compute_type PSI_10 = 2.25175258906672110764; static const compute_type A[] = { @@ -686,7 +686,7 @@ struct TensorDigammaOp { auto x = scalar_cast<compute_type>(*in); if (x == 0) { - *out = scalar_cast<real>(INFINITY); + *out = scalar_cast<T>(INFINITY); return; } @@ -694,7 +694,7 @@ struct TensorDigammaOp { compute_type result = 0; if (x < 0) { if (x_is_integer) { - *out = scalar_cast<real>(INFINITY); + *out = scalar_cast<T>(INFINITY); return; } // Rounding errors in tan's input can really affect the output @@ -709,7 +709,7 @@ struct TensorDigammaOp { x += 1; } if (x == 10) { - *out = scalar_cast<real>(result + PSI_10); + *out = scalar_cast<T>(result + PSI_10); return; } @@ -724,18 +724,18 @@ struct TensorDigammaOp { y = z * polevl_result; } - *out = scalar_cast<real>(log(x) - (0.5 / x) - y + result); + *out = scalar_cast<T>(log(x) - (0.5 / x) - y + result); return; } }; -template <typename real, typename accreal> +template <typename T, typename accreal> struct TensorTrigammaOp { - using compute_type = typename std::conditional<std::is_same<real, half>::value, accreal, real>::type; + using compute_type = typename std::conditional<std::is_same<T, half>::value, accreal, T>::type; __device__ __forceinline__ void - operator()(real* out, real* in) { + operator()(T* out, T* in) { const compute_type PI = 3.14159265358979323846; - compute_type x = ScalarConvert<real, compute_type>::to(*in); + compute_type x = ScalarConvert<T, compute_type>::to(*in); compute_type sign = +1; compute_type result = 0; if (x < 0.5f) { @@ -750,7 +750,7 @@ struct TensorTrigammaOp { } const compute_type ixx = 1 / (x*x); result += (1 + 1 / (2*x) + ixx * (1.f/6 - ixx * (1.f/30 - ixx * (1.f/42)))) / x; - *out = ScalarConvert<compute_type, real>::to(sign * result); + *out = ScalarConvert<compute_type, T>::to(sign * result); } }; diff --git a/aten/src/THC/THCTensorRandom.cu b/aten/src/THC/THCTensorRandom.cu index f355c96d1ace54..c5ac9f1ab66a90 100644 --- a/aten/src/THC/THCTensorRandom.cu +++ b/aten/src/THC/THCTensorRandom.cu @@ -147,9 +147,9 @@ struct is_same { static const bool value = false; }; template <typename T> struct is_same<T, T> { static const bool value = true; }; -template <typename real, typename prob_type> +template <typename T, typename prob_type> __global__ void generate_bernoulli_tensor(curandStateMtgp32 *state, int size, - real *result, prob_type *probs) + T *result, prob_type *probs) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; @@ -157,11 +157,11 @@ __global__ void generate_bernoulli_tensor(curandStateMtgp32 *state, int size, if (is_same<prob_type, double>::value) { double x = curand_uniform_double(&state[blockIdx.x]); if (i < size) - result[i] = ScalarConvert<bool, real>::to(x <= probs[i]); + result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } else { float x = curand_uniform(&state[blockIdx.x]); if (i < size) - result[i] = ScalarConvert<bool, real>::to(x <= probs[i]); + result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } } } diff --git a/aten/src/THC/generic/THCStorage.cpp b/aten/src/THC/generic/THCStorage.cpp index feb2e94959abf2..8918a449e19585 100644 --- a/aten/src/THC/generic/THCStorage.cpp +++ b/aten/src/THC/generic/THCStorage.cpp @@ -4,9 +4,9 @@ #include -real* THCStorage_(data)(THCState *state, const THCStorage *self) +scalar_t* THCStorage_(data)(THCState *state, const THCStorage *self) { - return self->data<real>(); + return self->data<scalar_t>(); } ptrdiff_t 
THCStorage_(size)(THCState *state, const THCStorage *self) int THCStorage_(elementSize)(THCState *state) { - return sizeof(real); + return sizeof(scalar_t); } -void THCStorage_(set)(THCState *state, THCStorage *self, ptrdiff_t index, real value) +void THCStorage_(set)(THCState *state, THCStorage *self, ptrdiff_t index, scalar_t value) { THArgCheck((index >= 0) && (index < self->numel()), 2, "index out of bounds"); cudaStream_t stream = THCState_getCurrentStream(state); - THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, self) + index, &value, sizeof(real), + THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, self) + index, &value, sizeof(scalar_t), cudaMemcpyHostToDevice, stream)); THCudaCheck(cudaStreamSynchronize(stream)); } -real THCStorage_(get)(THCState *state, const THCStorage *self, ptrdiff_t index) +scalar_t THCStorage_(get)(THCState *state, const THCStorage *self, ptrdiff_t index) { THArgCheck((index >= 0) && (index < self->numel()), 2, "index out of bounds"); - real value; + scalar_t value; cudaStream_t stream = THCState_getCurrentStream(state); - THCudaCheck(cudaMemcpyAsync(&value, THCStorage_(data)(state, self) + index, sizeof(real), + THCudaCheck(cudaMemcpyAsync(&value, THCStorage_(data)(state, self) + index, sizeof(scalar_t), cudaMemcpyDeviceToHost, stream)); THCudaCheck(cudaStreamSynchronize(stream)); return value; @@ -43,7 +43,7 @@ real THCStorage_(get)(THCState *state, const THCStorage *self, ptrdiff_t index) THCStorage* THCStorage_(new)(THCState *state) { THStorage* storage = c10::make_intrusive( - at::scalarTypeToDataType(at::CTypeToScalarType::to()), + at::scalarTypeToDataType(at::CTypeToScalarType::to()), 0, state->cudaDeviceAllocator, true).release(); @@ -53,7 +53,7 @@ THCStorage* THCStorage_(new)(THCState *state) THCStorage* THCStorage_(newWithSize)(THCState *state, ptrdiff_t size) { THStorage* storage = c10::make_intrusive( - at::scalarTypeToDataType(at::CTypeToScalarType::to()), + at::scalarTypeToDataType(at::CTypeToScalarType::to()), size, state->cudaDeviceAllocator, true).release(); @@ -64,21 +64,21 @@ THCStorage* THCStorage_(newWithAllocator)(THCState *state, ptrdiff_t size, at::Allocator* allocator) { THStorage* storage = c10::make_intrusive( - at::scalarTypeToDataType(at::CTypeToScalarType::to()), + at::scalarTypeToDataType(at::CTypeToScalarType::to()), size, allocator, true).release(); return storage; } -THCStorage* THCStorage_(newWithSize1)(THCState *state, real data0) +THCStorage* THCStorage_(newWithSize1)(THCState *state, scalar_t data0) { THCStorage *self = THCStorage_(newWithSize)(state, 1); THCStorage_(set)(state, self, 0, data0); return self; } -THCStorage* THCStorage_(newWithSize2)(THCState *state, real data0, real data1) +THCStorage* THCStorage_(newWithSize2)(THCState *state, scalar_t data0, scalar_t data1) { THCStorage *self = THCStorage_(newWithSize)(state, 2); THCStorage_(set)(state, self, 0, data0); @@ -86,7 +86,7 @@ THCStorage* THCStorage_(newWithSize2)(THCState *state, real data0, real data1) return self; } -THCStorage* THCStorage_(newWithSize3)(THCState *state, real data0, real data1, real data2) +THCStorage* THCStorage_(newWithSize3)(THCState *state, scalar_t data0, scalar_t data1, scalar_t data2) { THCStorage *self = THCStorage_(newWithSize)(state, 3); THCStorage_(set)(state, self, 0, data0); @@ -95,7 +95,7 @@ THCStorage* THCStorage_(newWithSize3)(THCState *state, real data0, real data1, r return self; } -THCStorage* THCStorage_(newWithSize4)(THCState *state, real data0, real data1, real data2, real data3) +THCStorage* 
THCStorage_(newWithSize4)(THCState *state, scalar_t data0, scalar_t data1, scalar_t data2, scalar_t data3) { THCStorage *self = THCStorage_(newWithSize)(state, 4); THCStorage_(set)(state, self, 0, data0); @@ -117,7 +117,7 @@ THCStorage* THCStorage_(newWithDataAndAllocator)( ptrdiff_t size, at::Allocator* allocator) { THStorage* storage = c10::make_intrusive( - at::scalarTypeToDataType(at::CTypeToScalarType::to()), + at::scalarTypeToDataType(at::CTypeToScalarType::to()), size, std::move(data), allocator, diff --git a/aten/src/THC/generic/THCStorage.cu b/aten/src/THC/generic/THCStorage.cu index 95f2bc7163d46f..88ed2e5541820e 100644 --- a/aten/src/THC/generic/THCStorage.cu +++ b/aten/src/THC/generic/THCStorage.cu @@ -2,10 +2,10 @@ #define THC_GENERIC_FILE "generic/THCStorage.cu" #else -void THCStorage_(fill)(THCState *state, THCStorage *self, real value) +void THCStorage_(fill)(THCState *state, THCStorage *self, scalar_t value) { THCThrustAllocator thrustAlloc(state); - thrust::device_ptr self_data(THCStorage_(data)(state, self)); + thrust::device_ptr self_data(THCStorage_(data)(state, self)); thrust::fill( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), diff --git a/aten/src/THC/generic/THCStorage.h b/aten/src/THC/generic/THCStorage.h index c7d808f46567c5..d66b842ba40fc3 100644 --- a/aten/src/THC/generic/THCStorage.h +++ b/aten/src/THC/generic/THCStorage.h @@ -15,20 +15,20 @@ #define THCudaIntStorage THCStorage #define THCudaLongStorage THCStorage -THC_API real* THCStorage_(data)(THCState *state, const THCStorage*); +THC_API scalar_t* THCStorage_(data)(THCState *state, const THCStorage*); THC_API ptrdiff_t THCStorage_(size)(THCState *state, const THCStorage*); THC_API int THCStorage_(elementSize)(THCState *state); /* slow access -- checks everything */ -THC_API void THCStorage_(set)(THCState *state, THCStorage*, ptrdiff_t, real); -THC_API real THCStorage_(get)(THCState *state, const THCStorage*, ptrdiff_t); +THC_API void THCStorage_(set)(THCState *state, THCStorage*, ptrdiff_t, scalar_t); +THC_API scalar_t THCStorage_(get)(THCState *state, const THCStorage*, ptrdiff_t); THC_API THCStorage* THCStorage_(new)(THCState *state); THC_API THCStorage* THCStorage_(newWithSize)(THCState *state, ptrdiff_t size); -THC_API THCStorage* THCStorage_(newWithSize1)(THCState *state, real); -THC_API THCStorage* THCStorage_(newWithSize2)(THCState *state, real, real); -THC_API THCStorage* THCStorage_(newWithSize3)(THCState *state, real, real, real); -THC_API THCStorage* THCStorage_(newWithSize4)(THCState *state, real, real, real, real); +THC_API THCStorage* THCStorage_(newWithSize1)(THCState *state, scalar_t); +THC_API THCStorage* THCStorage_(newWithSize2)(THCState *state, scalar_t, scalar_t); +THC_API THCStorage* THCStorage_(newWithSize3)(THCState *state, scalar_t, scalar_t, scalar_t); +THC_API THCStorage* THCStorage_(newWithSize4)(THCState *state, scalar_t, scalar_t, scalar_t, scalar_t); THC_API THCStorage* THCStorage_(newWithMapping)(THCState *state, const char *filename, ptrdiff_t size, int shared); #ifdef __cplusplus @@ -46,7 +46,7 @@ THC_API void THCStorage_(retain)(THCState *state, THCStorage *storage); THC_API void THCStorage_(free)(THCState *state, THCStorage *storage); THC_API void THCStorage_(resize)(THCState *state, THCStorage *storage, ptrdiff_t size); -THC_API void THCStorage_(fill)(THCState *state, THCStorage *storage, real value); +THC_API void THCStorage_(fill)(THCState *state, THCStorage *storage, scalar_t value); THC_API int 
THCStorage_(getDevice)(THCState* state, const THCStorage* storage); diff --git a/aten/src/THC/generic/THCStorageCopy.cpp b/aten/src/THC/generic/THCStorageCopy.cpp index 546777baaf98c7..397cb7cc9704be 100644 --- a/aten/src/THC/generic/THCStorageCopy.cpp +++ b/aten/src/THC/generic/THCStorageCopy.cpp @@ -8,7 +8,7 @@ void THCStorage_(copyCPU)(THCState *state, THCStorage *self, struct THStorage *s cudaStream_t stream = THCState_getCurrentStream(state); THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, self), THStorage_(data)(src), - self->numel() * sizeof(real), + self->numel() * sizeof(scalar_t), cudaMemcpyHostToDevice, stream)); THCudaCheck(cudaStreamSynchronize(stream)); @@ -40,7 +40,7 @@ void THStorage_(copyCuda)(THCState *state, THStorage *self, struct THCStorage *s cudaStream_t stream = THCState_getCurrentStream(state); THCudaCheck(cudaMemcpyAsync(THStorage_(data)(self), THCStorage_(data)(state, src), - self->numel() * sizeof(real), + self->numel() * sizeof(scalar_t), cudaMemcpyDeviceToHost, stream)); THCudaCheck(cudaStreamSynchronize(stream)); diff --git a/aten/src/THC/generic/THCStorageCopy.cu b/aten/src/THC/generic/THCStorageCopy.cu index 962167c73b82c8..74ff1e18c4b9fb 100644 --- a/aten/src/THC/generic/THCStorageCopy.cu +++ b/aten/src/THC/generic/THCStorageCopy.cu @@ -2,9 +2,9 @@ #define THC_GENERIC_FILE "generic/THCStorageCopy.cu" #else -void THCStorage_(rawCopy)(THCState *state, THCStorage *self, real *src) +void THCStorage_(rawCopy)(THCState *state, THCStorage *self, scalar_t *src) { - THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, self), src, self->numel() * sizeof(real), cudaMemcpyDeviceToDevice, THCState_getCurrentStream(state))); + THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, self), src, self->numel() * sizeof(scalar_t), cudaMemcpyDeviceToDevice, THCState_getCurrentStream(state))); } // conversions are delegated to THCTensor implementation diff --git a/aten/src/THC/generic/THCStorageCopy.h b/aten/src/THC/generic/THCStorageCopy.h index ddef067ddecb34..65cae002b964e3 100644 --- a/aten/src/THC/generic/THCStorageCopy.h +++ b/aten/src/THC/generic/THCStorageCopy.h @@ -4,7 +4,7 @@ /* Support for copy between different Storage types */ -THC_API void THCStorage_(rawCopy)(THCState *state, THCStorage *storage, real *src); +THC_API void THCStorage_(rawCopy)(THCState *state, THCStorage *storage, scalar_t *src); THC_API void THCStorage_(copy)(THCState *state, THCStorage *storage, THCStorage *src); THC_API void THCStorage_(copyByte)(THCState *state, THCStorage *storage, struct THByteStorage *src); THC_API void THCStorage_(copyChar)(THCState *state, THCStorage *storage, struct THCharStorage *src); diff --git a/aten/src/THC/generic/THCTensor.cpp b/aten/src/THC/generic/THCTensor.cpp index 206b7d4b467013..db2b44511c2329 100644 --- a/aten/src/THC/generic/THCTensor.cpp +++ b/aten/src/THC/generic/THCTensor.cpp @@ -50,7 +50,7 @@ int64_t THCTensor_(strideLegacyNoScalars)(THCState *state, const THCTensor *self return THTensor_strideLegacyNoScalars(self, dim); } -real *THCTensor_(data)(THCState *state, const THCTensor *self) +scalar_t *THCTensor_(data)(THCState *state, const THCTensor *self) { if(THTensor_getStoragePtr(self)) return (THCStorage_(data)(state, THTensor_getStoragePtr(self))+self->storage_offset()); @@ -524,56 +524,56 @@ void THCTensor_(resizeNd)(THCState *state, THCTensor *self, int nDimension, cons THCTensor_resizeNd(state, self, nDimension, size, stride); } -void THCTensor_(set1d)(THCState *state, THCTensor *tensor, int64_t x0, real value) +void THCTensor_(set1d)(THCState 
*state, THCTensor *tensor, int64_t x0, scalar_t value) { THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension"); THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range"); THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0), value); } -real THCTensor_(get1d)(THCState *state, const THCTensor *tensor, int64_t x0) +scalar_t THCTensor_(get1d)(THCState *state, const THCTensor *tensor, int64_t x0) { THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension"); THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range"); return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0)); } -void THCTensor_(set2d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, real value) +void THCTensor_(set2d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, scalar_t value) { THArgCheck(tensor->dim() == 2, 1, "tensor must have two dimensions"); THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range"); THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1), value); } -real THCTensor_(get2d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1) +scalar_t THCTensor_(get2d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1) { THArgCheck(tensor->dim() == 2, 1, "tensor must have two dimensions"); THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range"); return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)); } -void THCTensor_(set3d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, real value) +void THCTensor_(set3d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, scalar_t value) { THArgCheck(tensor->dim() == 3, 1, "tensor must have three dimensions"); THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range"); THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2), value); } -real THCTensor_(get3d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2) +scalar_t THCTensor_(get3d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2) { THArgCheck(tensor->dim() == 3, 1, "tensor must have three dimensions"); THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range"); return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)); } -void THCTensor_(set4d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, real value) +void THCTensor_(set4d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value) { THArgCheck(tensor->dim() == 4, 1, "tensor must have four dimensions"); THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of 
range"); THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3), value); } -real THCTensor_(get4d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3) +scalar_t THCTensor_(get4d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3) { THArgCheck(tensor->dim() == 4, 1, "tensor must have four dimensions"); THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range"); diff --git a/aten/src/THC/generic/THCTensor.h b/aten/src/THC/generic/THCTensor.h index 4f646d6fa15196..4cf77bde927c92 100644 --- a/aten/src/THC/generic/THCTensor.h +++ b/aten/src/THC/generic/THCTensor.h @@ -28,7 +28,7 @@ THC_API int64_t THCTensor_(size)(THCState *state, const THCTensor *self, int dim THC_API int64_t THCTensor_(sizeLegacyNoScalars)(THCState *state, const THCTensor *self, int dim); THC_API int64_t THCTensor_(stride)(THCState *state, const THCTensor *self, int dim); THC_API int64_t THCTensor_(strideLegacyNoScalars)(THCState *state, const THCTensor *self, int dim); -THC_API real *THCTensor_(data)(THCState *state, const THCTensor *self); +THC_API scalar_t *THCTensor_(data)(THCState *state, const THCTensor *self); THC_API void THCTensor_(setFlag)(THCState *state, THCTensor *self, const char flag); THC_API void THCTensor_(clearFlag)(THCState *state, THCTensor *self, const char flag); @@ -113,15 +113,15 @@ THC_API void THCTensor_(free)(THCState *state, THCTensor *self); THC_API void THCTensor_(freeCopyTo)(THCState *state, THCTensor *self, THCTensor *dst); /* Slow access methods [check everything] */ -THC_API void THCTensor_(set1d)(THCState *state, THCTensor *tensor, int64_t x0, real value); -THC_API void THCTensor_(set2d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, real value); -THC_API void THCTensor_(set3d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, real value); -THC_API void THCTensor_(set4d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, real value); - -THC_API real THCTensor_(get1d)(THCState *state, const THCTensor *tensor, int64_t x0); -THC_API real THCTensor_(get2d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1); -THC_API real THCTensor_(get3d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2); -THC_API real THCTensor_(get4d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3); +THC_API void THCTensor_(set1d)(THCState *state, THCTensor *tensor, int64_t x0, scalar_t value); +THC_API void THCTensor_(set2d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, scalar_t value); +THC_API void THCTensor_(set3d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, scalar_t value); +THC_API void THCTensor_(set4d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value); + +THC_API scalar_t THCTensor_(get1d)(THCState *state, const THCTensor *tensor, int64_t x0); +THC_API scalar_t THCTensor_(get2d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1); +THC_API scalar_t THCTensor_(get3d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2); +THC_API scalar_t THCTensor_(get4d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, 
int64_t x3); /* CUDA-specific functions */ THC_API int THCTensor_(getDevice)(THCState *state, const THCTensor *self); diff --git a/aten/src/THC/generic/THCTensorCopy.cpp b/aten/src/THC/generic/THCTensorCopy.cpp index 1a1247e2b06431..96ab307182639c 100644 --- a/aten/src/THC/generic/THCTensorCopy.cpp +++ b/aten/src/THC/generic/THCTensorCopy.cpp @@ -14,8 +14,8 @@ void THCTensor_(copyCPU)(THCState *state, THCTensor *self, struct THTensor *src) cudaStream_t stream = THCState_getCurrentStream(state); THCudaCheck(cudaMemcpyAsync(THCTensor_(data)(state,selfc), - src->data(), - THTensor_(nElement)(src) * sizeof(real), + src->data(), + THTensor_(nElement)(src) * sizeof(scalar_t), cudaMemcpyHostToDevice, stream)); THCudaCheck(cudaStreamSynchronize(stream)); @@ -61,9 +61,9 @@ void THTensor_(copyCuda)(THCState *state, THTensor *self, struct THCTensor *src) src = THCTensor_(newContiguous)(state, src); cudaStream_t stream = THCState_getCurrentStream(state); - THCudaCheck(cudaMemcpyAsync(selfc->data(), + THCudaCheck(cudaMemcpyAsync(selfc->data(), THCTensor_(data)(state, src), - THCTensor_(nElement)(state, src) * sizeof(real), + THCTensor_(nElement)(state, src) * sizeof(scalar_t), cudaMemcpyDeviceToHost, stream)); THCudaCheck(cudaStreamSynchronize(stream)); @@ -122,8 +122,8 @@ void THCTensor_(copyAsyncCPU)(THCState *state, THCTensor *self, struct THTensor THCStream *stream = THCState_getStream(state); THCudaCheck(cudaMemcpyAsync(THCTensor_(data)(state, self), - src->data(), - THTensor_(nElement)(src) * sizeof(real), + src->data(), + THTensor_(nElement)(src) * sizeof(scalar_t), cudaMemcpyHostToDevice, THCStream_stream(stream))); @@ -152,9 +152,9 @@ void THTensor_(copyAsyncCuda)(THCState *state, THTensor *self, struct THCTensor } THCStream *stream = THCState_getStream(state); - THCudaCheck(cudaMemcpyAsync(self->data(), + THCudaCheck(cudaMemcpyAsync(self->data(), THCTensor_(data)(state, src), - THCTensor_(nElement)(state, src) * sizeof(real), + THCTensor_(nElement)(state, src) * sizeof(scalar_t), cudaMemcpyDeviceToHost, THCStream_stream(stream))); diff --git a/aten/src/THC/generic/THCTensorCopy.cu b/aten/src/THC/generic/THCTensorCopy.cu index bf39c38f32a85f..0320fdfe8035a5 100644 --- a/aten/src/THC/generic/THCTensorCopy.cu +++ b/aten/src/THC/generic/THCTensorCopy.cu @@ -5,23 +5,23 @@ THC_API void THCTensor_(copy)(THCState* state, THCTensor* dst, THCTensor* src) { if (dst == src) return; - THC_copyTensor(state, dst, src); + THC_copyTensor(state, dst, src); } template <> -THCTensor *THCTensor_newClone(THCState *state, THCTensor *self) { +THCTensor *THCTensor_newClone(THCState *state, THCTensor *self) { THCTensor* tensor = THCTensor_new( state, at::dataTypeToScalarType(THTensor_getStoragePtr(self)->dtype())); THCTensor_resizeAs(state, tensor, self); - THC_copyTensor(state, tensor, self); + THC_copyTensor(state, tensor, self); return tensor; } template <> -THCTensor *THCTensor_newContiguous(THCState *state, THCTensor *self) +THCTensor *THCTensor_newContiguous(THCState *state, THCTensor *self) { if(!self->is_contiguous()) { - return THCTensor_newClone(state, self); + return THCTensor_newClone(state, self); } else { THCTensor_retain(state, self); return self; @@ -30,30 +30,30 @@ THCTensor *THCTensor_newContiguous(THCState *state, THCTensor *self) template <> -void THCTensor_freeCopyTo(THCState *state, THCTensor *self, THCTensor *dst) { +void THCTensor_freeCopyTo(THCState *state, THCTensor *self, THCTensor *dst) { if(self != dst) - THC_copyTensor(state, dst, self); + THC_copyTensor(state, dst, self); 
THCTensor_free(state, self); } template <> -void THCTensor_copyIgnoringOverlaps(THCState* state, THCTensor* dst, THCTensor* src) { +void THCTensor_copyIgnoringOverlaps(THCState* state, THCTensor* dst, THCTensor* src) { // Called when we are copying into an overlapping index `dst`, but // we don't care which writer wins. Hacky but it works. // This is itself invoked by pointwiseApply2 / THCTensor_copy in // case that there are write overlaps. // FIXME: really, overlapping writes should be illegal/an error in Torch - THC_pointwiseApply2( + THC_pointwiseApply2( state, dst, src, - CopyOp(), + CopyOp(), ReadOnly, /* ignore overwrites */ ReadOnly); } THC_API void THCTensor_(copyIgnoringOverlaps)(THCState* state, THCTensor* dst, THCTensor* src) { - THCTensor_copyIgnoringOverlaps(state, dst, src); + THCTensor_copyIgnoringOverlaps(state, dst, src); } #define IMPLEMENT_THC_CUDA_TENSOR_COPY(TYPEC, TYPECUDA, SCALARC) \ @@ -61,7 +61,7 @@ THCTensor_(copyIgnoringOverlaps)(THCState* state, THCTensor* dst, THCTensor* src THCTensor_(copyCuda##TYPEC)(THCState *state, \ THCTensor *self, \ THCuda##TYPECUDA##Tensor *src) { \ - THC_copyTensor(state, self, src); \ + THC_copyTensor(state, self, src); \ } IMPLEMENT_THC_CUDA_TENSOR_COPY(Byte, Byte, uint8_t) diff --git a/aten/src/THC/generic/THCTensorIndex.cu b/aten/src/THC/generic/THCTensorIndex.cu index fe98c3e6e06a4f..0b985487bbb14f 100644 --- a/aten/src/THC/generic/THCTensorIndex.cu +++ b/aten/src/THC/generic/THCTensorIndex.cu @@ -76,7 +76,7 @@ static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. 
-bool THCTensor_(indexShouldBeMajor)(TensorInfo &info, +bool THCTensor_(indexShouldBeMajor)(TensorInfo &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 @@ -148,13 +148,13 @@ void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongT if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo srcInfo = + getTensorInfo(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); @@ -166,43 +166,43 @@ void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongT // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { - SMALL_INDEX(real, unsigned int, 1, 1, -2); + SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { - SMALL_INDEX(real, unsigned int, 2, 2, -2); + SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { - SMALL_INDEX(real, unsigned int, 3, 3, -2); + SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { - SMALL_INDEX(real, unsigned int, -1, -1, -1); + SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { - LARGE_INDEX(real, unsigned int, 1, 1, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 2, 2, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { - LARGE_INDEX(real, unsigned int, 2, 2, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 3, 3, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { - LARGE_INDEX(real, unsigned int, 3, 3, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { - LARGE_INDEX(real, unsigned int, -1, -1, -1, true); + LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo srcInfo = + getTensorInfo(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); @@ -210,7 +210,7 @@ void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongT getTensorInfo(state, indices); indicesInfo.collapseDims(); - LARGE_INDEX(real, uint64_t, -1, -1, -1, true); + LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX @@ -229,14 +229,14 @@ void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLon "tried to take from an empty tensor"); THCTensor_(resizeNd)(state, dst, index->dim(), THTensor_getSizePtr(index), NULL); - dispatchTakePut(state, src, dst, index); + dispatchTakePut(state, src, dst, index); } static void THCTensor_(sort_indices)(THCState *state, 
THCudaLongTensor *index, THCTensor *src) { THCThrustAllocator thrustAlloc(state); auto index_iter = thrust::device_ptr(THCudaLongTensor_data(state, index)); - auto src_iter = thrust::device_ptr(THCTensor_(data)(state, src)); + auto src_iter = thrust::device_ptr(THCTensor_(data)(state, src)); auto numel = THCTensor_(numel)(state, src); thrust::sort_by_key( @@ -272,12 +272,12 @@ void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, T THCTensor* sorted_src = THCTensor_(newClone)(state, src); THCTensor_(sort_indices)(state, sorted_index, sorted_src); - dispatchTakePut(state, dst, sorted_src, sorted_index); + dispatchTakePut(state, dst, sorted_src, sorted_index); THCTensor_(free)(state, sorted_src); THCudaLongTensor_free(state, sorted_index); } else { - dispatchTakePut(state, dst, src, index); + dispatchTakePut(state, dst, src, index); } } @@ -336,13 +336,13 @@ void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTe if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo srcInfo = + getTensorInfo(state, src); int srcAddDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcAddDim); @@ -354,43 +354,43 @@ void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTe // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { - SMALL_INDEX(real, unsigned int, 1, 1, -2); + SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { - SMALL_INDEX(real, unsigned int, 2, 2, -2); + SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { - SMALL_INDEX(real, unsigned int, 3, 3, -2); + SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { - SMALL_INDEX(real, unsigned int, -1, -1, -1); + SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { - LARGE_INDEX(real, unsigned int, 1, 1, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 2, 2, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { - LARGE_INDEX(real, unsigned int, 2, 2, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 3, 3, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { - LARGE_INDEX(real, unsigned int, 3, 3, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { - LARGE_INDEX(real, unsigned int, -1, -1, -1, true); + LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo srcInfo = + getTensorInfo(state, src); int srcAddDim = srcInfo.collapseDims(dim); 
srcInfo.reduceDim(srcAddDim); @@ -398,14 +398,14 @@ void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTe getTensorInfo(state, indices); indicesInfo.collapseDims(); - LARGE_INDEX(real, uint64_t, -1, -1, -1, true); + LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } -void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val) +void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, scalar_t val) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); @@ -455,8 +455,8 @@ void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongT if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, indices)) { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); @@ -468,38 +468,38 @@ void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongT // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { - SMALL_INDEX(real, unsigned int, 1, -2); + SMALL_INDEX(scalar_t, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { - SMALL_INDEX(real, unsigned int, 2, -2); + SMALL_INDEX(scalar_t, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { - SMALL_INDEX(real, unsigned int, 3, -2); + SMALL_INDEX(scalar_t, unsigned int, 3, -2); } else { - SMALL_INDEX(real, unsigned int, -1, -1); + SMALL_INDEX(scalar_t, unsigned int, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim); if (dstInfo.dims == 1 && indContig) { - LARGE_INDEX(real, unsigned int, 1, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 1, -2, true); } else if (dstInfo.dims == 2 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 2, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 2, -2, true); } else { - LARGE_INDEX(real, unsigned int, 2, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 2, -2, false); } } else if (dstInfo.dims == 3 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 3, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 3, -2, true); } else { - LARGE_INDEX(real, unsigned int, 3, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 3, -2, false); } } else { - LARGE_INDEX(real, unsigned int, -1, -1, true); + LARGE_INDEX(scalar_t, unsigned int, -1, -1, true); } } } else { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); @@ -507,7 +507,7 @@ void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongT getTensorInfo(state, indices); indicesInfo.collapseDims(); - LARGE_INDEX(real, uint64_t, -1, -1, true); + LARGE_INDEX(scalar_t, uint64_t, -1, -1, true); } #undef SMALL_INDEX @@ -581,13 +581,13 @@ void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, in if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo srcInfo 
= + getTensorInfo(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); @@ -599,43 +599,43 @@ void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, in // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { - SMALL_INDEX(real, unsigned int, 1, 1, -2); + SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { - SMALL_INDEX(real, unsigned int, 2, 2, -2); + SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { - SMALL_INDEX(real, unsigned int, 3, 3, -2); + SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { - SMALL_INDEX(real, unsigned int, -1, -1, -1); + SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { - LARGE_INDEX(real, unsigned int, 1, 1, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 2, 2, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { - LARGE_INDEX(real, unsigned int, 2, 2, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { - LARGE_INDEX(real, unsigned int, 3, 3, -2, true); + LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { - LARGE_INDEX(real, unsigned int, 3, 3, -2, false); + LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { - LARGE_INDEX(real, unsigned int, -1, -1, -1, true); + LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { - TensorInfo dstInfo = - getTensorInfo(state, dst); + TensorInfo dstInfo = + getTensorInfo(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo srcInfo = + getTensorInfo(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); @@ -643,7 +643,7 @@ void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, in getTensorInfo(state, indices); indicesInfo.collapseDims(); - LARGE_INDEX(real, uint64_t, -1, -1, -1, true); + LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX diff --git a/aten/src/THC/generic/THCTensorIndex.h b/aten/src/THC/generic/THCTensorIndex.h index 03ff54c18033a1..589cedb2cbc3cf 100644 --- a/aten/src/THC/generic/THCTensorIndex.h +++ b/aten/src/THC/generic/THCTensorIndex.h @@ -4,7 +4,7 @@ THC_API void THCTensor_(indexCopy)(THCState *state, THCTensor *res_, int dim, THCudaLongTensor *indices, THCTensor *src); THC_API void THCTensor_(indexAdd)(THCState *state, THCTensor *res_, int dim, THCudaLongTensor *indices, THCTensor *src); -THC_API void THCTensor_(indexFill)(THCState *state, THCTensor *tensor, int dim, THCudaLongTensor *index, real val); +THC_API void THCTensor_(indexFill)(THCState *state, THCTensor *tensor, int dim, THCudaLongTensor *index, scalar_t val); THC_API void THCTensor_(indexSelect)(THCState *state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index); THC_API void THCTensor_(take)(THCState *state, THCTensor *res_, THCTensor *src, THCudaLongTensor *index); THC_API void THCTensor_(put)(THCState *state, THCTensor *res_, THCudaLongTensor *indices, THCTensor *src, int 
accumulate); diff --git a/aten/src/THC/generic/THCTensorMasked.cu b/aten/src/THC/generic/THCTensorMasked.cu index d941134edd429b..f7e3e3f32a9a18 100644 --- a/aten/src/THC/generic/THCTensorMasked.cu +++ b/aten/src/THC/generic/THCTensorMasked.cu @@ -5,15 +5,15 @@ THC_API void THCTensor_(maskedFill)(THCState* state, - THCTensor *tensor, THCudaByteTensor *mask, real value) + THCTensor *tensor, THCudaByteTensor *mask, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask)); THArgCheck(THCTensor_(nElement)(state, tensor) == THCudaByteTensor_nElement(state, mask), 2, "sizes do not match"); - if (!THC_pointwiseApply2(state, tensor, mask, - TensorMaskedFillOp(value))) { + if (!THC_pointwiseApply2(state, tensor, mask, + TensorMaskedFillOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } @@ -22,7 +22,7 @@ THCTensor_(maskedFill)(THCState* state, THC_API void THCTensor_(maskedFillByte)(THCState* state, - THCTensor *tensor, THByteTensor *mask, real value) + THCTensor *tensor, THByteTensor *mask, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {}); @@ -85,9 +85,9 @@ THCTensor_(maskedCopy)(THCState* state, // update `tensor` where `mask` == 1 but pull from `src` at // maskPrefixSum - bool status = THC_pointwiseApply3( + bool status = THC_pointwiseApply3( state, tensor, mask, maskPrefixSum, - TensorMaskedCopyOp( + TensorMaskedCopyOp( THCTensor_(data)(state, contigSrc))); THCTensor_(free)(state, contigSrc); @@ -152,9 +152,9 @@ THCTensor_(maskedSelect)(THCState* state, maskPrefixSumData); // Then copy over the masked elements at their desired output index - bool status = THC_pointwiseApply3( + bool status = THC_pointwiseApply3( state, mask, maskPrefixSum, - src, TensorMaskedSelectOp( + src, TensorMaskedSelectOp( THCTensor_(data)(state, tensor))); THCudaLongTensor_free(state, maskLong); diff --git a/aten/src/THC/generic/THCTensorMasked.h b/aten/src/THC/generic/THCTensorMasked.h index 98f5aee1f28546..401737772b561b 100644 --- a/aten/src/THC/generic/THCTensorMasked.h +++ b/aten/src/THC/generic/THCTensorMasked.h @@ -5,13 +5,13 @@ THC_API void THCTensor_(maskedFill)(THCState *state, THCTensor *tensor, THCudaByteTensor *mask, - real value); + scalar_t value); // FIXME: remove now that we have THCudaByteTensor? 
THC_API void THCTensor_(maskedFillByte)(THCState *state, THCTensor *tensor, THByteTensor *mask, - real value); + scalar_t value); THC_API void THCTensor_(maskedCopy)(THCState *state, THCTensor *tensor, diff --git a/aten/src/THC/generic/THCTensorMath.cu b/aten/src/THC/generic/THCTensorMath.cu index 54fb093422be7a..9ffe626dd8425f 100644 --- a/aten/src/THC/generic/THCTensorMath.cu +++ b/aten/src/THC/generic/THCTensorMath.cu @@ -3,12 +3,12 @@ #else THC_API void -THCTensor_(fill)(THCState* state, THCTensor *self_, real value) +THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); - if (!THC_pointwiseApply1( - state, self_, TensorFillOp(value))) { + if (!THC_pointwiseApply1( + state, self_, TensorFillOp(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } @@ -22,12 +22,12 @@ THCTensor_(zero)(THCState *state, THCTensor *self_) if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_), 0, - sizeof(real) * THCTensor_(nElement)(state, self_), + sizeof(scalar_t) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { - if (!THC_pointwiseApply1( + if (!THC_pointwiseApply1( state, self_, - TensorFillOp(ScalarConvert::to(0)))) { + TensorFillOp(ScalarConvert::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } @@ -48,7 +48,7 @@ THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); - THCTensor_(fill)(state, r_, ScalarConvert::to(1)); + THCTensor_(fill)(state, r_, ScalarConvert::to(1)); } ptrdiff_t @@ -164,11 +164,11 @@ void THCTensor_(catArray)(THCState *state, THCTensor *result, // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. 
- real *data = THCTensor_(data)(state, result); + scalar_t *data = THCTensor_(data)(state, result); // Kernel Parameter - size_t tensorMetadataSize = sizeof(CatArrInputTensor) * CAT_ARRAY_BATCH_SIZE; - auto d_inputs = static_cast *>(THCudaMalloc(state, tensorMetadataSize)); + size_t tensorMetadataSize = sizeof(CatArrInputTensor) * CAT_ARRAY_BATCH_SIZE; + auto d_inputs = static_cast *>(THCudaMalloc(state, tensorMetadataSize)); OutputTensorSizeStride param; @@ -182,7 +182,7 @@ void THCTensor_(catArray)(THCState *state, THCTensor *result, // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ - CatArrayBatchedCopy<<>>(data, d_inputs, param, dimension, param.outputStride[dimension]); + CatArrayBatchedCopy<<>>(data, d_inputs, param, dimension, param.outputStride[dimension]); // Now we loop offset = 0; @@ -190,7 +190,7 @@ void THCTensor_(catArray)(THCState *state, THCTensor *result, // Re-allocate stackInputs every iteration to avoid read-after-write hazard { auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize); - CatArrInputTensor* stackInputs = static_cast*>(stackInputs_owner.get()); + CatArrInputTensor* stackInputs = static_cast*>(stackInputs_owner.get()); cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension); @@ -207,7 +207,7 @@ void THCTensor_(catArray)(THCState *state, THCTensor *result, THCudaCheck(cudaMemcpyAsync( d_inputs, stackInputs, - j * sizeof(CatArrInputTensor), + j * sizeof(CatArrInputTensor), cudaMemcpyHostToDevice, THCStream_stream(stream))); THCudaHostRecord(state, stackInputs); @@ -268,7 +268,7 @@ void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); - thrust::device_ptr self_data(THCTensor_(data)(state, self)); + thrust::device_ptr self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(nDimensionLegacyNoScalars)(state, self); int64_t N = THCTensor_(nElement)(state, self); @@ -296,7 +296,7 @@ void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, idxlast, self_data, strided_tensor.begin(), - NonZeroOp() + NonZeroOp() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); @@ -341,7 +341,7 @@ void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_ const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? k * stride1 : -k * stride0); - THCTensor_copyFromDiagonal<<>> + THCTensor_copyFromDiagonal<<>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } } else { @@ -356,7 +356,7 @@ void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_ const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? 
k * stride1 : -k * stride0); - THCTensor_copyToDiagonal<<>> + THCTensor_copyToDiagonal<<>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } } @@ -381,7 +381,7 @@ void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m) THCTensor *diag = THCTensor_(newWithStorage1d)(state, THTensor_getStoragePtr(self_), self_->storage_offset(), sz, stride); - THCTensor_(fill)(state, diag, ScalarConvert::to(1)); + THCTensor_(fill)(state, diag, ScalarConvert::to(1)); THCTensor_(free)(state, diag); } @@ -397,7 +397,7 @@ accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) -void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { +void THCTensor_(linspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); // NumPy allows you to pass different points even if n <= 1 -- should we? THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); @@ -409,10 +409,10 @@ void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_ THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ // if r_ is contiguous we can direct work on it : THCTensor_(newContiguous)(state, r_); - real step = THCNumerics::div(THCNumerics::sub(b, a), - ScalarConvert::to(n - 1)); - LinspaceOp linspace_method(a, step); - thrust::device_ptr data_(THCTensor_(data)(state, r)); + scalar_t step = THCNumerics::div(THCNumerics::sub(b, a), + ScalarConvert::to(n - 1)); + LinspaceOp linspace_method(a, step); + thrust::device_ptr data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_ THCTensor_(freeCopyTo)(state, r, r_); @@ -421,22 +421,22 @@ void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_ THCudaCheck(cudaGetLastError()); } -void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { +void THCTensor_(logspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); // NumPy allows you to pass different points even if n <= 1 -- should we? THArgCheck(n > 1 || ((n == 0 || n == 1) && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 0) { // skip - } else if (n == 1) THCTensor_(fill)(state, r_, THCNumerics::exp10(a)); + } else if (n == 1) THCTensor_(fill)(state, r_, THCNumerics::exp10(a)); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? 
r_ : THCTensor_(newContiguous)(state, r_); - real step = THCNumerics::div(THCNumerics::sub(b, a), - ScalarConvert::to(n - 1)); - LogspaceOp logspace_method(a, step); - thrust::device_ptr data_(THCTensor_(data)(state, r)); + scalar_t step = THCNumerics::div(THCNumerics::sub(b, a), + ScalarConvert::to(n - 1)); + LogspaceOp logspace_method(a, step); + thrust::device_ptr data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, logspace_method); if (!THCTensor_(isContiguous)(state, r_)) { THCTensor_(freeCopyTo)(state, r, r_); @@ -455,8 +455,8 @@ void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xma ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); - LinspaceOp linspace_method(xmin, step); - thrust::device_ptr data_(THCTensor_(data)(state, r)); + LinspaceOp linspace_method(xmin, step); + thrust::device_ptr data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); @@ -470,8 +470,8 @@ void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xm ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert::to(xmax - xmin) / step); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); - LinspaceOp linspace_method(xmin, step); - thrust::device_ptr data_(THCTensor_(data)(state, r)); + LinspaceOp linspace_method(xmin, step); + thrust::device_ptr data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); diff --git a/aten/src/THC/generic/THCTensorMath.h b/aten/src/THC/generic/THCTensorMath.h index 1cd75340f67895..374147b6feee03 100644 --- a/aten/src/THC/generic/THCTensorMath.h +++ b/aten/src/THC/generic/THCTensorMath.h @@ -2,7 +2,7 @@ #define THC_GENERIC_FILE "generic/THCTensorMath.h" #else -THC_API void THCTensor_(fill)(THCState *state, THCTensor *self, real value); +THC_API void THCTensor_(fill)(THCState *state, THCTensor *self, scalar_t value); THC_API void THCTensor_(zero)(THCState *state, THCTensor *self); THC_API void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor* input); @@ -21,8 +21,8 @@ THC_API accreal THCTensor_(trace)(THCState *state, THCTensor *self); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) -THC_API void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n); -THC_API void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n); +THC_API void THCTensor_(linspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n); +THC_API void THCTensor_(logspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n); #endif diff --git a/aten/src/THC/generic/THCTensorMathBlas.cu b/aten/src/THC/generic/THCTensorMathBlas.cu index 39c29d754f5d20..a37645de394de8 100644 --- a/aten/src/THC/generic/THCTensorMathBlas.cu +++ b/aten/src/THC/generic/THCTensorMathBlas.cu @@ -45,7 +45,7 @@ THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) } THC_API void -THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) +THCTensor_(addmv)(THCState *state, THCTensor *r_, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *mat, 
THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); @@ -125,9 +125,9 @@ THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real // In cublasSgemv, cublasDgemv (x,0).mv(0) does not // handle beta, whereas cublasSgemm, cublasDgemm do for case where (x,0).mm(0,y). if (THTensor_sizeLegacyNoScalars(vec, 0) == 0 && mat->size(0) != 0) { - if(THCNumerics::eq(beta, ScalarConvert::to(0))) { + if(THCNumerics::eq(beta, ScalarConvert::to(0))) { THCTensor_(zero)(state, r_); - } else if(THCNumerics::ne(beta, ScalarConvert::to(1))) { + } else if(THCNumerics::ne(beta, ScalarConvert::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } } @@ -153,7 +153,7 @@ THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real } THC_API void -THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) +THCTensor_(addr)(THCState *state, THCTensor *r_, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); @@ -180,9 +180,9 @@ THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real a THCTensor_(copy)(state, r_, t); } - if(THCNumerics::eq(beta, ScalarConvert::to(0))) { + if(THCNumerics::eq(beta, ScalarConvert::to(0))) { THCTensor_(zero)(state, r_); - } else if(THCNumerics::ne(beta, ScalarConvert::to(1))) { + } else if(THCNumerics::ne(beta, ScalarConvert::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } @@ -251,7 +251,7 @@ THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real a } THC_API void -THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) +THCTensor_(addmm)(THCState *state, THCTensor *r_, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) @@ -281,7 +281,7 @@ THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real if(t != r_) { THCTensor_(resizeAs)(state, r_, t); - if (ScalarConvert::to(beta) != 0.0) { + if (ScalarConvert::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } @@ -415,8 +415,8 @@ THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real } THC_API void -THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, - real alpha, THCTensor *batch1, THCTensor *batch2) { +THCTensor_(addbmm)(THCState *state, THCTensor *result, scalar_t beta, THCTensor *t, + scalar_t alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 2, 4, "expected 2D tensor"); @@ -440,7 +440,7 @@ THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, if (t != result) { THCTensor_(resizeAs)(state, result, t); - if (ScalarConvert::to(beta) != 0.0) { + if (ScalarConvert::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } @@ -452,7 +452,7 @@ THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, 
result, beta, result, alpha, slice1, slice2); - beta = ScalarConvert::to(1); + beta = ScalarConvert::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); @@ -461,7 +461,7 @@ THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, #endif } -__global__ void createBatchGemmBuffer(const real** buffer, real* data, +__global__ void createBatchGemmBuffer(const scalar_t** buffer, scalar_t* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { @@ -469,8 +469,8 @@ __global__ void createBatchGemmBuffer(const real** buffer, real* data, } } -__global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffer2, const real ** buffer3, real* data1, - real * data2, real * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { +__global__ void createBatchGemmBuffer3(const scalar_t** buffer1, const scalar_t ** buffer2, const scalar_t ** buffer3, scalar_t* data1, + scalar_t * data2, scalar_t * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; @@ -480,8 +480,8 @@ __global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffe } THC_API void -THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, - real alpha, THCTensor *batch1, THCTensor *batch2) { +THCTensor_(baddbmm)(THCState *state, THCTensor *result, scalar_t beta, THCTensor *t, + scalar_t alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 3, 4, "expected 3D tensor"); @@ -500,7 +500,7 @@ THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, if (t != result) { THCTensor_(resizeAs)(state, result, t); - if (ScalarConvert::to(beta) != 0.0) { + if (ScalarConvert::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } @@ -594,18 +594,18 @@ THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if CUDA_VERSION < 8000 && !defined __HIP_PLATFORM_HCC__ - size_t matrices_size = num_batches * sizeof(real*); + size_t matrices_size = num_batches * sizeof(scalar_t*); // Copy pointers to device. 
- auto d_matrices1 = static_cast(THCudaMalloc(state, matrices_size)); - auto d_matrices2 = static_cast(THCudaMalloc(state, matrices_size)); - auto d_result_matrices = static_cast(THCudaMalloc(state, matrices_size)); + auto d_matrices1 = static_cast(THCudaMalloc(state, matrices_size)); + auto d_matrices2 = static_cast(THCudaMalloc(state, matrices_size)); + auto d_result_matrices = static_cast(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer3<<>>( - d_matrices1, d_matrices2, (const real**)d_result_matrices, THCTensor_(data)(state, batch1_), + d_matrices1, d_matrices2, (const scalar_t**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches); @@ -806,14 +806,14 @@ THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTens int *info_gpu = THCudaIntTensor_data(state, rinfo_); // Copy pointers to device. - size_t matrices_size = num_batches * sizeof(real*); - auto d_result = static_cast(THCudaMalloc(state, matrices_size)); + size_t matrices_size = num_batches * sizeof(scalar_t*); + auto d_result = static_cast(THCudaMalloc(state, matrices_size)); if (num_batches > 0) { const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<>>( - (const real**)d_result, THCTensor_(data)(state, ra__), + (const scalar_t**)d_result, THCTensor_(data)(state, ra__), ra__->stride(0), num_batches); } @@ -921,16 +921,16 @@ THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b } int64_t num_batches = rb_->size(0); - size_t matrices_size = num_batches * sizeof(real*); + size_t matrices_size = num_batches * sizeof(scalar_t*); // Copy pointers to device. 
- auto d_result = static_cast(THCudaMalloc(state, matrices_size)); - auto d_atf = static_cast(THCudaMalloc(state, matrices_size)); + auto d_result = static_cast(THCudaMalloc(state, matrices_size)); + auto d_atf = static_cast(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<>>( - (const real**)d_result, THCTensor_(data)(state, rb__), + (const scalar_t**)d_result, THCTensor_(data)(state, rb__), rb__->stride(0), num_batches); createBatchGemmBuffer<<>>( d_atf, THCTensor_(data)(state, atf_), diff --git a/aten/src/THC/generic/THCTensorMathBlas.h b/aten/src/THC/generic/THCTensorMathBlas.h index 1279d7e7c41106..20a9f5837ed446 100644 --- a/aten/src/THC/generic/THCTensorMathBlas.h +++ b/aten/src/THC/generic/THCTensorMathBlas.h @@ -3,11 +3,11 @@ #else THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src); -THC_API void THCTensor_(addmv)(THCState *state, THCTensor *self, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec); -THC_API void THCTensor_(addmm)(THCState *state, THCTensor *self, real beta, THCTensor *t, real alpha, THCTensor *mat1, THCTensor *mat2); -THC_API void THCTensor_(addr)(THCState *state, THCTensor *self, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2); -THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2); -THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2); +THC_API void THCTensor_(addmv)(THCState *state, THCTensor *self, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *mat, THCTensor *vec); +THC_API void THCTensor_(addmm)(THCState *state, THCTensor *self, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *mat1, THCTensor *mat2); +THC_API void THCTensor_(addr)(THCState *state, THCTensor *self, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *vec1, THCTensor *vec2); +THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *batch1, THCTensor *batch2); +THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, scalar_t beta, THCTensor *t, scalar_t alpha, THCTensor *batch1, THCTensor *batch2); THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a); THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *atf, THCudaIntTensor *pivots); diff --git a/aten/src/THC/generic/THCTensorMathCompare.cu b/aten/src/THC/generic/THCTensorMathCompare.cu index fca7046af615bb..0a0041ab9e4784 100644 --- a/aten/src/THC/generic/THCTensorMathCompare.cu +++ b/aten/src/THC/generic/THCTensorMathCompare.cu @@ -2,100 +2,100 @@ #define THC_GENERIC_FILE "generic/THCTensorMathCompare.cu" #else -THC_API void THCTensor_(ltValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(ltValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorLTValueOp(state, self_, src, + TensorLTValueOp(value)); } -THC_API void THCTensor_(gtValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(gtValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, 
scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorGTValueOp(state, self_, src, + TensorGTValueOp(value)); } -THC_API void THCTensor_(leValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(leValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorLEValueOp(state, self_, src, + TensorLEValueOp(value)); } -THC_API void THCTensor_(geValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(geValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorGEValueOp(state, self_, src, + TensorGEValueOp(value)); } -THC_API void THCTensor_(eqValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(eqValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorEQValueOp(state, self_, src, + TensorEQValueOp(value)); } -THC_API void THCTensor_(neValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(neValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorNEValueOp(state, self_, src, + TensorNEValueOp(value)); } -THC_API void THCTensor_(ltValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(ltValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorLTValueOp(value)); + THC_logicalValue(state, self_, src, + TensorLTValueOp(value)); } -THC_API void THCTensor_(gtValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(gtValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorGTValueOp(value)); + THC_logicalValue(state, self_, src, + TensorGTValueOp(value)); } -THC_API void THCTensor_(leValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(leValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorLEValueOp(value)); + THC_logicalValue(state, self_, src, + TensorLEValueOp(value)); } -THC_API void THCTensor_(geValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(geValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorGEValueOp(value)); + THC_logicalValue(state, self_, src, + TensorGEValueOp(value)); } -THC_API void THCTensor_(eqValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(eqValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - 
THC_logicalValue(state, self_, src, - TensorEQValueOp(value)); + THC_logicalValue(state, self_, src, + TensorEQValueOp(value)); } -THC_API void THCTensor_(neValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value) +THC_API void THCTensor_(neValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); - THC_logicalValue(state, self_, src, - TensorNEValueOp(value)); + THC_logicalValue(state, self_, src, + TensorNEValueOp(value)); } #endif diff --git a/aten/src/THC/generic/THCTensorMathCompare.h b/aten/src/THC/generic/THCTensorMathCompare.h index 7b8837cc9db9e4..31928bb8dfcb9e 100644 --- a/aten/src/THC/generic/THCTensorMathCompare.h +++ b/aten/src/THC/generic/THCTensorMathCompare.h @@ -2,19 +2,19 @@ #define THC_GENERIC_FILE "generic/THCTensorMathCompare.h" #else -THC_API void THCTensor_(ltValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(gtValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(leValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(geValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(eqValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(neValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, real value); +THC_API void THCTensor_(ltValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(gtValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(leValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(geValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(eqValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(neValue)(THCState *state, THCudaByteTensor *self_, THCTensor *src, scalar_t value); -THC_API void THCTensor_(ltValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(gtValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(leValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(geValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(eqValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value); -THC_API void THCTensor_(neValueT)(THCState *state, THCTensor *self_, THCTensor *src, real value); +THC_API void THCTensor_(ltValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(gtValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(leValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(geValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(eqValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value); +THC_API void THCTensor_(neValueT)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value); #endif diff --git a/aten/src/THC/generic/THCTensorMathCompareT.cu b/aten/src/THC/generic/THCTensorMathCompareT.cu index ee7bc418e2c2a0..6397a0b7caaa96 100644 --- 
a/aten/src/THC/generic/THCTensorMathCompareT.cu +++ b/aten/src/THC/generic/THCTensorMathCompareT.cu @@ -6,8 +6,8 @@ THC_API void THCTensor_(ltTensor)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorLTOp(state, self_, src1, src2, + TensorLTOp()); } @@ -15,8 +15,8 @@ THC_API void THCTensor_(gtTensor)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorGTOp(state, self_, src1, src2, + TensorGTOp()); } @@ -24,8 +24,8 @@ THC_API void THCTensor_(leTensor)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorLEOp(state, self_, src1, src2, + TensorLEOp()); } @@ -33,8 +33,8 @@ THC_API void THCTensor_(geTensor)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorGEOp(state, self_, src1, src2, + TensorGEOp()); } @@ -42,8 +42,8 @@ THC_API void THCTensor_(eqTensor)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorEQOp(state, self_, src1, src2, + TensorEQOp()); } @@ -51,8 +51,8 @@ THC_API void THCTensor_(neTensor)(THCState *state, THCudaByteTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorNEOp(state, self_, src1, src2, + TensorNEOp()); } @@ -60,54 +60,54 @@ THC_API void THCTensor_(ltTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorLTOp()); + THC_logicalTensor(state, self_, src1, src2, + TensorLTOp()); } THC_API void THCTensor_(gtTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorGTOp()); + THC_logicalTensor(state, self_, src1, src2, + TensorGTOp()); } THC_API void THCTensor_(leTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorLEOp()); + THC_logicalTensor(state, self_, src1, src2, + TensorLEOp()); } THC_API void THCTensor_(geTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorGEOp()); + THC_logicalTensor(state, self_, src1, src2, + TensorGEOp()); } THC_API void THCTensor_(eqTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorEQOp()); + THC_logicalTensor(state, self_, src1, src2, + TensorEQOp()); } THC_API void THCTensor_(neTensorT)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) 
{ THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); - THC_logicalTensor(state, self_, src1, src2, - TensorNEOp()); + THC_logicalTensor(state, self_, src1, src2, + TensorNEOp()); } #endif diff --git a/aten/src/THC/generic/THCTensorMathMagma.cu b/aten/src/THC/generic/THCTensorMathMagma.cu index 3b63c3ae1c7b2f..9d53e2b8efe6f4 100644 --- a/aten/src/THC/generic/THCTensorMathMagma.cu +++ b/aten/src/THC/generic/THCTensorMathMagma.cu @@ -6,28 +6,28 @@ #ifdef USE_MAGMA -static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k) +static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k) { int64_t size[1] = { k }; int64_t stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); - size_t len = k * sizeof(real); + size_t len = k * sizeof(scalar_t); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice)); } -static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n) +static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n) { int64_t size[2] = { m, n }; int64_t stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); - size_t len = m * n * sizeof(real); + size_t len = m * n * sizeof(scalar_t); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice)); } -static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self) +static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self) { THAssert(self->dim() == 2); - size_t len = THCTensor_(nElement)(state, self)*sizeof(real); + size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(cudaMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, cudaMemcpyDeviceToHost)); @@ -73,8 +73,8 @@ THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, T THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); - real *a_data = THCTensor_(data)(state, a); - real *b_data = THCTensor_(data)(state, b); + scalar_t *a_data = THCTensor_(data)(state, a); + scalar_t *b_data = THCTensor_(data)(state, b); int *ipiv = th_magma_malloc_pinned(n); @@ -112,15 +112,15 @@ THC_API void THCTensor_(trtrs)(THCState *state, THCTensor *rb_, THCTensor *ra_, magma_trans_t ts = trans[0] == 'N' ? MagmaNoTrans : MagmaTrans; magma_diag_t dg = diag[0] == 'U' ? 
MagmaUnit : MagmaNonUnit; - real alpha = 1; + scalar_t alpha = 1; int64_t n = a_->size(0); int64_t nrhs = b_->size(1); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); - real *a_data = THCTensor_(data)(state, a); - real *b_data = THCTensor_(data)(state, b); + scalar_t *a_data = THCTensor_(data)(state, a); + scalar_t *b_data = THCTensor_(data)(state, b); #if defined(THC_REAL_IS_FLOAT) magma_strsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); @@ -147,13 +147,13 @@ THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, T THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); - real *a_data = THCTensor_(data)(state, a); - real *b_data = THCTensor_(data)(state, b); + scalar_t *a_data = THCTensor_(data)(state, a); + scalar_t *b_data = THCTensor_(data)(state, b); int64_t m = a->size(0); int64_t n = a->size(1); int64_t nrhs = b->size(1); - real wkopt; + scalar_t wkopt; int info; #if defined(THC_REAL_IS_FLOAT) @@ -162,7 +162,7 @@ THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, T magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif - real *hwork = th_magma_malloc_pinned((size_t)wkopt); + scalar_t *hwork = th_magma_malloc_pinned((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); @@ -192,16 +192,16 @@ THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, T magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); - real *input_data = THCTensor_(data)(state, input); + scalar_t *input_data = THCTensor_(data)(state, input); if (n > 0) { // eigen values and workspace - real *w = th_magma_malloc_pinned(n); - real *wA = th_magma_malloc_pinned(lda * n); + scalar_t *w = th_magma_malloc_pinned(n); + scalar_t *wA = th_magma_malloc_pinned(lda * n); // compute optimal size of work array int info; - real lwork; + scalar_t lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) @@ -210,7 +210,7 @@ THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, T magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif - real *work = th_magma_malloc_pinned((size_t)lwork); + scalar_t *work = th_magma_malloc_pinned((size_t)lwork); int *iwork = th_magma_malloc_pinned(liwork); // compute eigenvalues and, optionally, eigenvectors @@ -256,25 +256,25 @@ THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, T magma_vec_t jobvr = jobvrs[0] == 'N' ? 
MagmaNoVec : MagmaVec; int64_t n = a_->size(0); - real *a_data = th_magma_malloc_pinned(n * n); + scalar_t *a_data = th_magma_malloc_pinned(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); - real *wr = th_magma_malloc_pinned(n); - real *wi = th_magma_malloc_pinned(n); + scalar_t *wr = th_magma_malloc_pinned(n); + scalar_t *wi = th_magma_malloc_pinned(n); - real *vr_data = NULL; + scalar_t *vr_data = NULL; int64_t ldvr = 1; if (jobvr == MagmaVec) { - vr_data = th_magma_malloc_pinned(n * n); + vr_data = th_magma_malloc_pinned(n * n); ldvr = n; } - real *work_data = nullptr; + scalar_t *work_data = nullptr; if (n > 0) { int info; - real wkopt; + scalar_t wkopt; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else @@ -282,7 +282,7 @@ THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, T #endif int lwork = (int) wkopt; - work_data = th_magma_malloc_pinned(lwork); + work_data = th_magma_malloc_pinned(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); @@ -300,8 +300,8 @@ THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, T THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); if (n > 0) { - THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(real), cudaMemcpyHostToDevice)); - THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(real), cudaMemcpyHostToDevice)); + THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(scalar_t), cudaMemcpyHostToDevice)); + THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(scalar_t), cudaMemcpyHostToDevice)); } THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); @@ -346,14 +346,14 @@ THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, int64_t j = (jobz == MagmaAllVec) ? m : k; int64_t jv = (jobz == MagmaAllVec) ? 
n : k; - real *a_data = th_magma_malloc_pinned(m * n); + scalar_t *a_data = th_magma_malloc_pinned(m * n); THCTensor_(copyTensor2d)(state, a_data, a); - real *rs_data = th_magma_malloc_pinned(k); - real *ru_data = th_magma_malloc_pinned(m * j); - real *rv_data = th_magma_malloc_pinned(n * n); + scalar_t *rs_data = th_magma_malloc_pinned(k); + scalar_t *ru_data = th_magma_malloc_pinned(m * j); + scalar_t *rv_data = th_magma_malloc_pinned(n * n); - real wkopt; + scalar_t wkopt; int info; #if defined(THC_REAL_IS_FLOAT) @@ -363,7 +363,7 @@ THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, #endif int lwork = (int) wkopt; - real *work_data = th_magma_malloc_pinned(lwork); + scalar_t *work_data = th_magma_malloc_pinned(lwork); int *iwork = th_magma_malloc_pinned(8 * k); #if defined(THC_REAL_IS_FLOAT) @@ -407,12 +407,12 @@ THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); - real *input_data = THCTensor_(data)(state, input); + scalar_t *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); - real *work_data = THCTensor_(data)(state, work); + scalar_t *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) @@ -448,16 +448,16 @@ THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, THTensor_getSizePtr(input), THTensor_getStridePtr(input)); - real *matrices1[1] = { THCTensor_(data)(state, input) }; - real *matrices2[1] = { THCTensor_(data)(state, ra_) }; + scalar_t *matrices1[1] = { THCTensor_(data)(state, input) }; + scalar_t *matrices2[1] = { THCTensor_(data)(state, ra_) }; // Copy pointers to device. 
- auto d_matrices1 = static_cast(THCudaMalloc(state, sizeof(real*))); - auto d_matrices2 = static_cast(THCudaMalloc(state, sizeof(real*))); + auto d_matrices1 = static_cast(THCudaMalloc(state, sizeof(scalar_t*))); + auto d_matrices2 = static_cast(THCudaMalloc(state, sizeof(scalar_t*))); - THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(real*), + THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(scalar_t*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); - THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(real*), + THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(scalar_t*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; auto info_gpu = static_cast(THCudaMalloc(state, sizeof(int))); @@ -480,9 +480,9 @@ THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) // Inverse #if defined(THC_REAL_IS_FLOAT) - THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); + THCudaBlas_Sgetri(state, n, (const scalar_t**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else - THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); + THCudaBlas_Dgetri(state, n, (const scalar_t**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); @@ -502,7 +502,7 @@ THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) #endif } -__global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len) +__global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; @@ -513,7 +513,7 @@ __global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len) } } -__global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len) +__global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; @@ -534,7 +534,7 @@ THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, co magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); - real *input_data = THCTensor_(data)(state, input); + scalar_t *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) @@ -574,7 +574,7 @@ THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, co magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); - real *input_data = THCTensor_(data)(state, input); + scalar_t *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) @@ -610,9 +610,9 @@ THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, TH magma_uplo_t ul = uplo[0] == 'U' ? 
MagmaUpper : MagmaLower; THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b); - real *b_data = THCTensor_(data)(state, b_); + scalar_t *b_data = THCTensor_(data)(state, b_); THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a); - real *a_data = THCTensor_(data)(state, a_); + scalar_t *a_data = THCTensor_(data)(state, a_); int info; #if defined(THC_REAL_IS_FLOAT) @@ -648,8 +648,8 @@ THC_API void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_ int64_t nb = magma_get_dgeqrf_nb(m, n); #endif - real *rtau_data = th_magma_malloc_pinned(k); - real *a_data = THCTensor_(data)(state, a); + scalar_t *rtau_data = th_magma_malloc_pinned(k); + scalar_t *a_data = THCTensor_(data)(state, a); int info; #if defined(THC_REAL_IS_FLOAT) @@ -685,10 +685,10 @@ THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THC int64_t nb = magma_get_dgeqrf_nb(m, n); #endif - real *a_data = THCTensor_(data)(state, a); - real *tau_data = th_magma_malloc_pinned(k); + scalar_t *a_data = THCTensor_(data)(state, a); + scalar_t *tau_data = th_magma_malloc_pinned(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); - real *work_data = THCTensor_(data)(state, work); + scalar_t *work_data = THCTensor_(data)(state, work); int info; // We need to call two different versions of ?geqrf: @@ -722,7 +722,7 @@ THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THC THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); - real *q_data = THCTensor_(data)(state, q); + scalar_t *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); diff --git a/aten/src/THC/generic/THCTensorMathPairwise.cu b/aten/src/THC/generic/THCTensorMathPairwise.cu index 788d53b2051666..40d6bdb6382983 100644 --- a/aten/src/THC/generic/THCTensorMathPairwise.cu +++ b/aten/src/THC/generic/THCTensorMathPairwise.cu @@ -3,17 +3,17 @@ #else THC_API void -THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorAddConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorAddConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorAddConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorAddConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -22,17 +22,17 @@ THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, real value) } THC_API void -THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorSubConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorSubConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorSubConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorSubConstantOp(value))) { THArgCheck(false, 2, 
CUTORCH_DIM_WARNING); } } @@ -41,7 +41,7 @@ THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, real value) } THC_API void -THCTensor_(add_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, real value, real alpha) +THCTensor_(add_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha) { #ifdef THC_REAL_IS_HALF auto v = THC_half2float(value) * THC_half2float(alpha); @@ -52,7 +52,7 @@ THCTensor_(add_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, real } THC_API void -THCTensor_(sub_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, real value, real alpha) +THCTensor_(sub_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value, scalar_t alpha) { #ifdef THC_REAL_IS_HALF auto v = THC_half2float(value) * THC_half2float(alpha); @@ -63,17 +63,17 @@ THCTensor_(sub_scaled)(THCState *state, THCTensor *self_, THCTensor *src_, real } THC_API void -THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorMulConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorMulConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorMulConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorMulConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -82,19 +82,19 @@ THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, real value) } THC_API void -THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); - THArgCheck(value != ScalarConvert::to(0), 3, "divide by zero"); + THArgCheck(value != ScalarConvert::to(0), 3, "divide by zero"); if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorDivConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorDivConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorDivConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorDivConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -103,7 +103,7 @@ THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, real value) } THC_API void -THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCTensor_(mul)(state, self_, src_, pow(2, value)); @@ -111,13 +111,13 @@ THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, real valu return THError("lshift not supported for torch.CudaHalfTensor"); #else if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorLShiftConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorLShiftConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorLShiftConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, 
src_, TensorLShiftConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -127,7 +127,7 @@ THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, real valu } THC_API void -THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCTensor_(mul)(state, self_, src_, pow(2, -value)); @@ -135,13 +135,13 @@ THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, real valu return THError("rshift not supported for torch.CudaHalfTensor"); #else if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorRShiftConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorRShiftConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorRShiftConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorRShiftConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -151,17 +151,17 @@ THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, real valu } THC_API void -THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorFmodOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorFmodOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorFmodOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorFmodOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -170,17 +170,17 @@ THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, real value) } THC_API void -THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorRemainderOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorRemainderOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorRemainderOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorRemainderOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -198,18 +198,18 @@ void THCTensor_(tril)(THCState *state, THCTensor *self_, THCTensor *src_, int64_ int64_t stride0 = self_->stride(0); int64_t stride1 = self_->stride(1); - real *start = THCTensor_(data)(state, self_); + scalar_t *start = THCTensor_(data)(state, self_); - TensorTriOp op(start, stride0, stride1, k); + TensorTriOp op(start, stride0, stride1, k); if (self_ == src_) { - if (!THC_pointwiseApply1(state, src_, op)) { + if (!THC_pointwiseApply1(state, src_, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, op)) { + if (!THC_pointwiseApply2(state, self_, src_, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -227,17 +227,17 @@ void THCTensor_(triu)(THCState *state, THCTensor 
*self_, THCTensor *src_, int64_ int64_t stride0 = self_->stride(0); int64_t stride1 = self_->stride(1); - real *start = THCTensor_(data)(state, self_); + scalar_t *start = THCTensor_(data)(state, self_); - TensorTriOp op(start, stride0, stride1, k); + TensorTriOp op(start, stride0, stride1, k); if (self_ == src_) { - if (!THC_pointwiseApply1(state, src_, op)) { + if (!THC_pointwiseApply1(state, src_, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { - if (!THC_pointwiseApply2(state, self_, src_, op)) { + if (!THC_pointwiseApply2(state, self_, src_, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -258,7 +258,7 @@ THC_API int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_ THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, self_->sizes(), {}); - if (!THC_pointwiseApply3(state, buf, self_, src_, TensorEQOp())) { + if (!THC_pointwiseApply3(state, buf, self_, src_, TensorEQOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } @@ -270,19 +270,19 @@ THC_API int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_ } THC_API void -THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) return THError("bitand only supported for integer type tensors"); #else if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorBitAndConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorBitAndConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorBitAndConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorBitAndConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -292,19 +292,19 @@ THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, real valu } THC_API void -THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) return THError("bitor only supported for integer type tensors"); #else if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorBitOrConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorBitOrConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorBitOrConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorBitOrConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -314,19 +314,19 @@ THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, real value } THC_API void -THCTensor_(bitxor)(THCState* state, THCTensor *self_, THCTensor *src_, real value) +THCTensor_(bitxor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) return THError("bitxor only supported for integer type tensors"); #else if (self_ == src_) { - if (!THC_pointwiseApply1(state, self_, TensorBitXorConstantOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorBitXorConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, 
src_); - if (!THC_pointwiseApply2(state, self_, src_, TensorBitXorConstantOp(value))) { + if (!THC_pointwiseApply2(state, self_, src_, TensorBitXorConstantOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } diff --git a/aten/src/THC/generic/THCTensorMathPairwise.h b/aten/src/THC/generic/THCTensorMathPairwise.h index b54b0c6b19e370..d453ebe603f820 100644 --- a/aten/src/THC/generic/THCTensorMathPairwise.h +++ b/aten/src/THC/generic/THCTensorMathPairwise.h @@ -2,19 +2,19 @@ #define THC_GENERIC_FILE "generic/THCTensorMathPairwise.h" #else -THC_API void THCTensor_(add)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(sub)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(add_scaled)(THCState *state, THCTensor *self, THCTensor *src, real value, real alpha); -THC_API void THCTensor_(sub_scaled)(THCState *state, THCTensor *self, THCTensor *src, real value, real alpha); -THC_API void THCTensor_(mul)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(div)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(lshift)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(rshift)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(fmod)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(remainder)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(bitand)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(bitor)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(bitxor)(THCState *state, THCTensor *self, THCTensor *src, real value); +THC_API void THCTensor_(add)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(sub)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(add_scaled)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value, scalar_t alpha); +THC_API void THCTensor_(sub_scaled)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value, scalar_t alpha); +THC_API void THCTensor_(mul)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(div)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(lshift)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(rshift)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(fmod)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(remainder)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(bitand)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(bitor)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(bitxor)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); THC_API int THCTensor_(equal)(THCState *state, THCTensor *self, THCTensor *src); diff --git a/aten/src/THC/generic/THCTensorMathPointwise.cu b/aten/src/THC/generic/THCTensorMathPointwise.cu index fb619bc27e847a..9192d6c9f9a1d5 100644 --- a/aten/src/THC/generic/THCTensorMathPointwise.cu +++ b/aten/src/THC/generic/THCTensorMathPointwise.cu @@ -4,11 +4,11 @@ #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, 
CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ - __device__ __forceinline__ void operator()(real* out, real* in) const { \ + __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ - __device__ __forceinline__ void operator()(real* v) const { \ + __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ @@ -16,13 +16,13 @@ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ if (self_ == src) { \ - if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ + if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ - if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ + if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ @@ -35,39 +35,39 @@ #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics::log, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics::lgamma, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics::log10, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics::log1p, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics::log2, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics::exp, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics::expm1, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics::cos, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics::sin, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics::sqrt, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics::rsqrt, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics::ceil, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics::floor, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics::trunc, Real) - -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics::acos, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics::cosh, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics::asin, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics::sinh, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics::tan, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics::atan, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics::tanh, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics::erf, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics::erfc, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics::erfinv,Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics::round, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics::frac, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics::cinv, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics::log, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics::lgamma, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics::log10, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics::log1p, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics::log2, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics::exp, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics::expm1, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics::cos, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics::sin, Real) 
+IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics::sqrt, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics::rsqrt, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics::ceil, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics::floor, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics::trunc, Real) + +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics::acos, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics::cosh, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics::asin, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics::sinh, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics::tan, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics::atan, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics::tanh, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics::erf, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics::erfc, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics::erfinv,Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics::round, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics::frac, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics::cinv, Real) #endif -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics::neg, Real) -IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics::abs, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics::neg, Real) +IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC @@ -75,13 +75,13 @@ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics::abs, Real) void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { - if (!THC_pointwiseApply1(state, self_, TensorSignOp())) { + if (!THC_pointwiseApply1(state, self_, TensorSignOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); - if (!THC_pointwiseApply2(state, self_, src, TensorSignOp())) { + if (!THC_pointwiseApply2(state, self_, src, TensorSignOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -89,18 +89,18 @@ void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) { THCudaCheck(cudaGetLastError()); } -void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value, - real max_value) +void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value, + scalar_t max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { - if (!THC_pointwiseApply1(state, self_, TensorClampOp(min_value, max_value))) { + if (!THC_pointwiseApply1(state, self_, TensorClampOp(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); - if (!THC_pointwiseApply2(state, self_, src, TensorClampOp(min_value, max_value))) { + if (!THC_pointwiseApply2(state, self_, src, TensorClampOp(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -135,7 +135,7 @@ THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); - if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp(sx, sy, so))) { + if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp(sx, sy, so))) { 
THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); @@ -152,7 +152,7 @@ void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTens THCTensor_(nElement)(state, ty), 3, "sizes do not match"); THCTensor_(resizeAs)(state, self_, tx); - if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) { + if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } @@ -162,13 +162,13 @@ void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTens void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { - if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp())) { + if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); - if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp())) { + if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -181,7 +181,7 @@ void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) { if (self_ != src) { THCTensor_(resizeAs)(state, self_, src); } - if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp())) { + if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } @@ -195,12 +195,12 @@ void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTens } switch (n) { case 0: - if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp())) { + if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } break; case 1: - if (!THC_pointwiseApply2(state, self_, src, TensorTrigammaOp())) { + if (!THC_pointwiseApply2(state, self_, src, TensorTrigammaOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } break; @@ -212,14 +212,14 @@ void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTens } THC_API void -THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w) +THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, scalar_t w) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, result, a, b)); THArgCheck(THCTensor_(nElement)(state, a) == THCTensor_(nElement)(state, b), 3, "sizes do not match"); THCTensor_(resizeAs)(state, result, a); - if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp(w))) { + if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp(w))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } @@ -229,7 +229,7 @@ THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, #endif THC_API void -THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) +THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(self_, true); #ifdef THC_REAL_IS_HALF @@ -241,7 +241,7 @@ THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, } THC_API void -THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) +THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(self_, true); #ifdef THC_REAL_IS_HALF @@ -268,14 +268,14 @@ THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor * if (self_ == 
src1) { // self = pow(self, src2) - if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp())) { + if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = pow(src1, src2) - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp())) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -283,65 +283,65 @@ THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor * THCudaCheck(cudaGetLastError()); } -void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) { +void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { - if (THCNumerics::eq(value, ScalarConvert::to(1))) { - if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { + if (THCNumerics::eq(value, ScalarConvert::to(1))) { + if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } - } else if (THCNumerics::eq(value, ScalarConvert::to(2))) { - if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(2))) { + if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } - } else if (THCNumerics::eq(value, ScalarConvert::to(3))) { - if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(3))) { + if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) - } else if (THCNumerics::eq(value, ScalarConvert::to(-1))) { - if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(-1))) { + if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } - } else if (THCNumerics::eq(value, ScalarConvert::to(-2))) { - if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(-2))) { + if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #endif } else { // fallback implementation using pow - if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src); - if (THCNumerics::eq(value, ScalarConvert::to(1))) { - if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { + if (THCNumerics::eq(value, ScalarConvert::to(1))) { + if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } - } else if (THCNumerics::eq(value, ScalarConvert::to(2))) { - if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(2))) { + if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } - } else if (THCNumerics::eq(value, ScalarConvert::to(3))) { - if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(3))) { + if (!THC_pointwiseApply2(state, self_, src, 
TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) - } else if (THCNumerics::eq(value, ScalarConvert::to(-1))) { - if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(-1))) { + if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } - } else if (THCNumerics::eq(value, ScalarConvert::to(-2))) { - if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { + } else if (THCNumerics::eq(value, ScalarConvert::to(-2))) { + if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #endif } else { // fallback implementation using pow - if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { + if (!THC_pointwiseApply2(state, self_, src, TensorPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -350,17 +350,17 @@ void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real val THCudaCheck(cudaGetLastError()); } -void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src) +void THCTensor_(tpow)(THCState *state, THCTensor *self_, scalar_t value, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { - if (!THC_pointwiseApply1(state, self_, TensorTPowOp(value))) { + if (!THC_pointwiseApply1(state, self_, TensorTPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); - if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp(value))) { + if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -386,14 +386,14 @@ THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTenso if (self_ == src1) { // self /= src2 - if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp())) { + if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp())) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -414,14 +414,14 @@ THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTenso if (self_ == src1) { // self /= src2 - if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp())) { + if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp())) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -438,12 +438,12 @@ THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *s THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { - if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp())) { + if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); - if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp())) { + if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp())) { 
THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -457,12 +457,12 @@ THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *s THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { - if (!THC_pointwiseApply2(state, self, src2, TensorMinOp())) { + if (!THC_pointwiseApply2(state, self, src2, TensorMinOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); - if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp())) { + if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -476,12 +476,12 @@ THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTen THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { - if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp())) { + if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); - if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp())) { + if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -495,53 +495,53 @@ THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor * THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { - if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp())) { + if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); - if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp())) { + if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void -THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value) +THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { - if (!THC_pointwiseApply1(state, self, TensorMaxValueOp(value))) { + if (!THC_pointwiseApply1(state, self, TensorMaxValueOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); - if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp(value))) { + if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void -THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value) +THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { - if (!THC_pointwiseApply1(state, self, TensorMinValueOp(value))) { + if (!THC_pointwiseApply1(state, self, TensorMinValueOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); - if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp(value))) { + if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void -THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) +THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, scalar_t value, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, 
self_, t, src1, src2)); if(self_ != t) @@ -558,7 +558,7 @@ THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp(value))) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } @@ -566,7 +566,7 @@ THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, } THC_API void -THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) +THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, scalar_t value, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) @@ -582,7 +582,7 @@ THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp(value))) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } @@ -601,14 +601,14 @@ THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTenso if (self_ == src1) { // self /= src2 - if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp())) { + if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp())) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -629,14 +629,14 @@ THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor if (self_ == src1) { // self /= src2 - if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp())) { + if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp())) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } @@ -657,14 +657,14 @@ THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTenso if (self_ == src1) { // self /= src2 - if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp())) { + if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 - if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp())) { + if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } diff --git a/aten/src/THC/generic/THCTensorMathPointwise.h b/aten/src/THC/generic/THCTensorMathPointwise.h index 7f790275a05ade..6e3bd21c3d91d6 100644 --- a/aten/src/THC/generic/THCTensorMathPointwise.h +++ b/aten/src/THC/generic/THCTensorMathPointwise.h @@ -2,8 +2,8 @@ #define THC_GENERIC_FILE "generic/THCTensorMathPointwise.h" #else -THC_API void THCTensor_(pow)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(tpow)(THCState *state, 
THCTensor *self, real value, THCTensor *src); +THC_API void THCTensor_(pow)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(tpow)(THCState *state, THCTensor *self, scalar_t value, THCTensor *src); THC_API void THCTensor_(cpow)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) @@ -38,7 +38,7 @@ THC_API void THCTensor_(floor)(THCState *state, THCTensor *self, THCTensor *src) THC_API void THCTensor_(round)(THCState *state, THCTensor *self, THCTensor *src); THC_API void THCTensor_(trunc)(THCState *state, THCTensor *self, THCTensor *src); THC_API void THCTensor_(frac)(THCState *state, THCTensor *self, THCTensor *src); -THC_API void THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w); +THC_API void THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, scalar_t w); THC_API void THCTensor_(cinv)(THCState *state, THCTensor *self, THCTensor *src); @@ -47,11 +47,11 @@ THC_API void THCTensor_(cinv)(THCState *state, THCTensor *self, THCTensor *src); THC_API void THCTensor_(neg)(THCState *state, THCTensor *self, THCTensor *src); THC_API void THCTensor_(abs)(THCState *state, THCTensor *self, THCTensor *src); THC_API void THCTensor_(sign)(THCState *state, THCTensor *self, THCTensor *src); -THC_API void THCTensor_(clamp)(THCState *state, THCTensor *self, THCTensor *src, real min_value, real max_value); +THC_API void THCTensor_(clamp)(THCState *state, THCTensor *self, THCTensor *src, scalar_t min_value, scalar_t max_value); THC_API void THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2, int dimension); -THC_API void THCTensor_(cadd)(THCState *state, THCTensor *self, THCTensor *src1, real value, THCTensor *src2); -THC_API void THCTensor_(csub)(THCState *state, THCTensor *self, THCTensor *src1, real value, THCTensor *src2); +THC_API void THCTensor_(cadd)(THCState *state, THCTensor *self, THCTensor *src1, scalar_t value, THCTensor *src2); +THC_API void THCTensor_(csub)(THCState *state, THCTensor *self, THCTensor *src1, scalar_t value, THCTensor *src2); THC_API void THCTensor_(cmul)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); THC_API void THCTensor_(cdiv)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); THC_API void THCTensor_(clshift)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); @@ -60,13 +60,13 @@ THC_API void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THC_API void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); THC_API void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); THC_API void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); -THC_API void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value); -THC_API void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value); +THC_API void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); +THC_API void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value); THC_API void THCTensor_(cbitand)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); THC_API void THCTensor_(cbitor)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); THC_API void 
THCTensor_(cbitxor)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2); -THC_API void THCTensor_(addcmul)(THCState *state, THCTensor *self, THCTensor* t, real value, THCTensor *src1, THCTensor *src2); -THC_API void THCTensor_(addcdiv)(THCState *state, THCTensor *self, THCTensor* t, real value, THCTensor *src1, THCTensor *src2); +THC_API void THCTensor_(addcmul)(THCState *state, THCTensor *self, THCTensor* t, scalar_t value, THCTensor *src1, THCTensor *src2); +THC_API void THCTensor_(addcdiv)(THCState *state, THCTensor *self, THCTensor* t, scalar_t value, THCTensor *src1, THCTensor *src2); #endif diff --git a/aten/src/THC/generic/THCTensorMathReduce.cu b/aten/src/THC/generic/THCTensorMathReduce.cu index 7a4d3655bcc5fe..91319745b6a1de 100644 --- a/aten/src/THC/generic/THCTensorMathReduce.cu +++ b/aten/src/THC/generic/THCTensorMathReduce.cu @@ -5,7 +5,7 @@ THC_API void THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); - if (!THC_reduceDim(state, self, src, + if (!THC_reduceDim(state, self, src, thrust::identity{}, ReduceAdd{}, thrust::identity{}, @@ -21,7 +21,7 @@ THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, THC_API void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); - if (!THC_reduceDim(state, self, src, + if (!THC_reduceDim(state, self, src, thrust::identity{}, ReduceMultiply{}, thrust::identity{}, @@ -39,7 +39,7 @@ THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); const accreal size = scalar_cast(THCTensor_(size)(state, src, dim)); - if (!THC_reduceDim(state, self, src, + if (!THC_reduceDim(state, self, src, thrust::identity{}, ReduceAdd{}, ReduceDivide{size}, @@ -55,7 +55,7 @@ THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THC_API void -THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm) +THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THCTensor *self_; @@ -64,7 +64,7 @@ THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int64_t numel = THCTensor_(nElement)(state, data); THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension"); - THArgCheck(THCNumerics::gt(value, scalar_cast(0)), 2, "non-positive-norm not supported"); + THArgCheck(THCNumerics::gt(value, scalar_cast(0)), 2, "non-positive-norm not supported"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions"); if (numel > 0) { @@ -72,7 +72,7 @@ THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, dim3 grid( THTensor_sizeLegacyNoScalars(data, 0)); dim3 threads(32); - THCTensor_kernel_renorm + THCTensor_kernel_renorm <<>> (THCTensor_(data)(state, data), scalar_cast(value), size, scalar_cast(maxnorm)); @@ -103,9 +103,9 @@ THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension src = THCTensor_(newContiguous)(state, src); if (dimension == THCTensor_(nDimensionLegacyAll)(state, src) - 1) { - 
THCTensor_varInnermostDim(state, self, src, biased); + THCTensor_varInnermostDim(state, self, src, biased); } else { - THCTensor_varOuterDim(state, self, src, dimension, biased); + THCTensor_varOuterDim(state, self, src, dimension, biased); } THCTensor_(free)(state, src); @@ -131,9 +131,9 @@ THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension src = THCTensor_(newContiguous)(state, src); if (dimension == THCTensor_(nDimensionLegacyAll)(state, src) - 1) { - THCTensor_varInnermostDim(state, self, src, biased); + THCTensor_varInnermostDim(state, self, src, biased); } else { - THCTensor_varOuterDim(state, self, src, dimension, biased); + THCTensor_varOuterDim(state, self, src, dimension, biased); } THCTensor_(free)(state, src); @@ -158,7 +158,7 @@ THCTensor_(varall)(THCState *state, THCTensor *self, int biased) accreal mean = THCTensor_(meanall)(state, self); accreal val; - if (!THC_reduceAll(state, self, + if (!THC_reduceAll(state, self, SquareFunctor(mean), ReduceAdd(), scalar_cast(0), @@ -176,40 +176,40 @@ THCTensor_(varall)(THCState *state, THCTensor *self, int biased) } THC_API void -THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, int dimension, int keepdim) +THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim) { const accreal value = scalar_cast(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (THCNumerics::eq(value, scalar_cast(0))) { - THC_reduceDim(state, self, src, + THC_reduceDim(state, self, src, TensorNonZeroOp{}, ReduceAdd{}, thrust::identity{}, scalar_cast(0), dimension, keepdim); } else if (THCNumerics::eq(value, scalar_cast(1))) { - THC_reduceDim(state, self, src, + THC_reduceDim(state, self, src, TensorNormOp{value}, ReduceAdd{}, thrust::identity{}, scalar_cast(0), dimension, keepdim); } else if (THCNumerics::eq(value, scalar_cast(2))) { - THC_reduceDim(state, self, src, + THC_reduceDim(state, self, src, TensorNormOp{value}, ReduceAdd{}, ReducePow{scalar_cast(.5)}, scalar_cast(0), dimension, keepdim); } else if (THCNumerics::eq(value, scalar_cast(INFINITY))) { - THC_reduceDim(state, self, src, + THC_reduceDim(state, self, src, TensorNormOp{value}, ReduceMax{}, thrust::identity{}, scalar_cast(0), dimension, keepdim); } else { - THC_reduceDim(state, self, src, + THC_reduceDim(state, self, src, TensorNormOp{value}, ReduceAdd{}, ReducePow{THCNumerics::cinv(value)}, @@ -221,39 +221,39 @@ THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, } THC_API accreal -THCTensor_(normall)(THCState *state, THCTensor *self, real _value) +THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value) { const accreal value = scalar_cast(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal result; if (THCNumerics::eq(value, scalar_cast(0))) { - THC_reduceAll(state, self, + THC_reduceAll(state, self, TensorNonZeroOp{}, ReduceAdd{}, scalar_cast(0), &result, 0); } else if (THCNumerics::eq(value, scalar_cast(1))) { - THC_reduceAll(state, self, + THC_reduceAll(state, self, TensorNormOp{value}, ReduceAdd{}, scalar_cast(0), &result, 0); } else if (THCNumerics::eq(value, scalar_cast(2))) { - THC_reduceAll(state, self, + THC_reduceAll(state, self, TensorNormOp{value}, ReduceAdd{}, scalar_cast(0), &result, 0); result = THCNumerics::sqrt(result); } else if (THCNumerics::eq(value, scalar_cast(INFINITY))) { - THC_reduceAll(state, self, + THC_reduceAll(state, self, TensorNormOp{value}, ReduceMax{}, scalar_cast(0), 
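The norm hunk above dispatches on the value of p: p == 0 counts nonzeros, p == 1 sums absolute values, p == 2 adds a final sqrt (ReducePow{.5}), infinity takes the max, and the general case applies pow(., 1/p). A scalar sketch of that case analysis:

#include <algorithm>
#include <cmath>
#include <vector>

double norm_all(const std::vector<double>& x, double p) {
  if (p == 0) {                       // TensorNonZeroOp + ReduceAdd branch
    double c = 0;
    for (double v : x) c += (v != 0);
    return c;
  }
  if (std::isinf(p)) {                // ReduceMax branch
    double m = 0;
    for (double v : x) m = std::max(m, std::fabs(v));
    return m;
  }
  double s = 0;
  for (double v : x) s += std::pow(std::fabs(v), p);
  return std::pow(s, 1.0 / p);        // for p == 2 this is the ReducePow{.5} sqrt
}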
&result, 0); } else { - THC_reduceAll(state, self, + THC_reduceAll(state, self, TensorNormOp{value}, ReduceAdd{}, scalar_cast(0), @@ -267,15 +267,15 @@ THCTensor_(normall)(THCState *state, THCTensor *self, real _value) } accreal THCTensor_(dist)(THCState *state, THCTensor *self, - THCTensor *src, real _value) + THCTensor *src, scalar_t _value) { const accreal value = scalar_cast(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); self = THCTensor_(newContiguous)(state, self); ptrdiff_t size = THCTensor_(nElement)(state, self); src = THCTensor_(newContiguous)(state, src); - thrust::device_ptr self_data(THCTensor_(data)(state, self)); - thrust::device_ptr src_data(THCTensor_(data)(state, src)); + thrust::device_ptr self_data(THCTensor_(data)(state, self)); + thrust::device_ptr src_data(THCTensor_(data)(state, src)); THCThrustAllocator thrustAlloc(state); accreal result = thrust::inner_product( @@ -284,7 +284,7 @@ accreal THCTensor_(dist)(THCState *state, THCTensor *self, #endif self_data, self_data+size, src_data, scalar_cast(0), thrust::plus(), - ThrustTensorDistOp(value)); + ThrustTensorDistOp(value)); THCTensor_(free)(state, src); THCTensor_(free)(state, self); @@ -298,7 +298,7 @@ THC_API accreal THCTensor_(sumall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; - if (!THC_reduceAll(state, self, + if (!THC_reduceAll(state, self, thrust::identity{}, ReduceAdd{}, scalar_cast(0), @@ -314,7 +314,7 @@ THC_API accreal THCTensor_(prodall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; - if (!THC_reduceAll(state, self, + if (!THC_reduceAll(state, self, thrust::identity{}, ReduceMultiply{}, scalar_cast(1), @@ -333,11 +333,11 @@ THCTensor_(meanall)(THCState *state, THCTensor *self) return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self); } -THC_API real +THC_API scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; - if (!THC_reduceAll(state, self, + if (!THC_reduceAll(state, self, thrust::identity{}, ReduceMin{}, THCNumerics::upper_bound(), &val, 0)) { @@ -345,14 +345,14 @@ THCTensor_(minall)(THCState *state, THCTensor *self) { } THCudaCheck(cudaGetLastError()); - return scalar_cast(val); + return scalar_cast(val); } -THC_API real +THC_API scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; - if (!THC_reduceAll(state, self, + if (!THC_reduceAll(state, self, thrust::identity{}, ReduceMax{}, THCNumerics::lower_bound(), &val, 0)) { @@ -360,14 +360,14 @@ THCTensor_(maxall)(THCState *state, THCTensor *self) { } THCudaCheck(cudaGetLastError()); - return scalar_cast(val); + return scalar_cast(val); } -THC_API real +THC_API scalar_t THCTensor_(medianall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); - real val; + scalar_t val; ptrdiff_t nelem, k; nelem = THCTensor_(nElement)(state, self); @@ -442,14 +442,14 @@ THCTensor_(max)(THCState *state, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); - thrust::pair + thrust::pair init = - thrust::make_pair( - THCNumerics::lower_bound(), 0); + thrust::make_pair( + THCNumerics::lower_bound(), 0); - return THC_reduceDimIndex( + return THC_reduceDimIndex( state, values, indices, src, dimension, keepdim, init, - MaxValuePair()); + MaxValuePair()); } THC_API void @@ -461,14 +461,14 @@ 
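The dist() hunk above wraps raw device pointers in thrust::device_ptr and reduces with thrust::inner_product, using plus<> as the reduction and a |x - y|^p term as the elementwise op. An illustrative sketch (AbsDiffPow stands in for ThrustTensorDistOp; the allocator/stream plumbing is omitted):

#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <cmath>

struct AbsDiffPow {
  double p;
  __host__ __device__ double operator()(double x, double y) const {
    return pow(fabs(x - y), p);
  }
};

double device_dist(const thrust::device_vector<double>& a,
                   const thrust::device_vector<double>& b, double p) {
  double sum = thrust::inner_product(a.begin(), a.end(), b.begin(), 0.0,
                                     thrust::plus<double>(), AbsDiffPow{p});
  return pow(sum, 1.0 / p);
}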
THCTensor_(min)(THCState *state, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); - thrust::pair + thrust::pair init = - thrust::make_pair( - THCNumerics::upper_bound(), 0); + thrust::make_pair( + THCNumerics::upper_bound(), 0); - return THC_reduceDimIndex( + return THC_reduceDimIndex( state, values, indices, src, dimension, keepdim, init, - MinValuePair()); + MinValuePair()); } #endif diff --git a/aten/src/THC/generic/THCTensorMathReduce.h b/aten/src/THC/generic/THCTensorMathReduce.h index 4fbbc94e27f642..b004232eb3a32f 100644 --- a/aten/src/THC/generic/THCTensorMathReduce.h +++ b/aten/src/THC/generic/THCTensorMathReduce.h @@ -4,13 +4,13 @@ #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) -THC_API void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real max_norm); +THC_API void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t max_norm); THC_API void THCTensor_(std)(THCState *state, THCTensor *self, THCTensor *src, int dim, int biased, int keepdim); -THC_API void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, int keepdim); +THC_API void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, int keepdim); THC_API void THCTensor_(var)(THCState *state, THCTensor *self, THCTensor *src, int dim, int biased, int keepdim); THC_API accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased); -THC_API accreal THCTensor_(normall)(THCState *state, THCTensor *self, real value); +THC_API accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t value); THC_API accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased); #endif @@ -32,9 +32,9 @@ THC_API void THCTensor_(max)(THCState *state, THCudaLongTensor *indices, THCTensor *src, int dim, int keepdim); -THC_API real THCTensor_(minall)(THCState *state, THCTensor *self); -THC_API real THCTensor_(maxall)(THCState *state, THCTensor *self); -THC_API real THCTensor_(medianall)(THCState *state, THCTensor *self); +THC_API scalar_t THCTensor_(minall)(THCState *state, THCTensor *self); +THC_API scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self); +THC_API scalar_t THCTensor_(medianall)(THCState *state, THCTensor *self); THC_API void THCTensor_(median)(THCState *state, THCTensor *values, @@ -42,6 +42,6 @@ THC_API void THCTensor_(median)(THCState *state, THCTensor *src, int dim, int keepdim); THC_API accreal THCTensor_(dist)(THCState *state, THCTensor *self, THCTensor *src, - real value); + scalar_t value); #endif diff --git a/aten/src/THC/generic/THCTensorMathScan.cu b/aten/src/THC/generic/THCTensorMathScan.cu index 708cb2ed4d63e3..53da774d56b6c0 100644 --- a/aten/src/THC/generic/THCTensorMathScan.cu +++ b/aten/src/THC/generic/THCTensorMathScan.cu @@ -11,8 +11,8 @@ __host__ void THCTensor_(scanThrust)( BinaryFunction binary_op) { THCThrustAllocator thrustAlloc(state); - thrust::device_ptr src_data(THCTensor_(data)(state, src)); - thrust::device_ptr dst_data(THCTensor_(data)(state, dst)); + thrust::device_ptr src_data(THCTensor_(data)(state, src)); + thrust::device_ptr dst_data(THCTensor_(data)(state, dst)); ptrdiff_t size = THCTensor_(nElement)(state, src); thrust::inclusive_scan( #if CUDA_VERSION >= 7000 @@ -26,7 +26,7 @@ __host__ void THCTensor_(scanThrust)( template __host__ void THCTensor_(scanOuterDim)(THCState *state, THCTensor *tgt, 
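The max()/min() hunks above seed THC_reduceDimIndex with a (lower_bound/upper_bound, index 0) pair and combine pairs with MaxValuePair/MinValuePair. The per-row logic, sketched serially:

#include <limits>
#include <utility>
#include <vector>

std::pair<float, long> max_with_index(const std::vector<float>& row) {
  std::pair<float, long> best(std::numeric_limits<float>::lowest(), 0);
  for (long i = 0; i < (long)row.size(); ++i)
    if (row[i] > best.first) best = {row[i], i};  // MaxValuePair keeps the larger value
  return best;
}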
THCTensor *src, int dimension, - real init, BinaryOp binary_op) + scalar_t init, BinaryOp binary_op) { unsigned ndim = THCTensor_(nDimensionLegacyAll)(state, src); // Treat all outer dimensions (i.e. dim < dimension) as one. @@ -45,7 +45,7 @@ __host__ void THCTensor_(scanOuterDim)(THCState *state, THCTensor *tgt, unsigned maxGridDim = 1024; dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THCCeilDiv(num_irows, threads.x))); - THCTensor_kernel_scanOuterDim<<>>( + THCTensor_kernel_scanOuterDim<<>>( THCTensor_(data)(state, tgt), THCTensor_(data)(state, src), num_orows, num_irows, row_size, init, binary_op); @@ -54,7 +54,7 @@ __host__ void THCTensor_(scanOuterDim)(THCState *state, THCTensor *tgt, template __host__ void THCTensor_(scanInnermostDim)(THCState *state, THCTensor *tgt, - THCTensor *src, real init, + THCTensor *src, scalar_t init, BinaryFunction binary_op) { unsigned ndim = THCTensor_(nDimensionLegacyAll)(state, src); @@ -68,7 +68,7 @@ __host__ void THCTensor_(scanInnermostDim)(THCState *state, THCTensor *tgt, dim3 threads(16, 32); dim3 grid(min(1024, THCCeilDiv(num_rows, threads.y))); - THCTensor_kernel_scanInnermostDim<<>>( + THCTensor_kernel_scanInnermostDim<<>>( THCTensor_(data)(state, tgt), THCTensor_(data)(state, src), num_rows, row_size, init, binary_op); THCudaCheck(cudaGetLastError()); @@ -76,7 +76,7 @@ __host__ void THCTensor_(scanInnermostDim)(THCState *state, THCTensor *tgt, template void THCTensor_(scanDim)(THCState *state, THCTensor *self_, THCTensor *src, - int dimension, real init, BinaryFunction binary_op) + int dimension, scalar_t init, BinaryFunction binary_op) { // "init" must be the identity element for binary_op int ndim = THCTensor_(nDimensionLegacyNoScalars)(state, src); @@ -109,14 +109,14 @@ void THCTensor_(cumsum)(THCState *state, THCTensor *self, THCTensor *src, int di { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); return THCTensor_(scanDim)(state, self, src, dimension, - ScalarConvert::to(0.0), AddOp()); + ScalarConvert::to(0.0), AddOp()); } void THCTensor_(cumprod)(THCState *state, THCTensor *self, THCTensor *src, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); return THCTensor_(scanDim)(state, self, src, dimension, - ScalarConvert::to(1.0), MulOp()); + ScalarConvert::to(1.0), MulOp()); } #endif diff --git a/aten/src/THC/generic/THCTensorMode.cu b/aten/src/THC/generic/THCTensorMode.cu index 0e54d0195744c0..274093ef105ae5 100644 --- a/aten/src/THC/generic/THCTensorMode.cu +++ b/aten/src/THC/generic/THCTensorMode.cu @@ -15,7 +15,7 @@ THC_API void THCTensor_(calculateMode)(THCState *state, // location of the buffer at the innermost dimension that we are going // to calculate the mode for --> we do this by manually doing the stride // calculations to get an offset - real *data = THCTensor_(data)(state, input); + scalar_t *data = THCTensor_(data)(state, input); for (int i = 0; i < THLongStorage_size(position); ++i) { data += THLongStorage_data(position)[i] * THTensor_strideLegacyNoScalars(input, i); } @@ -24,8 +24,8 @@ THC_API void THCTensor_(calculateMode)(THCState *state, THCThrustAllocator thrustAlloc(state); // Wrap input data, sortBuffer, in Thrust device vectors - thrust::device_ptr vecPtr = thrust::device_pointer_cast(data); - thrust::device_vector iter(vecPtr, vecPtr + nElement); + thrust::device_ptr vecPtr = thrust::device_pointer_cast(data); + thrust::device_vector iter(vecPtr, vecPtr + nElement); thrust::device_ptr sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer)); 
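As the scanDim comment above notes, `init` must be the identity element of binary_op: 0 for AddOp (cumsum), 1 for MulOp (cumprod). A minimal sketch over one contiguous row:

#include <cstddef>
#include <functional>
#include <vector>

template <typename T, typename BinaryOp>
void inclusive_scan_row(const std::vector<T>& src, std::vector<T>& dst,
                        T init, BinaryOp op) {
  dst.resize(src.size());
  T acc = init;
  for (std::size_t i = 0; i < src.size(); ++i)
    dst[i] = acc = op(acc, src[i]);   // dst[i] = op(dst[i-1], src[i])
}

// inclusive_scan_row(x, out, 0.0, std::plus<double>());        // cumsum
// inclusive_scan_row(x, out, 1.0, std::multiplies<double>());  // cumprod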
thrust::device_vector seq(sbPtr, sbPtr + nElement); @@ -63,12 +63,12 @@ THC_API void THCTensor_(calculateMode)(THCState *state, #if defined(THC_REAL_IS_HALF) ThrustHalfNotEqualTo() #else - thrust::not_equal_to() + thrust::not_equal_to() #endif ); // Count frequency of each element - thrust::device_vector keys(unique); + thrust::device_vector keys(unique); thrust::device_vector counts(unique); thrust::reduce_by_key( #if CUDA_VERSION >= 7000 @@ -91,11 +91,11 @@ THC_API void THCTensor_(calculateMode)(THCState *state, thrust::device, #endif counts.begin(), counts.end()); - real mode = keys[it - counts.begin()]; + scalar_t mode = keys[it - counts.begin()]; // Find first index within which it occurs #if defined(THC_REAL_IS_HALF) - thrust::device_vector::iterator positionIter = thrust::find_if( + thrust::device_vector::iterator positionIter = thrust::find_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else @@ -103,7 +103,7 @@ THC_API void THCTensor_(calculateMode)(THCState *state, #endif iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode)); #else - thrust::device_vector::iterator positionIter = thrust::find( + thrust::device_vector::iterator positionIter = thrust::find( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else @@ -220,7 +220,7 @@ THC_API void THCTensor_(mode)(THCState *state, indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1); // Set-up TensorInfo structs for passing to kernel - TensorInfo tiValues = getTensorInfo(state, valuesTransposed); + TensorInfo tiValues = getTensorInfo(state, valuesTransposed); TensorInfo tiIndices = getTensorInfo(state, indicesTransposed); // The number of blocks is the number of slices that we need to calculate the mode for. 
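The calculateMode pipeline above sorts a slice, counts run lengths, and picks the most frequent key. A condensed Thrust sketch of the same idea (the original additionally tracks positions via a sort buffer and a final find, omitted here):

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>

float mode_of(thrust::device_vector<float> v) {  // sorts a copy
  thrust::sort(v.begin(), v.end());
  thrust::device_vector<float> keys(v.size());
  thrust::device_vector<int> counts(v.size());
  // each element contributes 1; reduce_by_key collapses equal runs
  auto ends = thrust::reduce_by_key(v.begin(), v.end(),
                                    thrust::constant_iterator<int>(1),
                                    keys.begin(), counts.begin());
  auto it = thrust::max_element(counts.begin(), ends.second);
  return keys[it - counts.begin()];
}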
Each block @@ -237,8 +237,8 @@ THC_API void THCTensor_(mode)(THCState *state, { \ dim3 blockSize(SIZE / 2); \ \ - int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \ - computeMode \ + int memsize = (sizeof(scalar_t) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \ + computeMode \ <<>>( \ THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \ } diff --git a/aten/src/THC/generic/THCTensorRandom.cu b/aten/src/THC/generic/THCTensorRandom.cu index 1778af2112ca0a..620c73e9af01d3 100644 --- a/aten/src/THC/generic/THCTensorRandom.cu +++ b/aten/src/THC/generic/THCTensorRandom.cu @@ -13,7 +13,7 @@ THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, do if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); generate_uniform<<>>( gen->state.gen_states, size, data, a, b); @@ -28,7 +28,7 @@ THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); generate_normal<<>>( gen->state.gen_states, size, data, mean, stdv); @@ -39,7 +39,7 @@ THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, THC_API void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, stddev); - THCTensor_(cadd)(state, self, self, ScalarConvert::to(1), means); + THCTensor_(cadd)(state, self, self, ScalarConvert::to(1), means); } THC_API void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs) @@ -47,7 +47,7 @@ THC_API void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double THCTensor_(resizeAs)(state, self, stddevs); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); - THCTensor_(add)(state, self, self, ScalarConvert::to(mean)); + THCTensor_(add)(state, self, self, ScalarConvert::to(mean)); } THC_API void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs) @@ -55,7 +55,7 @@ THC_API void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); - THCTensor_(cadd)(state, self, self, ScalarConvert::to(1), means); + THCTensor_(cadd)(state, self, self, ScalarConvert::to(1), means); } THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) @@ -67,9 +67,9 @@ THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mea THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); - generateLogNormal<<>>( + generateLogNormal<<>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); @@ -83,7 +83,7 @@ THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double l THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = 
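The generate_uniform launch above hands each thread a cuRAND generator state and fills the buffer in a grid-stride loop. A self-contained sketch (the real THC code reuses persistent gen_states rather than seeding per launch):

#include <curand_kernel.h>

__global__ void generate_uniform_sketch(unsigned long long seed, float* data,
                                        long size, float a, float b) {
  long idx = blockIdx.x * (long)blockDim.x + threadIdx.x;
  curandState state;
  curand_init(seed, idx, /*offset=*/0, &state);
  for (long i = idx; i < size; i += (long)blockDim.x * gridDim.x)
    data[i] = a + (b - a) * curand_uniform(&state);  // curand_uniform is in (0, 1]
}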
THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); generate_exponential<<>>( gen->state.gen_states, size, data, lambda); @@ -99,7 +99,7 @@ THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); generate_cauchy<<>>( gen->state.gen_states, size, data, median, sigma); @@ -122,8 +122,8 @@ void THCTensor_(renormRows)(struct THCState* state, dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); - renormRowsL1 - << + <<>>(THCTensor_(data)(state, t), rows, cols); } @@ -181,7 +181,7 @@ THC_API void THCTensor_(multinomial)(struct THCState *state, int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) - * (sizeof(real) + sizeof(accreal)); + * (sizeof(scalar_t) + sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation @@ -194,7 +194,7 @@ THC_API void THCTensor_(multinomial)(struct THCState *state, dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4); - sampleMultinomialOnce + sampleMultinomialOnce <<>>( @@ -312,7 +312,7 @@ THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_prob THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); - real one = ScalarConvert::to(1); + scalar_t one = ScalarConvert::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); aliasMultinomialFilter <<>>( @@ -340,7 +340,7 @@ THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_prob THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); - real q_max = THCTensor_(maxall)(state, _q); + scalar_t q_max = THCTensor_(maxall)(state, _q); condDiv<<< inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, _q), @@ -385,7 +385,7 @@ THC_API void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_bernoulli, double, double p, double, curand_uniform_double, x <= p) #else -GENERATE_KERNEL1(generate_bernoulli, real, double p, float, curand_uniform, (ScalarConvert::to(x <= p))) +GENERATE_KERNEL1(generate_bernoulli, scalar_t, double p, float, curand_uniform, (ScalarConvert::to(x <= p))) #endif THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p) @@ -395,7 +395,7 @@ THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p) if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); generate_bernoulli<<>>( gen->state.gen_states, size, data, p); @@ -423,7 +423,7 @@ THC_API void THCTensor_(NAME)(THCState* state, \ THCTensor *self = THCTensor_(newContiguous)(state, self_); \ PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \ ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \ - real *result_data = THCTensor_(data)(state, self); \ + scalar_t *result_data = THCTensor_(data)(state, self); \ PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, 
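multinomialAliasSetup above builds an alias table (the q/J tensors, the small/large split, and the condDiv normalization are all parts of Vose's alias method). A host-side sketch of the table construction it parallelizes:

#include <queue>
#include <vector>

void alias_setup(const std::vector<double>& probs,
                 std::vector<double>& q, std::vector<int>& J) {
  int n = (int)probs.size();
  q.resize(n);
  J.assign(n, 0);
  std::queue<int> small, large;            // the small_c / large_c buckets
  for (int i = 0; i < n; ++i) {
    q[i] = probs[i] * n;
    (q[i] < 1.0 ? small : large).push(i);
  }
  while (!small.empty() && !large.empty()) {
    int s = small.front(); small.pop();
    int l = large.front(); large.pop();
    J[s] = l;                              // s's leftover mass comes from l
    q[l] = (q[l] + q[s]) - 1.0;
    (q[l] < 1.0 ? small : large).push(l);
  }
}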
probs); \ \ THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \ @@ -441,21 +441,21 @@ DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double) #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p))) #else -GENERATE_KERNEL1(generate_geometric, real, double p, float, curand_uniform, (ScalarConvert::to(ceilf(logf(x) / log(1-p))))) +GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, curand_uniform, (ScalarConvert::to(ceilf(logf(x) / log(1-p))))) #endif #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) #define CURAND64(STATE) (((uint64_t)curand(STATE)) << 32) | (uint64_t)curand(STATE) -GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, \ - static_cast(static_cast((x % range) + base))) -GENERATE_KERNEL2(generate_random_64, real, int64_t base, uint64_t range, uint64_t, CURAND64, \ - static_cast(static_cast((x % range) + base))) +GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, \ + static_cast(static_cast((x % range) + base))) +GENERATE_KERNEL2(generate_random_64, scalar_t, int64_t base, uint64_t range, uint64_t, CURAND64, \ + static_cast(static_cast((x % range) + base))) #elif defined(THC_REAL_IS_HALF) -GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, - (ScalarConvert::to(static_cast(x % range + base)))) +GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, + (ScalarConvert::to(static_cast(x % range + base)))) #else -GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, - static_cast(static_cast(x % range + base))) +GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, + static_cast(static_cast(x % range + base))) #endif THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) @@ -466,7 +466,7 @@ THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); generate_geometric<<>>( gen->state.gen_states, size, data, p); @@ -483,7 +483,7 @@ THC_API void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_ if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); uint64_t range = max_val - min_val; @@ -516,7 +516,7 @@ THC_API void THCTensor_(random)(THCState* state, THCTensor *self_) if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); - real *data = THCTensor_(data)(state, self); + scalar_t *data = THCTensor_(data)(state, self); #if defined(THC_REAL_IS_HALF) generate_random<<>>( @@ -529,10 +529,10 @@ THC_API void THCTensor_(random)(THCState* state, THCTensor *self_) gen->state.gen_states, static_cast(size), data, static_cast(0ULL), static_cast((1ULL << DBL_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_LONG) generate_random_64<<>>( - gen->state.gen_states, static_cast(size), data, static_cast(0ULL), static_cast(std::numeric_limits::max()) + 1); + gen->state.gen_states, static_cast(size), data, 
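generate_geometric above applies the inverse CDF of the geometric distribution to a uniform sample, ceil(log(u) / log(1 - p)). A host equivalent:

#include <cmath>
#include <random>

long geometric_sample(std::mt19937& rng, double p) {
  std::uniform_real_distribution<double> uni(0.0, 1.0);
  double u = 1.0 - uni(rng);  // maps [0, 1) to (0, 1], like curand_uniform
  return (long)std::ceil(std::log(u) / std::log(1.0 - p));
}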
static_cast(0ULL), static_cast(std::numeric_limits::max()) + 1); #else generate_random<<>>( - gen->state.gen_states, static_cast(size), data, static_cast(0UL), static_cast(std::numeric_limits::max()) + 1); + gen->state.gen_states, static_cast(size), data, static_cast(0UL), static_cast(std::numeric_limits::max()) + 1); #endif THCTensor_(freeCopyTo)(state, self, self_); diff --git a/aten/src/THC/generic/THCTensorScatterGather.cu b/aten/src/THC/generic/THCTensorScatterGather.cu index dd7d85f81dbc73..b41f1a68142dd2 100644 --- a/aten/src/THC/generic/THCTensorScatterGather.cu +++ b/aten/src/THC/generic/THCTensorScatterGather.cu @@ -49,46 +49,46 @@ void THCTensor_(gather)(THCState* state, THCTensor *tensor, if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); + TensorInfo srcInfo = + getTensorInfo(state, src); TensorInfo indexInfo = getTensorInfo(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: - RUN(unsigned int, 1, real); + RUN(unsigned int, 1, scalar_t); THCudaCheck(cudaGetLastError()); break; case 2: - RUN(unsigned int, 2, real); + RUN(unsigned int, 2, scalar_t); THCudaCheck(cudaGetLastError()); break; case 3: - RUN(unsigned int, 3, real); + RUN(unsigned int, 3, scalar_t); THCudaCheck(cudaGetLastError()); break; default: - RUN(unsigned int, -1, real); + RUN(unsigned int, -1, scalar_t); THCudaCheck(cudaGetLastError()); break; } } else { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); + TensorInfo srcInfo = + getTensorInfo(state, src); TensorInfo indexInfo = getTensorInfo(state, index); - RUN(uint64_t, -1, real); + RUN(uint64_t, -1, scalar_t); THCudaCheck(cudaGetLastError()); } } if (oldTensor) { - THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); + THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } @@ -146,42 +146,42 @@ void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLong if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); + TensorInfo srcInfo = + getTensorInfo(state, src); TensorInfo indexInfo = getTensorInfo(state, index); // Specialize for a small number of dimensions. 
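The gather hunks below dispatch on index dimensionality (the RUN(..., 1/2/3/-1, scalar_t) cases) purely for kernel specialization; the core semantics, for a 2-D tensor and dim == 1, are tensor[i][j] = src[i][index[i][j]]. Sketched serially:

#include <cstdint>
#include <vector>

void gather_dim1(const std::vector<std::vector<float>>& src,
                 const std::vector<std::vector<int64_t>>& index,
                 std::vector<std::vector<float>>& out) {
  out.assign(index.size(), {});
  for (size_t i = 0; i < index.size(); ++i) {
    out[i].resize(index[i].size());
    for (size_t j = 0; j < index[i].size(); ++j)
      out[i][j] = src[i][(size_t)index[i][j]];
  }
}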
switch (indexInfo.dims) { case 1: - RUN(unsigned int, 1, real); + RUN(unsigned int, 1, scalar_t); break; case 2: - RUN(unsigned int, 2, real); + RUN(unsigned int, 2, scalar_t); break; case 3: - RUN(unsigned int, 3, real); + RUN(unsigned int, 3, scalar_t); break; default: - RUN(unsigned int, -1, real); + RUN(unsigned int, -1, scalar_t); break; } } else { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); + TensorInfo srcInfo = + getTensorInfo(state, src); TensorInfo indexInfo = getTensorInfo(state, index); - RUN(uint64_t, -1, real) + RUN(uint64_t, -1, scalar_t) } } if (oldTensor) { - THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); + THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } @@ -238,42 +238,42 @@ void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaL if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); + TensorInfo srcInfo = + getTensorInfo(state, src); TensorInfo indexInfo = getTensorInfo(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: - RUN(unsigned int, 1, real); + RUN(unsigned int, 1, scalar_t); break; case 2: - RUN(unsigned int, 2, real); + RUN(unsigned int, 2, scalar_t); break; case 3: - RUN(unsigned int, 3, real); + RUN(unsigned int, 3, scalar_t); break; default: - RUN(unsigned int, -1, real); + RUN(unsigned int, -1, scalar_t); break; } } else { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); - TensorInfo srcInfo = - getTensorInfo(state, src); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); + TensorInfo srcInfo = + getTensorInfo(state, src); TensorInfo indexInfo = getTensorInfo(state, index); - RUN(uint64_t, -1, real) + RUN(uint64_t, -1, scalar_t) } } if (oldTensor) { - THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); + THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } @@ -289,7 +289,7 @@ void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaL void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, - int dim, THCudaLongTensor *index, real value) { + int dim, THCudaLongTensor *index, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); @@ -325,37 +325,37 @@ THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); TensorInfo indexInfo = getTensorInfo(state, index); // Specialize for a small number of dimensions. 
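scatterAdd, by contrast, accumulates into the destination slot picked by the index tensor, so duplicate indices sum rather than overwrite; the copyIgnoringOverlaps call above handles the overlapping-write case that this serial sketch sidesteps. For dim == 0:

#include <cstdint>
#include <vector>

void scatter_add_dim0(std::vector<std::vector<float>>& tensor,
                      const std::vector<std::vector<int64_t>>& index,
                      const std::vector<std::vector<float>>& src) {
  for (size_t i = 0; i < index.size(); ++i)
    for (size_t j = 0; j < index[i].size(); ++j)
      tensor[(size_t)index[i][j]][j] += src[i][j];  // duplicates accumulate
}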
switch (indexInfo.dims) { case 1: - RUN(unsigned int, 1, real); + RUN(unsigned int, 1, scalar_t); break; case 2: - RUN(unsigned int, 2, real); + RUN(unsigned int, 2, scalar_t); break; case 3: - RUN(unsigned int, 3, real); + RUN(unsigned int, 3, scalar_t); break; default: - RUN(unsigned int, -1, real); + RUN(unsigned int, -1, scalar_t); break; } } else { - TensorInfo tensorInfo = - getTensorInfo(state, tensor); + TensorInfo tensorInfo = + getTensorInfo(state, tensor); TensorInfo indexInfo = getTensorInfo(state, index); - RUN(uint64_t, -1, real); + RUN(uint64_t, -1, scalar_t); } if (oldTensor) { - THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); + THCTensor_copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } diff --git a/aten/src/THC/generic/THCTensorScatterGather.h b/aten/src/THC/generic/THCTensorScatterGather.h index e7e83b21757717..9fd4347bc85ec0 100644 --- a/aten/src/THC/generic/THCTensorScatterGather.h +++ b/aten/src/THC/generic/THCTensorScatterGather.h @@ -5,6 +5,6 @@ THC_API void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index); THC_API void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src); THC_API void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src); -THC_API void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value); +THC_API void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, scalar_t value); #endif diff --git a/aten/src/THC/generic/THCTensorSort.cu b/aten/src/THC/generic/THCTensorSort.cu index b6bcf6aecb4f78..0dca009b80ab72 100644 --- a/aten/src/THC/generic/THCTensorSort.cu +++ b/aten/src/THC/generic/THCTensorSort.cu @@ -53,7 +53,7 @@ THC_API void THCTensor_(sortKeyValueInplace)(THCState* state, dim3 block(blockSize); \ \ if (dir) { \ - bitonicSortKVInPlace, TYPE, SIZE> \ + bitonicSortKVInPlace, TYPE, SIZE> \ <<>>( \ keyInfo, \ keySlices, \ @@ -61,9 +61,9 @@ THC_API void THCTensor_(sortKeyValueInplace)(THCState* state, (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ - GTComp()); \ + GTComp()); \ } else { \ - bitonicSortKVInPlace, TYPE, SIZE> \ + bitonicSortKVInPlace, TYPE, SIZE> \ <<>>( \ keyInfo, \ keySlices, \ @@ -71,7 +71,7 @@ THC_API void THCTensor_(sortKeyValueInplace)(THCState* state, (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ - LTComp()); \ + LTComp()); \ } \ } while (0) @@ -108,8 +108,8 @@ THC_API void THCTensor_(sortKeyValueInplace)(THCState* state, // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis if (THCTensor_canUse32BitIndexMath(state, key)) { - TensorInfo keyInfo = - getTensorInfo(state, key); + TensorInfo keyInfo = + getTensorInfo(state, key); keyInfo.reduceDim(dim); int collapseKeyDim = keyInfo.collapseDims(dim); @@ -131,8 +131,8 @@ THC_API void THCTensor_(sortKeyValueInplace)(THCState* state, } } } else { - TensorInfo keyInfo = - getTensorInfo(state, key); + TensorInfo keyInfo = + getTensorInfo(state, key); keyInfo.reduceDim(dim); int collapseKeyDim = keyInfo.collapseDims(dim); @@ -209,7 +209,7 @@ void THCTensor_(sortViaThrust)(THCState* state, THCThrustAllocator thrustAlloc(state); - thrust::device_ptr keyIter(THCTensor_(data)(state, trContigKey)); + thrust::device_ptr 
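scatterFill above is the scalar variant of scatter: every indexed slot receives the same fill value. For the dim == 0 case:

#include <cstdint>
#include <vector>

void scatter_fill_dim0(std::vector<std::vector<float>>& tensor,
                       const std::vector<std::vector<int64_t>>& index,
                       float value) {
  for (size_t i = 0; i < index.size(); ++i)
    for (size_t j = 0; j < index[i].size(); ++j)
      tensor[(size_t)index[i][j]][j] = value;  // tensor[index[i][j]][j] = value
}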
keyIter(THCTensor_(data)(state, trContigKey)); // Since we are composing a global index across all segments rather // than a per-segment index, we treat the memory as int so we don't @@ -234,13 +234,13 @@ void THCTensor_(sortViaThrust)(THCState* state, #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif - keyIter, keyIter + totalElements, indexIter, ThrustGTOp()); + keyIter, keyIter + totalElements, indexIter, ThrustGTOp()); } else { thrust::stable_sort_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif - keyIter, keyIter + totalElements, indexIter, ThrustLTOp()); + keyIter, keyIter + totalElements, indexIter, ThrustLTOp()); } // Then, re-sort according to slice that each index is diff --git a/aten/src/THC/generic/THCTensorTopK.cu b/aten/src/THC/generic/THCTensorTopK.cu index 9fdb13f1469428..71ee008659b12a 100644 --- a/aten/src/THC/generic/THCTensorTopK.cu +++ b/aten/src/THC/generic/THCTensorTopK.cu @@ -33,7 +33,7 @@ THC_API void THCTensor_(topk)(THCState* state, // is provided to the kernel for the arguments. #define RUN_K(INDEX_T, DIM, DIR) \ - gatherTopK \ + gatherTopK \ <<>>( \ inputInfo, \ static_cast(sliceSize), \ @@ -73,10 +73,10 @@ THC_API void THCTensor_(topk)(THCState* state, #endif #define RUN_T(INDEX_T) \ - TensorInfo inputInfo = \ - getTensorInfo(state, input); \ - TensorInfo topKInfo = \ - getTensorInfo(state, topK); \ + TensorInfo inputInfo = \ + getTensorInfo(state, input); \ + TensorInfo topKInfo = \ + getTensorInfo(state, topK); \ TensorInfo indicesInfo = \ getTensorInfo(state, indices); \ \ diff --git a/aten/src/THCUNN/VolumetricAveragePooling.cu b/aten/src/THCUNN/VolumetricAveragePooling.cu index 610127c1777442..110eac44dcb997 100644 --- a/aten/src/THCUNN/VolumetricAveragePooling.cu +++ b/aten/src/THCUNN/VolumetricAveragePooling.cu @@ -122,7 +122,7 @@ __global__ void cuda_VolumetricAveragePooling_updateOutput_fixedKW( } #define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ - cuda_VolumetricAveragePooling_updateOutput_fixedKW \ + cuda_VolumetricAveragePooling_updateOutput_fixedKW \ <<>>( \ cudaInput, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); \ break diff --git a/aten/src/THCUNN/generic/Abs.cu b/aten/src/THCUNN/generic/Abs.cu index 0b2a5e7dfe090a..377761b2e4256d 100644 --- a/aten/src/THCUNN/generic/Abs.cu +++ b/aten/src/THCUNN/generic/Abs.cu @@ -11,7 +11,7 @@ void THNN_(Abs_updateOutput)( { THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, absupdateOutput_functor()); + THC_pointwiseApply2(state, output, input, absupdateOutput_functor()); } void THNN_(Abs_updateGradInput)( @@ -23,7 +23,7 @@ void THNN_(Abs_updateGradInput)( THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, input); - THC_pointwiseApply3(state, gradInput, input, gradOutput, absupdateGradInput_functor()); + THC_pointwiseApply3(state, gradInput, input, gradOutput, absupdateGradInput_functor()); } #endif diff --git a/aten/src/THCUNN/generic/AbsCriterion.cu b/aten/src/THCUNN/generic/AbsCriterion.cu index d1faeaa342554c..84fef3d0281abf 100644 --- a/aten/src/THCUNN/generic/AbsCriterion.cu +++ b/aten/src/THCUNN/generic/AbsCriterion.cu @@ -14,8 +14,8 @@ void THNN_(AbsCriterion_updateOutput)( if (reduction == Reduction::None) { THCTensor_(resizeAs)(state, output, input); - 
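The sortViaThrust fallback above stable-sorts keys while dragging an index sequence along as values, with a greater-than comparator for descending order. A minimal sketch (the original composes a global index across segments and then re-sorts per slice, which is omitted here):

#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

void sort_with_indices(thrust::device_vector<float>& keys,
                       thrust::device_vector<long>& indices, bool descending) {
  thrust::sequence(indices.begin(), indices.end());  // 0, 1, 2, ...
  if (descending)
    thrust::stable_sort_by_key(keys.begin(), keys.end(), indices.begin(),
                               thrust::greater<float>());   // ThrustGTOp
  else
    thrust::stable_sort_by_key(keys.begin(), keys.end(), indices.begin());
}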
THC_pointwiseApply3(state, input, target, output, - abs_updateOutput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, output, + abs_updateOutput_no_reduce_functor()); return; } @@ -26,9 +26,9 @@ void THNN_(AbsCriterion_updateOutput)( input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - accreal sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal)0, thrust::plus(), abs_functor()); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + accreal sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal)0, thrust::plus(), abs_functor()); if (reduction == Reduction::ElementwiseMean) sum /= size; @@ -36,7 +36,7 @@ void THNN_(AbsCriterion_updateOutput)( THCTensor_(free)(state, input); THCTensor_(free)(state, target); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); } void THNN_(AbsCriterion_updateGradInput)( @@ -54,8 +54,8 @@ void THNN_(AbsCriterion_updateGradInput)( if (reduction == Reduction::None) { THCUNN_check_shape(state, gradOutput, input); - THC_pointwiseApply3(state, input, target, gradInput, - abs_updateGradInput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, gradInput, + abs_updateGradInput_no_reduce_functor()); THCTensor_(cmul)(state, gradInput, gradInput, gradOutput); return; } @@ -63,17 +63,17 @@ void THNN_(AbsCriterion_updateGradInput)( THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); ptrdiff_t size = THCTensor_(nElement)(state, input); - real norm = ScalarConvert::to(reduction == Reduction::ElementwiseMean ? 1./size : 1.); + scalar_t norm = ScalarConvert::to(reduction == Reduction::ElementwiseMean ? 
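AbsCriterion_updateOutput above reduces |input - target| with thrust::inner_product and, under Reduction::ElementwiseMean, divides the sum by the element count. The scalar logic:

#include <cmath>
#include <vector>

double l1_loss(const std::vector<double>& in,
               const std::vector<double>& tgt, bool elementwise_mean) {
  double sum = 0;
  for (size_t i = 0; i < in.size(); ++i)
    sum += std::fabs(in[i] - tgt[i]);        // abs_functor term
  return elementwise_mean ? sum / in.size() : sum;
}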
1./size : 1.); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, - abs_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0))); + abs_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0))); THCTensor_(free)(state, input); THCTensor_(free)(state, target); diff --git a/aten/src/THCUNN/generic/BCECriterion.cu b/aten/src/THCUNN/generic/BCECriterion.cu index 3dcde6226e9393..43acca82093617 100644 --- a/aten/src/THCUNN/generic/BCECriterion.cu +++ b/aten/src/THCUNN/generic/BCECriterion.cu @@ -16,8 +16,8 @@ void THNN_(BCECriterion_updateOutput)( if (reduction == Reduction::None) { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply3(state, input, target, output, - bce_updateOutput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, output, + bce_updateOutput_no_reduce_functor()); if (weights) { THCTensor_(cmul)(state, output, output, weights); } @@ -30,18 +30,18 @@ void THNN_(BCECriterion_updateOutput)( input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); THCThrustAllocator thrustAlloc(state); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); accreal sum; if (weights) { weights = THCTensor_(newContiguous)(state, weights); - thrust::device_ptr weights_data(THCTensor_(data)(state, weights)); + thrust::device_ptr weights_data(THCTensor_(data)(state, weights)); sum = thrust::transform_reduce( thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), - bce_functor_weights(), + bce_functor_weights(), (accreal) 0, thrust::plus() ); @@ -51,7 +51,7 @@ void THNN_(BCECriterion_updateOutput)( thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), - bce_functor(), + bce_functor(), (accreal) 0, thrust::plus() ); @@ -63,7 +63,7 @@ void THNN_(BCECriterion_updateOutput)( THCTensor_(free)(state, input); THCTensor_(free)(state, target); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); } void THNN_(BCECriterion_updateGradInput)( @@ -83,8 +83,8 @@ void THNN_(BCECriterion_updateGradInput)( if (reduction == Reduction::None) { THCUNN_check_nElement(state, gradOutput, input); - THC_pointwiseApply3(state, input, target, gradInput, - bce_updateGradInput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, gradInput, + bce_updateGradInput_no_reduce_functor()); THCTensor_(cmul)(state, gradInput, gradInput, gradOutput); if (weights) { 
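The transform_reduce in BCECriterion_updateOutput above sums a per-element binary cross-entropy term over (input, target[, weight]) tuples; that term is assumed here to be the standard -(t*log(x) + (1-t)*log(1-x)), scaled by the optional weight:

#include <cmath>
#include <vector>

double bce_sum(const std::vector<double>& x, const std::vector<double>& t,
               const std::vector<double>& w /* may be empty */) {
  double sum = 0;
  for (size_t i = 0; i < x.size(); ++i) {
    double term = -(t[i] * std::log(x[i]) + (1 - t[i]) * std::log(1 - x[i]));
    sum += w.empty() ? term : w[i] * term;   // bce_functor_weights variant
  }
  return sum;
}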
THCTensor_(cmul)(state, gradInput, gradInput, weights); @@ -95,23 +95,23 @@ void THNN_(BCECriterion_updateGradInput)( THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); ptrdiff_t size = THCTensor_(nElement)(state, input); - real norm = ScalarConvert::to((reduction == Reduction::ElementwiseMean ? accreal(1)/size : accreal(1)) * THCTensor_(get1d)(state, gradOutput, 0)); + scalar_t norm = ScalarConvert::to((reduction == Reduction::ElementwiseMean ? accreal(1)/size : accreal(1)) * THCTensor_(get1d)(state, gradOutput, 0)); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); if (weights) { weights = THCTensor_(newContiguous)(state, weights); - thrust::device_ptr weights_data(THCTensor_(data)(state, weights)); + thrust::device_ptr weights_data(THCTensor_(data)(state, weights)); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), gradInput_data, - bce_updateGradInput_functor_weights(norm) + bce_updateGradInput_functor_weights(norm) ); THCTensor_(free)(state, weights); } else { @@ -119,7 +119,7 @@ void THNN_(BCECriterion_updateGradInput)( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), gradInput_data, - bce_updateGradInput_functor(norm) + bce_updateGradInput_functor(norm) ); } diff --git a/aten/src/THCUNN/generic/BatchNormalization.cu b/aten/src/THCUNN/generic/BatchNormalization.cu index 5a218e0f52dd80..227cc476567981 100644 --- a/aten/src/THCUNN/generic/BatchNormalization.cu +++ b/aten/src/THCUNN/generic/BatchNormalization.cu @@ -2,18 +2,18 @@ #define THC_GENERIC_FILE "generic/BatchNormalization.cu" #else -#define DeviceTensor3 THCDeviceTensor -#define DeviceTensor1 THCDeviceTensor +#define DeviceTensor3 THCDeviceTensor +#define DeviceTensor1 THCDeviceTensor template -static THCDeviceTensor THNN_(devicetensor)(THCState *state, THCTensor *t) { +static THCDeviceTensor THNN_(devicetensor)(THCState *state, THCTensor *t) { if (!t) { - return THCDeviceTensor(); + return THCDeviceTensor(); } int inDim = THCTensor_nDimensionLegacyAll(state, t); if (inDim == Dim) { - return toDeviceTensor(state, t); + return toDeviceTensor(state, t); } // View in which the last dimensions are collapsed or expanded as needed @@ -28,7 +28,7 @@ static THCDeviceTensor THNN_(devicetensor)(THCState *state, THCTensor size[Dim - 1] *= THTensor_sizeLegacyNoScalars(t, i); } } - return THCDeviceTensor(t->data(), size); + return THCDeviceTensor(t->data(), size); } void THNN_(BatchNormalization_updateOutput)( @@ -58,12 +58,12 @@ void THNN_(BatchNormalization_updateOutput)( if (!train) { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); - BatchNormalizationUpdateOutputInference_kernel <<>>( + BatchNormalizationUpdateOutputInference_kernel <<>>( input, output, runningMean, runningVar, weight, bias, eps); } else { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); - 
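The BatchNormalization inference kernel above applies the per-channel affine normalization using the running statistics; per element that is:

#include <cmath>

double bn_inference(double x, double running_mean, double running_var,
                    double eps, double weight, double bias) {
  return (x - running_mean) / std::sqrt(running_var + eps) * weight + bias;
}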
BatchNormalizationUpdateOutput_kernel <<>>( + BatchNormalizationUpdateOutput_kernel <<>>( input, output, weight, bias, static_cast(eps), static_cast(momentum), runningMean, runningVar, saveMean, saveStd); } @@ -96,7 +96,7 @@ void THNN_(BatchNormalization_backward)( dim3 blocks(gradOutput.getSize(1)); dim3 threads(getNumThreads(gradOutput.getSize(2))); - BatchNormalizationBackward_kernel <<>>( + BatchNormalizationBackward_kernel <<>>( input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar, saveMean, saveStd, train, scale, eps); THCudaCheck(cudaGetLastError()); diff --git a/aten/src/THCUNN/generic/ClassNLLCriterion.cu b/aten/src/THCUNN/generic/ClassNLLCriterion.cu index 6866c5798f7d23..80cd9dd51c500d 100644 --- a/aten/src/THCUNN/generic/ClassNLLCriterion.cu +++ b/aten/src/THCUNN/generic/ClassNLLCriterion.cu @@ -49,12 +49,12 @@ void THNN_(ClassNLLCriterion_updateOutput)( weights = THCTensor_(newContiguous)(state, weights); } - ClassNLLCriterion_updateOutput_no_reduce_kernel + ClassNLLCriterion_updateOutput_no_reduce_kernel <<>>( batch_size, - toDeviceTensor(state, input), + toDeviceTensor(state, input), toDeviceTensor(state, target), - toDeviceTensor(state, output), + toDeviceTensor(state, output), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); @@ -74,14 +74,14 @@ void THNN_(ClassNLLCriterion_updateOutput)( weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); - real *input_data = THCTensor_(data)(state, input); - real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; + scalar_t *input_data = THCTensor_(data)(state, input); + scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; THCIndex_t *target_data = THCIndexTensor_(data)(state, target); - real *output_data = THCTensor_(data)(state, output); - real *total_weight_data = THCTensor_(data)(state, total_weight); + scalar_t *output_data = THCTensor_(data)(state, output); + scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { - cunn_ClassNLLCriterion_updateOutput_kernel1 + cunn_ClassNLLCriterion_updateOutput_kernel1 <<<1, 1, 0, THCState_getCurrentStream(state)>>>( output_data, total_weight_data, @@ -94,7 +94,7 @@ void THNN_(ClassNLLCriterion_updateOutput)( ); } else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) { - cunn_ClassNLLCriterion_updateOutput_kernel + cunn_ClassNLLCriterion_updateOutput_kernel <<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>( output_data, total_weight_data, @@ -167,12 +167,12 @@ void THNN_(ClassNLLCriterion_updateGradInput)( weights = THCTensor_(newContiguous)(state, weights); } - ClassNLLCriterion_updateGradInput_no_reduce_kernel + ClassNLLCriterion_updateGradInput_no_reduce_kernel <<>>( batch_size, toDeviceTensor(state, target), - toDeviceTensor(state, gradOutput), - toDeviceTensor(state, gradInput), + toDeviceTensor(state, gradOutput), + toDeviceTensor(state, gradInput), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); @@ -191,14 +191,14 @@ void THNN_(ClassNLLCriterion_updateGradInput)( target = THCIndexTensor_(newContiguous)(state, target); THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); - real *gradOutput_data = THCTensor_(data)(state, gradOutput); - real *weights_data = weights ? 
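The ClassNLLCriterion kernels above compute, per sample, -weights[target] * input[target], skipping ignore_index and accumulating total_weight so the mean reduction divides by the summed weights. A host sketch of that reduction:

#include <vector>

double nll_loss(const std::vector<std::vector<double>>& log_probs,
                const std::vector<long>& target,
                const std::vector<double>& weights /* may be empty */,
                long ignore_index) {
  double sum = 0, total_weight = 0;
  for (size_t i = 0; i < log_probs.size(); ++i) {
    if (target[i] == ignore_index) continue;
    double w = weights.empty() ? 1.0 : weights[(size_t)target[i]];
    sum -= w * log_probs[i][(size_t)target[i]];
    total_weight += w;
  }
  return total_weight > 0 ? sum / total_weight : 0.0;  // ElementwiseMean
}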
THCTensor_(data)(state, weights) : NULL; - real *gradInput_data = THCTensor_(data)(state, gradInput); + scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); + scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; + scalar_t *gradInput_data = THCTensor_(data)(state, gradInput); THCIndex_t *target_data = THCIndexTensor_(data)(state, target); - real *total_weight_data = THCTensor_(data)(state, total_weight); + scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { - cunn_ClassNLLCriterion_updateGradInput_kernel1 + cunn_ClassNLLCriterion_updateGradInput_kernel1 <<<1, 1, 0, THCState_getCurrentStream(state)>>>( gradInput_data, gradOutput_data, @@ -210,7 +210,7 @@ void THNN_(ClassNLLCriterion_updateGradInput)( ignore_index ); } else { - cunn_ClassNLLCriterion_updateGradInput_kernel + cunn_ClassNLLCriterion_updateGradInput_kernel <<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>( gradInput_data, gradOutput_data, diff --git a/aten/src/THCUNN/generic/Col2Im.cu b/aten/src/THCUNN/generic/Col2Im.cu index c14bb1bb78ef0d..8e87032ff9c8be 100644 --- a/aten/src/THCUNN/generic/Col2Im.cu +++ b/aten/src/THCUNN/generic/Col2Im.cu @@ -93,7 +93,7 @@ void THNN_(Col2Im_updateOutput)( THCTensor_(select)(state, input_n, input, 0, elt); THCTensor_(select)(state, output_n, output, 0, elt); - col2im( + col2im( THCState_getCurrentStream(state), THCTensor_(data)(state, input_n), nOutputPlane, diff --git a/aten/src/THCUNN/generic/DistKLDivCriterion.cu b/aten/src/THCUNN/generic/DistKLDivCriterion.cu index e798285b5868b3..8a02b3caa20db4 100644 --- a/aten/src/THCUNN/generic/DistKLDivCriterion.cu +++ b/aten/src/THCUNN/generic/DistKLDivCriterion.cu @@ -17,8 +17,8 @@ void THNN_(DistKLDivCriterion_updateOutput)( if (reduction == Reduction::None) { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply3(state, input, target, output, - kl_updateOutput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, output, + kl_updateOutput_no_reduce_functor()); return; } @@ -31,9 +31,9 @@ void THNN_(DistKLDivCriterion_updateOutput)( input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus(), kl_functor()); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus(), kl_functor()); if (reduction == Reduction::ElementwiseMean) sum /= size; @@ -41,7 +41,7 @@ void THNN_(DistKLDivCriterion_updateOutput)( THCTensor_(free)(state, input); THCTensor_(free)(state, target); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); } void THNN_(DistKLDivCriterion_updateGradInput)( @@ -62,25 +62,25 @@ void THNN_(DistKLDivCriterion_updateGradInput)( if (reduction == Reduction::None) { THCUNN_check_shape(state, gradOutput, input); - THC_pointwiseApply3(state, target, gradOutput, gradInput, - kl_updateGradInput_no_reduce_functor()); + THC_pointwiseApply3(state, target, gradOutput, gradInput, + kl_updateGradInput_no_reduce_functor()); return; } THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); ptrdiff_t size = 
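The kl_functor reduced in DistKLDivCriterion_updateOutput above evaluates the pointwise KL term with the input given in log-space, understood to be t > 0 ? t * (log(t) - x) : 0:

#include <cmath>

double kl_term(double x /* log-probability input */, double t /* target */) {
  return t > 0 ? t * (std::log(t) - x) : 0.0;
}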
THCTensor_(nElement)(state, input); - real norm = (reduction == Reduction::ElementwiseMean ? ScalarConvert::to(accreal(1)/size) : ScalarConvert::to(1)); + scalar_t norm = (reduction == Reduction::ElementwiseMean ? ScalarConvert::to(accreal(1)/size) : ScalarConvert::to(1)); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, - kl_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0))); + kl_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0))); THCTensor_(free)(state, input); THCTensor_(free)(state, target); diff --git a/aten/src/THCUNN/generic/ELU.cu b/aten/src/THCUNN/generic/ELU.cu index 6f78349110ec35..75710dbd9c81b6 100644 --- a/aten/src/THCUNN/generic/ELU.cu +++ b/aten/src/THCUNN/generic/ELU.cu @@ -14,20 +14,20 @@ void THNN_(ELU_updateOutput)( accreal input_scale, bool inplace) { - real negcoef = ScalarConvert::to(alpha * scale); - real poscoef = ScalarConvert::to(scale * input_scale); - real negiptcoef = ScalarConvert::to(input_scale); + scalar_t negcoef = ScalarConvert::to(alpha * scale); + scalar_t poscoef = ScalarConvert::to(scale * input_scale); + scalar_t negiptcoef = ScalarConvert::to(input_scale); THCUNN_assertSameGPU(state, 2, input, output); if (inplace) { - THC_pointwiseApply1(state, input, ELUupdateOutputIP_functor(negcoef, poscoef, negiptcoef)); + THC_pointwiseApply1(state, input, ELUupdateOutputIP_functor(negcoef, poscoef, negiptcoef)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, ELUupdateOutput_functor(negcoef, poscoef, negiptcoef)); + THC_pointwiseApply2(state, output, input, ELUupdateOutput_functor(negcoef, poscoef, negiptcoef)); } } @@ -41,14 +41,14 @@ void THNN_(ELU_updateGradInput)( accreal scale, accreal input_scale) { - real negcoef = ScalarConvert::to(alpha * scale); - real poscoef = ScalarConvert::to(scale * input_scale); - real negiptcoef = ScalarConvert::to(input_scale); + scalar_t negcoef = ScalarConvert::to(alpha * scale); + scalar_t poscoef = ScalarConvert::to(scale * input_scale); + scalar_t negiptcoef = ScalarConvert::to(input_scale); THCUNN_check_nElement(state, output, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, output); - THC_pointwiseApply3(state, gradInput, output, gradOutput, ELUupdateGradInput_functor(negcoef, poscoef, negiptcoef)); + THC_pointwiseApply3(state, gradInput, output, gradOutput, ELUupdateGradInput_functor(negcoef, poscoef, negiptcoef)); } #endif diff --git a/aten/src/THCUNN/generic/FeatureLPPooling.cu b/aten/src/THCUNN/generic/FeatureLPPooling.cu index 3bf7a224715481..3a8282b0f604eb 100644 --- a/aten/src/THCUNN/generic/FeatureLPPooling.cu +++ b/aten/src/THCUNN/generic/FeatureLPPooling.cu @@ -13,39 +13,39 @@ // [batch dim][feature dim] // [batch dim][feature dim][opt dim 1] // [batch dim][feature dim][opt dim 1][opt dim 2] -THCDeviceTensor +THCDeviceTensor THNN_(FeatureLPPooling_upcast)(THCState* state, THCTensor* t, 
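The ELU functors above fold alpha, scale, and input_scale into the three coefficients negcoef = alpha * scale, poscoef = scale * input_scale, and negiptcoef = input_scale; the forward map is then:

#include <cmath>

double elu(double x, double negcoef, double poscoef, double negiptcoef) {
  return x > 0 ? x * poscoef
               : (std::exp(x * negiptcoef) - 1.0) * negcoef;
}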
bool batchMode) { int inputDim = THCTensor_(nDimensionLegacyAll)(state, t); if (inputDim == 1) { // [feature dim] - return toDeviceTensor(state, t). + return toDeviceTensor(state, t). upcastOuter<2>().upcastInner<4>(); } else if (inputDim == 2) { if (batchMode) { // [batch dim][feature dim] - return toDeviceTensor(state, t). + return toDeviceTensor(state, t). upcastInner<4>(); } else { // [feature dim][opt dim 1] - return toDeviceTensor(state, t). + return toDeviceTensor(state, t). upcastOuter<3>().upcastInner<4>(); } } else if (inputDim == 3) { if (batchMode) { // [batch dim][feature dim][opt dim 1] - return toDeviceTensor(state, t). + return toDeviceTensor(state, t). upcastInner<4>(); } else { // [feature dim][opt dim 1][opt dim 2] - return toDeviceTensor(state, t). + return toDeviceTensor(state, t). upcastOuter<4>(); } } else { // inputDim == 4 // [batch dim][feature dim][opt dim 1][opt dim 2] THAssert(batchMode); - return toDeviceTensor(state, t); + return toDeviceTensor(state, t); } } @@ -162,8 +162,8 @@ void THNN_(FeatureLPPooling_updateOutput)(THCState* state, THArgCheck(THCTensor_canUse32BitIndexMath(state, inputTH), 2, "input tensor must fit into 32-bit index math"); - THCDeviceTensor input; - THCDeviceTensor output; + THCDeviceTensor input; + THCDeviceTensor output; input = THNN_(FeatureLPPooling_upcast)(state, inputTH, batchMode); @@ -217,10 +217,10 @@ void THNN_(FeatureLPPooling_updateGradInput)(THCState* state, "input must be 1-3 dimensions for non-batch mode"); } - THCDeviceTensor gradOutput; - THCDeviceTensor input; - THCDeviceTensor output; - THCDeviceTensor gradInput; + THCDeviceTensor gradOutput; + THCDeviceTensor input; + THCDeviceTensor output; + THCDeviceTensor gradInput; input = THNN_(FeatureLPPooling_upcast)(state, inputTH, batchMode); diff --git a/aten/src/THCUNN/generic/GatedLinearUnit.cu b/aten/src/THCUNN/generic/GatedLinearUnit.cu index 2381917ec9e1eb..0aa483b83281f0 100644 --- a/aten/src/THCUNN/generic/GatedLinearUnit.cu +++ b/aten/src/THCUNN/generic/GatedLinearUnit.cu @@ -25,7 +25,7 @@ void THNN_(GatedLinear_updateOutput)( THCTensor *secondHalf = THCTensor_(newNarrow)(state, input, dim, inputSize, inputSize); // x = x1:cmul( sigmoid(x2) ) - THC_pointwiseApply3(state, output, secondHalf, firstHalf, gatedLinearCSigMul_functor()); + THC_pointwiseApply3(state, output, secondHalf, firstHalf, gatedLinearCSigMul_functor()); THCTensor_(free)(state, firstHalf); THCTensor_(free)(state, secondHalf); @@ -50,7 +50,7 @@ void THNN_(GatedLinear_updateGradInput)( THCTensor *gradInputfirstHalf = THCTensor_(newNarrow)(state, gradInput, dim, 0, inputSize); const int64_t stride_i = THCTensor_(stride)(state, input, dim) * inputSize; const int64_t stride_gI = THCTensor_(stride)(state, gradInput, dim) * inputSize; - THC_pointwiseApply3(state, gradInputfirstHalf, gradOutput, firstHalf, gatedLinearDerivative(stride_i, stride_gI)); + THC_pointwiseApply3(state, gradInputfirstHalf, gradOutput, firstHalf, gatedLinearDerivative(stride_i, stride_gI)); THCTensor_(free)(state, firstHalf); THCTensor_(free)(state, gradInputfirstHalf); } diff --git a/aten/src/THCUNN/generic/HardTanh.cu b/aten/src/THCUNN/generic/HardTanh.cu index 18195b7e06ece7..184fe4595bfb25 100644 --- a/aten/src/THCUNN/generic/HardTanh.cu +++ b/aten/src/THCUNN/generic/HardTanh.cu @@ -12,20 +12,20 @@ void THNN_(HardTanh_updateOutput)( accreal max_val_, bool inplace) { - real min_val = ScalarConvert::to(min_val_); - real max_val = ScalarConvert::to(max_val_); + scalar_t min_val = ScalarConvert::to(min_val_); + scalar_t max_val = 
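FeatureLPPooling_upcast exists so the kernels only ever see a 4-d [batch][feature][opt1][opt2] view; the branches above pad missing dimensions with size-1 axes via upcastOuter/upcastInner. A standalone helper showing just the shape rule (THCDeviceTensor itself is not reproduced here):

#include <array>
#include <vector>

std::array<long, 4> upcast_shape(const std::vector<long>& s, bool batchMode) {
  switch (s.size()) {
    case 1:  return {1, s[0], 1, 1};                                      // [f]
    case 2:  return batchMode ? std::array<long, 4>{s[0], s[1], 1, 1}     // [b][f]
                              : std::array<long, 4>{1, s[0], s[1], 1};    // [f][o1]
    case 3:  return batchMode ? std::array<long, 4>{s[0], s[1], s[2], 1}  // [b][f][o1]
                              : std::array<long, 4>{1, s[0], s[1], s[2]}; // [f][o1][o2]
    default: return {s[0], s[1], s[2], s[3]};           // 4-d input, batch mode
  }
}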
ScalarConvert::to(max_val_); THCUNN_assertSameGPU(state, 2, input, output); if(inplace) { THCTensor_(set)(state, output, input); - THC_pointwiseApply1(state, output, hardtanhupdateOutput_functor(min_val, max_val)); + THC_pointwiseApply1(state, output, hardtanhupdateOutput_functor(min_val, max_val)); } else { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, - hardtanhupdateOutput_functor(min_val, max_val)); + THC_pointwiseApply2(state, output, input, + hardtanhupdateOutput_functor(min_val, max_val)); } } @@ -38,8 +38,8 @@ void THNN_(HardTanh_updateGradInput)( accreal max_val_, bool inplace) { - real min_val = ScalarConvert::to(min_val_); - real max_val = ScalarConvert::to(max_val_); + scalar_t min_val = ScalarConvert::to(min_val_); + scalar_t max_val = ScalarConvert::to(max_val_); THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); @@ -47,14 +47,14 @@ void THNN_(HardTanh_updateGradInput)( if (inplace) { THCTensor_(set)(state, gradInput, gradOutput); - THC_pointwiseApply2(state, gradInput, input, - hardtanhupdateGradInput_functor(min_val, max_val)); + THC_pointwiseApply2(state, gradInput, input, + hardtanhupdateGradInput_functor(min_val, max_val)); } else { THCTensor_(resizeAs)(state, gradInput, input); - THC_pointwiseApply3(state, gradInput, input, gradOutput, - hardtanhupdateGradInput_functor(min_val, max_val)); + THC_pointwiseApply3(state, gradInput, input, gradOutput, + hardtanhupdateGradInput_functor(min_val, max_val)); } } diff --git a/aten/src/THCUNN/generic/IndexLinear.cu b/aten/src/THCUNN/generic/IndexLinear.cu index 080ada5d8ec5d5..f87b0f42fcc8db 100644 --- a/aten/src/THCUNN/generic/IndexLinear.cu +++ b/aten/src/THCUNN/generic/IndexLinear.cu @@ -51,11 +51,11 @@ void THNN_(IndexLinear_updateOutput)( THCTensor_(resize2d)(state, output, batchSize, outDim); int64_t *keysData = THCudaLongTensor_data (state, keys); - real *valuesData = THCTensor_(data) (state, values); + scalar_t *valuesData = THCTensor_(data) (state, values); int64_t *cumSumSizesData = THCudaLongTensor_data (state, cumSumSizes); - real *biasData = THCTensor_(data) (state, bias); - real *weightData = THCTensor_(data) (state, weight); - real *outData = THCTensor_(data) (state, output); + scalar_t *biasData = THCTensor_(data) (state, bias); + scalar_t *weightData = THCTensor_(data) (state, weight); + scalar_t *outData = THCTensor_(data) (state, output); cudaStream_t stream = THCState_getCurrentStream(state); dim3 threads(THREADS_X, THREADS_Y); @@ -67,18 +67,18 @@ void THNN_(IndexLinear_updateOutput)( dim3 blocks(blocks_x, blocks_y, blocks_z); if (blocks_z > 1) { - THCudaCheck(cudaMemsetAsync(outData, 0, outDim * batchSize * sizeof(real), stream)); + THCudaCheck(cudaMemsetAsync(outData, 0, outDim * batchSize * sizeof(scalar_t), stream)); } - real *normalizedValuesData = NULL; + scalar_t *normalizedValuesData = NULL; if (maxNormalize && train) { THCTensor_(resize1d)(state, normalizedValues, keysSize); normalizedValuesData = THCTensor_(data)(state, normalizedValues); - updateOutput<<>> + updateOutput<<>> (outData, normalizedValuesData, valuesData, cumSumSizesData, keysData, batchSize, outDim, weightData, biasData, weightStride, keysOffset, maxNormalize, nnzPerBlock); } else { - updateOutput<<>> + updateOutput<<>> (outData, normalizedValuesData, valuesData, cumSumSizesData, keysData, batchSize, outDim, weightData, biasData, weightStride, keysOffset, maxNormalize, nnzPerBlock); } @@ -132,21 +132,21 @@ void 
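The HardTanh hunks above go through THC_pointwiseApply1/2/3, which take the element types as template arguments plus a functor operating on raw element pointers. A sketch of that functor shape with a plain CUDA grid in place of the THC apply machinery; hardtanh_like and apply2 are illustrative names, not the THCUNN ones.

using scalar_t = float;

struct hardtanh_like {
  const scalar_t min_val, max_val;
  __host__ __device__ hardtanh_like(scalar_t lo, scalar_t hi)
      : min_val(lo), max_val(hi) {}
  __device__ void operator()(scalar_t* out, const scalar_t* in) const {
    *out = *in < min_val ? min_val : (*in > max_val ? max_val : *in);
  }
};

template <typename Op>
__global__ void apply2(scalar_t* out, const scalar_t* in, long n, Op op) {
  long i = blockIdx.x * (long)blockDim.x + threadIdx.x;
  if (i < n) op(out + i, in + i);
}

// e.g. apply2<<<(n + 255) / 256, 256>>>(out, in, n, hardtanh_like(-1.f, 1.f));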
THNN_(IndexLinear_accGradParameters)( THCTensor_(resize2d)(state, gradWeight, keysSize, outDim * (maxNormalize > 0 ? 2 : 1)); - real *valuesData = THCTensor_(data) (state, values); + scalar_t *valuesData = THCTensor_(data) (state, values); int64_t *cumSumSizesData = THCudaLongTensor_data (state, cumSumSizes); - real *gradOutputData = THCTensor_(data) (state, gradOutput); - real *gradBiasData = THCTensor_(data) (state, gradBias); - real *gradWeightData = THCTensor_(data) (state, gradWeight); + scalar_t *gradOutputData = THCTensor_(data) (state, gradOutput); + scalar_t *gradBiasData = THCTensor_(data) (state, gradBias); + scalar_t *gradWeightData = THCTensor_(data) (state, gradWeight); int64_t gradWeightStride = gradWeight->stride(0); cudaStream_t stream = THCState_getCurrentStream(state); dim3 threads(THREADS_X, THREADS_Y); int blocks_x = divup(outDim, threads.x); - accGradBias<<>> + accGradBias<<>> (gradBiasData, gradOutputData, outDim, batchSize, scale, weightDecay); dim3 blocks(blocks_x, batchSize); - accGradWeight<<>> + accGradWeight<<>> (gradWeightData, gradOutputData, valuesData, cumSumSizesData, outDim, gradWeightStride, scale, weightDecay, maxNormalize); } @@ -188,10 +188,10 @@ void THNN_(IndexLinear_accUpdateGradParameters)( int64_t wDim = weight->size(1); int maxNormalize = wDim - outDim; - real *biasData = THCTensor_(data) (state, bias); - real *weightData = THCTensor_(data) (state, weight); - real *gradOutputData = THCTensor_(data) (state, gradOutput); - real *valuesData = THCTensor_(data) (state, values); + scalar_t *biasData = THCTensor_(data) (state, bias); + scalar_t *weightData = THCTensor_(data) (state, weight); + scalar_t *gradOutputData = THCTensor_(data) (state, gradOutput); + scalar_t *valuesData = THCTensor_(data) (state, values); int64_t *keysData = THCudaLongTensor_data (state, keys); int64_t *cumSumSizesData = THCudaLongTensor_data (state, cumSumSizes); int64_t weightStride = weight->stride(0); @@ -200,7 +200,7 @@ void THNN_(IndexLinear_accUpdateGradParameters)( dim3 threads(THREADS_X, THREADS_Y); int blocks_x = divup(outDim, threads.x); - accGradBias<<>> + accGradBias<<>> (biasData, gradOutputData, outDim, batchSize, scale, weightDecay); int64_t nnzPerRow = divup(keysSize, batchSize); @@ -208,7 +208,7 @@ void THNN_(IndexLinear_accUpdateGradParameters)( dim3 blocks(blocks_x, blocks_y); for (int64_t batchId = 0; batchId < batchSize; batchId++) { - accUpdateWeight<<>> + accUpdateWeight<<>> (weightData, weightStride, gradOutputData, outDim, valuesData, cumSumSizesData, keysData, keysOffset, scale, weightDecay, maxNormalize, batchId); @@ -253,8 +253,8 @@ void THNN_(IndexLinear_updateParameters)( int64_t *keysData = THCudaLongTensor_data (state, runningKeys); int64_t *cumSumSizesData = THCudaLongTensor_data (state, cumSumSizes); - real *gradWeightData = THCTensor_(data) (state, gradWeight); - real *weightData = THCTensor_(data) (state, weight); + scalar_t *gradWeightData = THCTensor_(data) (state, gradWeight); + scalar_t *weightData = THCTensor_(data) (state, weight); dim3 threads(THREADS_X, THREADS_Y); int64_t nnzPerRow = divup(keysSize, batchSize); @@ -264,7 +264,7 @@ void THNN_(IndexLinear_updateParameters)( cudaStream_t stream = THCState_getCurrentStream(state); for (int64_t batchId = 0; batchId < batchSize; batchId++) { - updateWeight<<>> + updateWeight<<>> (weightData, gradWeightData, keysData, cumSumSizesData, outDim, gradWeightStride, weightStride, keysOffset, learningRate, weightDecay, maxNormalize, batchId); diff --git a/aten/src/THCUNN/generic/L1Cost.cu 
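The IndexLinear kernels above are launched with an explicit configuration on the current THC stream. A reduced sketch of the accGradBias-style launch, with the <scalar_t> instantiation and the <<<blocks, threads, shared, stream>>> arguments spelled out; the weight-decay term of the real kernel is omitted.

#include <cuda_runtime.h>

using scalar_t = float;

template <typename T>
__global__ void acc_bias_like(T* gradBias, const T* gradOutput,
                              long outDim, long batchSize, T scale) {
  long j = blockIdx.x * (long)blockDim.x + threadIdx.x;
  if (j >= outDim) return;
  T acc = 0;
  for (long b = 0; b < batchSize; ++b)
    acc += gradOutput[b * outDim + j];   // sum the batch for this output unit
  gradBias[j] += scale * acc;
}

void launch_acc_bias(scalar_t* gradBias, const scalar_t* gradOutput,
                     long outDim, long batchSize, scalar_t scale,
                     cudaStream_t stream) {
  dim3 threads(256);
  dim3 blocks((unsigned)((outDim + threads.x - 1) / threads.x));
  acc_bias_like<scalar_t><<<blocks, threads, 0, stream>>>(
      gradBias, gradOutput, outDim, batchSize, scale);
}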
b/aten/src/THCUNN/generic/L1Cost.cu
index fd85e61cc74b56..0b0079374ea320 100644
--- a/aten/src/THCUNN/generic/L1Cost.cu
+++ b/aten/src/THCUNN/generic/L1Cost.cu
@@ -12,12 +12,12 @@ void THNN_(L1Cost_updateOutput)(
   accreal sum;
   ptrdiff_t size = THCTensor_(nElement)(state, input);
   input = THCTensor_(newContiguous)(state, input);
-  thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
-  sum = thrust::transform_reduce(input_data, input_data+size, l1cost_functor<real, accreal>(), accreal(0), thrust::plus<accreal>());
+  thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
+  sum = thrust::transform_reduce(input_data, input_data+size, l1cost_functor<scalar_t, accreal>(), accreal(0), thrust::plus<accreal>());

   THCTensor_(free)(state, input);

-  THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
+  THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
 }

 void THNN_(L1Cost_updateGradInput)(
@@ -33,10 +33,10 @@ void THNN_(L1Cost_updateGradInput)(
   input = THCTensor_(newContiguous)(state, input);
   THCTensor_(resizeAs)(state, gradInput, input);

-  thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
-  thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
+  thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
+  thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput));

-  thrust::transform(input_data, input_data+size, gradInput_data, l1cost_updateGradInput_functor<real>());
+  thrust::transform(input_data, input_data+size, gradInput_data, l1cost_updateGradInput_functor<scalar_t>());

   THCTensor_(free)(state, input);
 }
diff --git a/aten/src/THCUNN/generic/LeakyReLU.cu b/aten/src/THCUNN/generic/LeakyReLU.cu
index dc920907f612aa..afd12cd27312ed 100644
--- a/aten/src/THCUNN/generic/LeakyReLU.cu
+++ b/aten/src/THCUNN/generic/LeakyReLU.cu
@@ -11,19 +11,19 @@ void THNN_(LeakyReLU_updateOutput)(
   accreal negval_,
   bool inplace)
 {
-  real negval = ScalarConvert<accreal, real>::to(negval_);
+  scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_);
   THCUNN_assertSameGPU(state, 2, input, output);

   if (inplace)
   {
-    THC_pointwiseApply1<real>(state, input, LeakyReLUUpdateOutputIP<real>(negval));
+    THC_pointwiseApply1<scalar_t>(state, input, LeakyReLUUpdateOutputIP<scalar_t>(negval));
     THCTensor_(set)(state, output, input);
   }
   else
   {
     THCTensor_(resizeAs)(state, output, input);
-    THC_pointwiseApply2<real, real>(state, output, input, LeakyReLUUpdateOutput<real>(negval));
+    THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, LeakyReLUUpdateOutput<scalar_t>(negval));
   }

   THCudaCheck(cudaGetLastError());
@@ -37,20 +37,20 @@ void THNN_(LeakyReLU_updateGradInput)(
   accreal negval_,
   bool inplace)
 {
-  real negval = ScalarConvert<accreal, real>::to(negval_);
+  scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_);
   THCUNN_check_nElement(state, input, gradOutput);
   THCUNN_assertSameGPU(state, 3, input, gradInput, gradOutput);

   if (inplace)
   {
-    THC_pointwiseApply2<real, real>(state, gradOutput, input, LeakyReLUUpdateGradInputIP<real>(negval));
+    THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, LeakyReLUUpdateGradInputIP<scalar_t>(negval));
     THCTensor_(set)(state, gradInput, gradOutput);
   }
   else
   {
     THCTensor_(resizeAs)(state, gradInput, input);
-    THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, LeakyReLUUpdateGradInput<real>(negval));
+    THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, LeakyReLUUpdateGradInput<scalar_t>(negval));
   }

   THCudaCheck(cudaGetLastError());
diff --git a/aten/src/THCUNN/generic/LogSigmoid.cu b/aten/src/THCUNN/generic/LogSigmoid.cu
index 02d55daa62ca3e..bb3250530590b5 100644
--- a/aten/src/THCUNN/generic/LogSigmoid.cu
+++ b/aten/src/THCUNN/generic/LogSigmoid.cu
@@ -12,7 +12,7 @@ void THNN_(LogSigmoid_updateOutput)(
 {
   THCUNN_assertSameGPU(state, 2, input, output);
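For reference, the L1Cost reduction above maps cleanly onto standalone thrust; the sketch below mirrors the transform_reduce call with the element/accumulator split written out. l1cost_like_functor is an illustrative stand-in, not the header's functor.

#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <cmath>

using scalar_t = float;
using accreal  = double;

struct l1cost_like_functor {
  __host__ __device__ accreal operator()(scalar_t x) const {
    return fabs(static_cast<accreal>(x));
  }
};

accreal l1_cost(const thrust::device_vector<scalar_t>& input) {
  return thrust::transform_reduce(input.begin(), input.end(),
                                  l1cost_like_functor(), accreal(0),
                                  thrust::plus<accreal>());
}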
THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, logSigmoid_updateOutput_functor()); + THC_pointwiseApply2(state, output, input, logSigmoid_updateOutput_functor()); } void THNN_(LogSigmoid_updateGradInput)( @@ -25,7 +25,7 @@ void THNN_(LogSigmoid_updateGradInput)( THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, input); - THC_pointwiseApply3(state, gradInput, input, gradOutput, logSigmoid_updateGradInput_functor()); + THC_pointwiseApply3(state, gradInput, input, gradOutput, logSigmoid_updateGradInput_functor()); } #endif diff --git a/aten/src/THCUNN/generic/LookupTable.cu b/aten/src/THCUNN/generic/LookupTable.cu index 4b96c6dbd86fbd..56b9d5405978d3 100644 --- a/aten/src/THCUNN/generic/LookupTable.cu +++ b/aten/src/THCUNN/generic/LookupTable.cu @@ -14,7 +14,7 @@ void THNN_(LookupTable_accGradParameters)( int paddingValue, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, sortedIndices, origIndices); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (!(THCIndexTensor_(isContiguous)(state, input) && @@ -39,7 +39,7 @@ void THNN_(LookupTable_accGradParameters)( dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE)); dim3 block(WARP_SIZE, BLOCKDIMY); - cunn_LookupTable_accGradParametersKernelByFeature + cunn_LookupTable_accGradParametersKernelByFeature <<<<>>( + cunn_LookupTable_accGradParametersKernel<<>>( sortedIndices_data, origIndices_data, THCTensor_(data)(state, gradOutput), @@ -151,7 +151,7 @@ void THNN_(LookupTable_accGradParameters)( #define THREADS 256 #define RUN(NORM, IDXTYPE) \ - calculate_norms_and_renorm \ + calculate_norms_and_renorm \ <<>> \ (weightsRaw, idxRaw, normType, maxNorm, THCTensor_(stride)(state, weight, 0)) @@ -178,7 +178,7 @@ void THNN_(LookupTable_renorm)( THCIndex_t numel = THCIndexTensor_(nElement)(state, idx); - real * weightsRaw = THCTensor_(data)(state, weight); + scalar_t * weightsRaw = THCTensor_(data)(state, weight); THCIndex_t * idxRaw = THCIndexTensor_(data)(state, idx); // get the unique indices diff --git a/aten/src/THCUNN/generic/LookupTableBag.cu b/aten/src/THCUNN/generic/LookupTableBag.cu index 6d8cf777108f95..37d00a06d1e649 100644 --- a/aten/src/THCUNN/generic/LookupTableBag.cu +++ b/aten/src/THCUNN/generic/LookupTableBag.cu @@ -38,7 +38,7 @@ void THNN_(LookupTableBag_updateOutput)( dim3 block = dim3(32, 8); int grid = 1024; - cunn_LookupTableBag_updateOutputKernel<<>>( + cunn_LookupTableBag_updateOutputKernel<<>>( THCIndexTensor_(data)(state, input), THCIndexTensor_(data)(state, offsets), THCTensor_(data)(state, weight), @@ -69,7 +69,7 @@ void THNN_(LookupTableBag_accGradParameters)( THCIndexTensor *bag_size, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, offset2bag, sortedIndices, origIndices); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (!(THCIndexTensor_(isContiguous)(state, input) && @@ -172,7 +172,7 @@ void THNN_(LookupTableBag_accGradParameters)( dim3 grid(THCCeilDiv(numel, (ptrdiff_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); - cunn_LookupTableBag_accGradParametersKernel<<>>( + cunn_LookupTableBag_accGradParametersKernel<<>>( sortedIndices_data, origIndices_data, THCTensor_(data)(state, gradOutput), diff --git 
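The RUN(NORM, IDXTYPE) macro in the LookupTable_renorm hunk above dispatches to one kernel instantiation per norm power and index type, so those choices are compile-time. A trivialized sketch of that instantiation shape; calculate_norms_and_renorm's real body (block reduction plus row rescale) is only indicated in comments.

#include <cuda_runtime.h>

using scalar_t = float;

template <typename T, int Norm, typename IdxType>
__global__ void renorm_like(T* weights, const IdxType* indices,
                            long stride, T maxNorm) {
  const T* row = weights + indices[blockIdx.x] * stride;
  T partial = 0;
  for (long c = threadIdx.x; c < stride; c += blockDim.x)
    partial += (Norm == 1) ? (row[c] < 0 ? -row[c] : row[c]) : row[c] * row[c];
  // ... block-reduce `partial`, compare the norm against maxNorm,
  //     and rescale the row in place if it exceeds the bound ...
  (void)maxNorm; (void)partial;
}

// RUN(1, int64_t) would then expand to something like:
//   renorm_like<scalar_t, 1, int64_t><<<numUnique, THREADS, 0, stream>>>(
//       weightsRaw, idxRaw, THCTensor_(stride)(state, weight, 0), maxNorm);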
a/aten/src/THCUNN/generic/MSECriterion.cu b/aten/src/THCUNN/generic/MSECriterion.cu index e41e741f2592b2..7f1926bd68dd3b 100644 --- a/aten/src/THCUNN/generic/MSECriterion.cu +++ b/aten/src/THCUNN/generic/MSECriterion.cu @@ -21,14 +21,14 @@ void THNN_(MSECriterion_updateOutput)( target = THCTensor_(newContiguous)(state, target); THCThrustAllocator thrustAlloc(state); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); accreal sum = thrust::inner_product( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, (accreal) 0, - thrust::plus(), mse_functor()); + thrust::plus(), mse_functor()); if (reduction == Reduction::ElementwiseMean) sum /= size; @@ -36,17 +36,17 @@ void THNN_(MSECriterion_updateOutput)( THCTensor_(free)(state, input); THCTensor_(free)(state, target); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); return; } THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply3( + THC_pointwiseApply3( state, input, target, output, - mse_updateOutput_functor()); + mse_updateOutput_functor()); } void THNN_(MSECriterion_updateGradInput)( @@ -65,7 +65,7 @@ void THNN_(MSECriterion_updateGradInput)( THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); accreal norm = reduction == Reduction::ElementwiseMean ? (accreal)(2)/size : (accreal)(2); - norm *= ScalarConvert::to(THCTensor_(get1d)(state, gradOutput, 0)); + norm *= ScalarConvert::to(THCTensor_(get1d)(state, gradOutput, 0)); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); @@ -73,16 +73,16 @@ void THNN_(MSECriterion_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCThrustAllocator thrustAlloc(state); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, gradInput_data, - mse_updateGradInput_functor(norm)); + mse_updateGradInput_functor(norm)); THCTensor_(free)(state, input); THCTensor_(free)(state, target); @@ -99,24 +99,24 @@ void THNN_(MSECriterion_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCThrustAllocator thrustAlloc(state); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradOutput_data(THCTensor_(data)(state, gradOutput)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradOutput_data(THCTensor_(data)(state, gradOutput)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform( #if CUDA_VERSION >= 7000 
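The MSECriterion hunks above guard the thrust calls with CUDA_VERSION >= 7000 so the reduction runs on the current THC stream rather than the default one. A standalone sketch of that stream-scoped policy (THCThrustAllocator dropped in favor of the stock allocator); mse_like_functor is an illustrative name.

#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <thrust/system/cuda/execution_policy.h>

using scalar_t = float;
using accreal  = double;

struct mse_like_functor {
  __host__ __device__ accreal operator()(scalar_t x, scalar_t y) const {
    accreal d = accreal(x) - accreal(y);
    return d * d;
  }
};

accreal mse_sum(const thrust::device_vector<scalar_t>& input,
                const thrust::device_vector<scalar_t>& target,
                cudaStream_t stream) {
  return thrust::inner_product(thrust::cuda::par.on(stream),
                               input.begin(), input.end(), target.begin(),
                               accreal(0), thrust::plus<accreal>(),
                               mse_like_functor());
}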
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, gradInput_data, - mse_updateGradInput_functor(2)); + mse_updateGradInput_functor(2)); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif gradInput_data, gradInput_data+size, gradOutput_data, gradInput_data, - thrust::multiplies()); + thrust::multiplies()); THCTensor_(free)(state, input); THCTensor_(free)(state, target); diff --git a/aten/src/THCUNN/generic/MarginCriterion.cu b/aten/src/THCUNN/generic/MarginCriterion.cu index 221f9d9e313aad..9273f5a478ff8b 100644 --- a/aten/src/THCUNN/generic/MarginCriterion.cu +++ b/aten/src/THCUNN/generic/MarginCriterion.cu @@ -10,7 +10,7 @@ void THNN_(MarginCriterion_updateOutput)( bool sizeAverage, accreal margin_) { - real margin = ScalarConvert::to(margin_); + scalar_t margin = ScalarConvert::to(margin_); THCUNN_check_nElement(state, input, target); THCUNN_check_dim_size(state, output, 1, 0, 1); THCUNN_assertSameGPU(state, 2, input, target); @@ -20,10 +20,10 @@ void THNN_(MarginCriterion_updateOutput)( input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); accreal sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus(), - margin_functor(ScalarConvert::to(margin))); + margin_functor(ScalarConvert::to(margin))); if (sizeAverage) sum /= size; @@ -31,7 +31,7 @@ void THNN_(MarginCriterion_updateOutput)( THCTensor_(free)(state, input); THCTensor_(free)(state, target); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); } @@ -43,7 +43,7 @@ void THNN_(MarginCriterion_updateGradInput)( bool sizeAverage, accreal margin_) { - real margin = ScalarConvert::to(margin_); + scalar_t margin = ScalarConvert::to(margin_); THCUNN_check_nElement(state, input, target); THCUNN_assertSameGPU(state, 3, input, target, gradInput); @@ -56,12 +56,12 @@ void THNN_(MarginCriterion_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, - margin_updateGradInput_functor(ScalarConvert::to(margin), norm)); + margin_updateGradInput_functor(ScalarConvert::to(margin), norm)); THCTensor_(free)(state, input); THCTensor_(free)(state, target); diff --git a/aten/src/THCUNN/generic/MultiLabelMarginCriterion.cu b/aten/src/THCUNN/generic/MultiLabelMarginCriterion.cu index 510a8230d74798..2e2a3d56ea8894 100644 --- a/aten/src/THCUNN/generic/MultiLabelMarginCriterion.cu +++ b/aten/src/THCUNN/generic/MultiLabelMarginCriterion.cu @@ -26,7 +26,7 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)( dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); - cunn_MultiLabelMarginCriterion_updateOutput_kernel + 
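The MarginCriterion inner_product above plugs a margin term into the same reduction skeleton as the other criteria. Written from the criterion's definition rather than copied from the header, the functor's likely shape is the hinge term max(0, margin - x*y) accumulated in accreal:

using scalar_t = float;
using accreal  = double;

struct margin_like_functor {
  const accreal margin;
  margin_like_functor(accreal m) : margin(m) {}
  __host__ __device__ accreal operator()(scalar_t x, scalar_t y) const {
    accreal z = margin - accreal(x) * accreal(y);
    return z > 0 ? z : accreal(0);
  }
};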
cunn_MultiLabelMarginCriterion_updateOutput_kernel <<>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), @@ -52,7 +52,7 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)( THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0)); THCTensor_(resize1d)(state, output, 1); - cunn_MultiLabelMarginCriterion_updateOutput_kernel + cunn_MultiLabelMarginCriterion_updateOutput_kernel <<>>( THCTensor_(data)(state, output_tmp), THCTensor_(data)(state, input), @@ -62,14 +62,14 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)( reduction == Reduction::ElementwiseMean ); THCudaCheck(cudaGetLastError()); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(THCTensor_(sumall)(state, output_tmp))); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(THCTensor_(sumall)(state, output_tmp))); THCTensor_(free)(state, output_tmp); } else { THCTensor_(resize1d)(state, output, input->size(0)); - cunn_MultiLabelMarginCriterion_updateOutput_kernel + cunn_MultiLabelMarginCriterion_updateOutput_kernel <<>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), @@ -114,7 +114,7 @@ void THNN_(MultiLabelMarginCriterion_updateGradInput)( dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); - cunn_MultiLabelMarginCriterion_updateGradInput_kernel + cunn_MultiLabelMarginCriterion_updateGradInput_kernel <<>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), @@ -137,7 +137,7 @@ void THNN_(MultiLabelMarginCriterion_updateGradInput)( dim3 blocks(gradInput->size(0)); dim3 threads(MULTILABELMARGIN_THREADS); - cunn_MultiLabelMarginCriterion_updateGradInput_kernel + cunn_MultiLabelMarginCriterion_updateGradInput_kernel <<>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), diff --git a/aten/src/THCUNN/generic/MultiMarginCriterion.cu b/aten/src/THCUNN/generic/MultiMarginCriterion.cu index 65bd6cdec850bb..349b72ffcfa69c 100644 --- a/aten/src/THCUNN/generic/MultiMarginCriterion.cu +++ b/aten/src/THCUNN/generic/MultiMarginCriterion.cu @@ -13,7 +13,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( THCTensor *weights, accreal margin_) { - real margin = ScalarConvert::to(margin_); + scalar_t margin = ScalarConvert::to(margin_); THCUNN_assertSameGPU(state, 2, input, target); input = THCTensor_(newContiguous)(state, input); if(weights) @@ -25,7 +25,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( THCTensor_(resize1d)(state, output, 1); if (p == 1) { - cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), @@ -37,7 +37,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( } else if (p == 2) { - cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), @@ -62,7 +62,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( THCTensor_(resize1d)(state, output, input->size(0)); if (p == 1) { - cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), @@ -74,7 +74,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( } else if (p == 2) { - 
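The p == 1 / p == 2 branches in MultiMarginCriterion bake the margin power into the kernel as its leading template parameter (the <1, scalar_t, accreal> and <2, scalar_t, accreal> instantiations visible above), so the inner loop carries no runtime branch. A trivialized sketch of that dispatch; the kernel body is only a placeholder for the real per-sample reduction.

using scalar_t = float;
using accreal  = double;

template <int P, typename T, typename AccT>
__global__ void multimargin_like(T* out, const T* in, long n, T margin) {
  AccT sum = 0;
  for (long i = threadIdx.x; i < n; i += blockDim.x) {
    AccT z = AccT(margin) + AccT(in[i]);     // placeholder for the margin term
    if (z > 0) sum += (P == 1) ? z : z * z;
  }
  // ... block-reduce `sum` and write out[blockIdx.x] ...
  (void)out; (void)sum;
}

void dispatch(int p, scalar_t* out, const scalar_t* in, long n, scalar_t margin) {
  if (p == 1)
    multimargin_like<1, scalar_t, accreal><<<1, 128>>>(out, in, n, margin);
  else if (p == 2)
    multimargin_like<2, scalar_t, accreal><<<1, 128>>>(out, in, n, margin);
}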
cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), @@ -92,7 +92,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size(0)); // tmp output buffer if (p == 1) { - cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<>>( THCTensor_(data)(state, output_), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), @@ -104,7 +104,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( } else if (p == 2) { - cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<>>( THCTensor_(data)(state, output_), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), @@ -116,7 +116,7 @@ void THNN_(MultiMarginCriterion_updateOutput)( } THCudaCheck(cudaGetLastError()); float sum = THCTensor_(sumall)(state, output_); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); THCTensor_(free)(state, output_); } } @@ -141,7 +141,7 @@ void THNN_(MultiMarginCriterion_updateGradInput)( THCTensor *weights, accreal margin_) { - real margin = ScalarConvert::to(margin_); + scalar_t margin = ScalarConvert::to(margin_); THCUNN_assertSameGPU(state, 3, input, gradInput, target); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); @@ -156,7 +156,7 @@ void THNN_(MultiMarginCriterion_updateGradInput)( if (p == 1) { - cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), @@ -170,7 +170,7 @@ void THNN_(MultiMarginCriterion_updateGradInput)( } else if (p == 2) { - cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), @@ -194,7 +194,7 @@ void THNN_(MultiMarginCriterion_updateGradInput)( if (p == 1) { - cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), @@ -208,7 +208,7 @@ void THNN_(MultiMarginCriterion_updateGradInput)( } else if (p == 2) { - cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal> <<>>( + cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), diff --git a/aten/src/THCUNN/generic/PReLU.cu b/aten/src/THCUNN/generic/PReLU.cu index 5f3fd1ea0b5fab..a20e3c5c2db67d 100644 --- a/aten/src/THCUNN/generic/PReLU.cu +++ b/aten/src/THCUNN/generic/PReLU.cu @@ -12,11 +12,11 @@ void THNN_(PReLU_updateOutput)( int64_t nOutputPlane = THCTensor_(numel)(state, weight); weight = THCTensor_(newContiguous)(state, weight); - real *w = THCTensor_(data)(state, weight); + scalar_t *w = THCTensor_(data)(state, weight); if (nOutputPlane 
== 1) { - THC_pointwiseApply2(state, output, input, PReLUUpdateOutput(w)); + THC_pointwiseApply2(state, output, input, PReLUUpdateOutput(w)); } else { @@ -57,10 +57,10 @@ void THNN_(PReLU_updateGradInput)( int64_t nOutputPlane = THCTensor_(numel)(state, weight); weight = THCTensor_(newContiguous)(state, weight); - real *w = THCTensor_(data)(state, weight); + scalar_t *w = THCTensor_(data)(state, weight); if (nOutputPlane == 1) { - THC_pointwiseApply3(state, gradInput, gradOutput, input, PReLUUpdateGradInput(w)); + THC_pointwiseApply3(state, gradInput, gradOutput, input, PReLUUpdateGradInput(w)); } else { @@ -100,18 +100,18 @@ void THNN_(PReLU_accGradParameters)( THCTensor *gradWeight, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_check_nElement(state, input, gradOutput); int64_t nOutputPlane = THCTensor_(numel)(state, weight); // use grad input for temporary storage, then call updateGradInput again if (nOutputPlane == 1) { - THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParametersShared()); + THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParametersShared()); // introduces a sync point - real sum = ScalarConvert::to(THCTensor_(sumall)(state, gradInput)); - real w = THCTensor_(get1d)(state, gradWeight, 0); + scalar_t sum = ScalarConvert::to(THCTensor_(sumall)(state, gradInput)); + scalar_t w = THCTensor_(get1d)(state, gradWeight, 0); THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale); // restore gradInput @@ -123,11 +123,11 @@ void THNN_(PReLU_accGradParameters)( if (ndim == 1) { - THC_pointwiseApply3(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1(scale)); + THC_pointwiseApply3(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1(scale)); } else { - THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParameters(scale)); + THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParameters(scale)); THCTensor *gradWeightBuf = THCTensor_(new)(state); THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight); diff --git a/aten/src/THCUNN/generic/RReLU.cu b/aten/src/THCUNN/generic/RReLU.cu index bea7f10a4f75f7..f331e8ea7f937a 100644 --- a/aten/src/THCUNN/generic/RReLU.cu +++ b/aten/src/THCUNN/generic/RReLU.cu @@ -22,8 +22,8 @@ void THNN_(RReLU_updateOutput)( { input = THCTensor_(newContiguous)(state, input); THCTensor_(resizeAs)(state, noise, input); - real *input_data = THCTensor_(data)(state, input); - real *noise_data = THCTensor_(data)(state, noise); + scalar_t *input_data = THCTensor_(data)(state, input); + scalar_t *noise_data = THCTensor_(data)(state, noise); ptrdiff_t n = THCTensor_(nElement)(state, input); if (inplace) { @@ -34,7 +34,7 @@ void THNN_(RReLU_updateOutput)( else { THCTensor_(resizeAs)(state, output, input); - real *output_data = THCTensor_(data)(state, output); + scalar_t *output_data = THCTensor_(data)(state, output); rreluUpdateOutputTrain<<>>( n, gen_states, input_data, noise_data, output_data, lower, upper); } @@ -43,16 +43,16 @@ void THNN_(RReLU_updateOutput)( } else { - const real negSlope = ScalarConvert::to((lower + upper) / 2); + const scalar_t negSlope = ScalarConvert::to((lower + upper) / 2); if (inplace) { - THC_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope)); + THC_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, 
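The nOutputPlane == 1 branch of PReLU_accGradParameters above reuses gradInput as scratch: one pointwise pass writes each element's weight-gradient contribution there, sumall reduces it (the noted sync point), and updateGradInput is re-run afterwards to restore gradInput. A standalone view of the same two steps:

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>

using scalar_t = float;

struct prelu_w_grad {  // d(output)/d(weight) contribution per element
  __host__ __device__ scalar_t operator()(scalar_t x, scalar_t gradOut) const {
    return x <= 0 ? gradOut * x : scalar_t(0);
  }
};

scalar_t shared_weight_grad(const thrust::device_vector<scalar_t>& input,
                            const thrust::device_vector<scalar_t>& gradOutput,
                            thrust::device_vector<scalar_t>& scratch) {
  thrust::transform(input.begin(), input.end(), gradOutput.begin(),
                    scratch.begin(), prelu_w_grad());
  return thrust::reduce(scratch.begin(), scratch.end(), scalar_t(0));
}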
input, RReLUUpdateOutputEval_functor(negSlope)); + THC_pointwiseApply2(state, output, input, RReLUUpdateOutputEval_functor(negSlope)); } } } @@ -90,16 +90,16 @@ void THNN_(RReLU_updateGradInput)( else { // use constant factor for negative input values - const real negSlope = ScalarConvert::to((lower + upper) / 2); + const scalar_t negSlope = ScalarConvert::to((lower + upper) / 2); if (inplace) { - THC_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope)); + THC_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope)); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); - THC_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope)); + THC_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope)); } } diff --git a/aten/src/THCUNN/generic/Sigmoid.cu b/aten/src/THCUNN/generic/Sigmoid.cu index a91a5dd759f2d7..73158219fb4f2b 100644 --- a/aten/src/THCUNN/generic/Sigmoid.cu +++ b/aten/src/THCUNN/generic/Sigmoid.cu @@ -22,7 +22,7 @@ void THNN_(Sigmoid_updateGradInput)( THCUNN_check_nElement(state, output, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, output); - THC_pointwiseApply3(state, gradInput, output, gradOutput, sigmoid_updateGradInput_functor()); + THC_pointwiseApply3(state, gradInput, output, gradOutput, sigmoid_updateGradInput_functor()); } #endif diff --git a/aten/src/THCUNN/generic/SmoothL1Criterion.cu b/aten/src/THCUNN/generic/SmoothL1Criterion.cu index 1760b08362e8cf..20851e1af5bf86 100644 --- a/aten/src/THCUNN/generic/SmoothL1Criterion.cu +++ b/aten/src/THCUNN/generic/SmoothL1Criterion.cu @@ -18,8 +18,8 @@ void THNN_(SmoothL1Criterion_updateOutput)( if (reduction == Reduction::None) { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply3(state, input, target, output, - smoothl1_updateOutput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, output, + smoothl1_updateOutput_no_reduce_functor()); return; } @@ -31,14 +31,14 @@ void THNN_(SmoothL1Criterion_updateOutput)( target = THCTensor_(newContiguous)(state, target); THCThrustAllocator thrustAlloc(state); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); accreal sum = thrust::inner_product( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, (accreal) 0, - thrust::plus(), smoothl1_functor() + thrust::plus(), smoothl1_functor() ); if (reduction == Reduction::ElementwiseMean) @@ -47,7 +47,7 @@ void THNN_(SmoothL1Criterion_updateOutput)( THCTensor_(free)(state, input); THCTensor_(free)(state, target); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); } void THNN_(SmoothL1Criterion_updateGradInput)( @@ -69,8 +69,8 @@ void THNN_(SmoothL1Criterion_updateGradInput)( if (reduction == Reduction::None) { THCUNN_check_shape(state, gradOutput, input); - THC_pointwiseApply3(state, input, target, gradInput, - smoothl1_updateGradInput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, gradInput, + smoothl1_updateGradInput_no_reduce_functor()); THCTensor_(cmul)(state, 
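In eval mode RReLU collapses to a LeakyReLU whose slope is the midpoint of the sampled range, which is exactly the negSlope = (lower + upper) / 2 the hunks above compute; in train mode the per-element slopes come from gen_states and are stored in `noise` for the backward pass. The eval rule as a one-liner:

using scalar_t = float;

__host__ __device__ inline scalar_t rrelu_eval(scalar_t x, scalar_t lower,
                                               scalar_t upper) {
  const scalar_t negSlope = (lower + upper) / scalar_t(2);
  return x >= 0 ? x : x * negSlope;
}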
gradInput, gradInput, gradOutput); return; } @@ -78,22 +78,22 @@ void THNN_(SmoothL1Criterion_updateGradInput)( THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); ptrdiff_t size = THCTensor_(nElement)(state, input); - real norm = ScalarConvert::to(reduction == Reduction::ElementwiseMean ? accreal(1)/size : accreal(1)); + scalar_t norm = ScalarConvert::to(reduction == Reduction::ElementwiseMean ? accreal(1)/size : accreal(1)); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); THCThrustAllocator thrustAlloc(state); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, gradInput_data, - smoothl1_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0)) + smoothl1_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0)) ); THCTensor_(free)(state, input); diff --git a/aten/src/THCUNN/generic/SoftMarginCriterion.cu b/aten/src/THCUNN/generic/SoftMarginCriterion.cu index 47a43685ce1087..e9267d90e3961f 100644 --- a/aten/src/THCUNN/generic/SoftMarginCriterion.cu +++ b/aten/src/THCUNN/generic/SoftMarginCriterion.cu @@ -14,8 +14,8 @@ void THNN_(SoftMarginCriterion_updateOutput)( if (reduction == Reduction::None) { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply3(state, input, target, output, - softmargin_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, output, + softmargin_no_reduce_functor()); return; } @@ -26,9 +26,9 @@ void THNN_(SoftMarginCriterion_updateOutput)( target = THCTensor_(newContiguous)(state, target); THCTensor_(resize1d)(state, output, 1); - thrust::device_ptr input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus(), softmargin_functor()); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus(), softmargin_functor()); if (reduction == Reduction::ElementwiseMean) sum /= size; @@ -36,7 +36,7 @@ void THNN_(SoftMarginCriterion_updateOutput)( THCTensor_(free)(state, input); THCTensor_(free)(state, target); - THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); + THCTensor_(set1d)(state, output, 0, ScalarConvert::to(sum)); } void THNN_(SoftMarginCriterion_updateGradInput)( @@ -54,8 +54,8 @@ void THNN_(SoftMarginCriterion_updateGradInput)( if (reduction == Reduction::None) { THCUNN_check_shape(state, gradOutput, input); - THC_pointwiseApply3(state, input, target, gradInput, - softmargin_updateGradInput_no_reduce_functor()); + THC_pointwiseApply3(state, input, target, gradInput, + softmargin_updateGradInput_no_reduce_functor()); THCTensor_(cmul)(state, gradInput, gradInput, gradOutput); return; } @@ -67,12 +67,12 @@ void THNN_(SoftMarginCriterion_updateGradInput)( target = THCTensor_(newContiguous)(state, target); - thrust::device_ptr 
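smoothl1_updateGradInput_functor above takes both `norm` (the optional 1/size factor) and the scalar gradOutput[0], so the whole chain rule is applied in one pass. The Huber derivative is a clamp of the residual; a sketch of the functor's likely shape:

using scalar_t = float;

struct smoothl1_grad_like {
  const scalar_t norm, gradOut;
  smoothl1_grad_like(scalar_t n, scalar_t g) : norm(n), gradOut(g) {}
  __host__ __device__ scalar_t operator()(scalar_t x, scalar_t y) const {
    scalar_t d = x - y;
    scalar_t g = d < scalar_t(-1) ? scalar_t(-1)
               : d > scalar_t(1)  ? scalar_t(1)
               : d;                      // d(smooth_l1)/dx = clamp(d, -1, 1)
    return norm * g * gradOut;
  }
};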
input_data(THCTensor_(data)(state, input)); - thrust::device_ptr target_data(THCTensor_(data)(state, target)); - thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); + thrust::device_ptr input_data(THCTensor_(data)(state, input)); + thrust::device_ptr target_data(THCTensor_(data)(state, target)); + thrust::device_ptr gradInput_data(THCTensor_(data)(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, - softmargin_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0))); + softmargin_updateGradInput_functor(norm, THCTensor_(get1d)(state, gradOutput, 0))); THCTensor_(free)(state, input); THCTensor_(free)(state, target); diff --git a/aten/src/THCUNN/generic/SoftPlus.cu b/aten/src/THCUNN/generic/SoftPlus.cu index 5154d8d4e2951b..ca3a9ad6c15e84 100644 --- a/aten/src/THCUNN/generic/SoftPlus.cu +++ b/aten/src/THCUNN/generic/SoftPlus.cu @@ -11,11 +11,11 @@ void THNN_(SoftPlus_updateOutput)( accreal beta_, accreal threshold_) { - real beta = ScalarConvert::to(beta_); - real threshold = ScalarConvert::to(threshold_); + scalar_t beta = ScalarConvert::to(beta_); + scalar_t threshold = ScalarConvert::to(threshold_); THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, softPlusupdateOutput_functor(threshold, beta)); + THC_pointwiseApply2(state, output, input, softPlusupdateOutput_functor(threshold, beta)); } void THNN_(SoftPlus_updateGradInput)( @@ -27,12 +27,12 @@ void THNN_(SoftPlus_updateGradInput)( accreal beta_, accreal threshold_) { - real beta = ScalarConvert::to(beta_); - real threshold = ScalarConvert::to(threshold_); + scalar_t beta = ScalarConvert::to(beta_); + scalar_t threshold = ScalarConvert::to(threshold_); THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 4, input, output, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, output); - THC_pointwiseApply3(state, gradInput, output, gradOutput, softPlusupdateGradInput_functor(threshold, beta)); + THC_pointwiseApply3(state, gradInput, output, gradOutput, softPlusupdateGradInput_functor(threshold, beta)); } #endif diff --git a/aten/src/THCUNN/generic/SoftShrink.cu b/aten/src/THCUNN/generic/SoftShrink.cu index 0743f7085d5b52..032b8387f78497 100644 --- a/aten/src/THCUNN/generic/SoftShrink.cu +++ b/aten/src/THCUNN/generic/SoftShrink.cu @@ -10,10 +10,10 @@ void THNN_(SoftShrink_updateOutput)( THCTensor *output, accreal lambda_) { - real lambda = ScalarConvert::to(lambda_); + scalar_t lambda = ScalarConvert::to(lambda_); THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, SoftShrinkUpdateOutput(lambda)); + THC_pointwiseApply2(state, output, input, SoftShrinkUpdateOutput(lambda)); THCudaCheck(cudaGetLastError()); } @@ -24,11 +24,11 @@ void THNN_(SoftShrink_updateGradInput)( THCTensor *gradInput, accreal lambda_) { - real lambda = ScalarConvert::to(lambda_); + scalar_t lambda = ScalarConvert::to(lambda_); THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, input); - THC_pointwiseApply3(state, gradInput, input, gradOutput, SoftShrinkUpdateGradInput(lambda)); + THC_pointwiseApply3(state, gradInput, input, gradOutput, SoftShrinkUpdateGradInput(lambda)); THCudaCheck(cudaGetLastError()); } diff --git a/aten/src/THCUNN/generic/SparseLinear.cu b/aten/src/THCUNN/generic/SparseLinear.cu 
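SoftPlus_updateOutput's functor takes both beta and threshold because of the overflow guard: once beta*x exceeds threshold, log1p(exp(beta*x))/beta is numerically just x, so the op degrades to identity. A standalone functor stating that rule:

#include <cmath>

using scalar_t = float;

struct softplus_like {
  const scalar_t beta, threshold;
  softplus_like(scalar_t b, scalar_t t) : beta(b), threshold(t) {}
  __host__ __device__ scalar_t operator()(scalar_t x) const {
    scalar_t bx = beta * x;
    return bx > threshold ? x : log1pf(expf(bx)) / beta;
  }
};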
index 0363dcf0e3996a..0ad89ea711e7d5 100644 --- a/aten/src/THCUNN/generic/SparseLinear.cu +++ b/aten/src/THCUNN/generic/SparseLinear.cu @@ -87,7 +87,7 @@ void THNN_(SparseLinear_updateOutput)( } // output = W * x - real one = ScalarConvert::to(1); + scalar_t one = ScalarConvert::to(1); cusparseMatDescr_t descr = 0; cusparseCreateMatDescr(&descr); cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); @@ -186,7 +186,7 @@ void THNN_(SparseLinear_accGradParameters)( THCTensor_(copy)(state, buf, tgradOutput); THCTensor_(free)(state, tgradOutput); - real one = ScalarConvert::to(1); + scalar_t one = ScalarConvert::to(1); cusparseMatDescr_t descr = 0; cusparseCreateMatDescr(&descr); cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); diff --git a/aten/src/THCUNN/generic/SpatialAdaptiveAveragePooling.cu b/aten/src/THCUNN/generic/SpatialAdaptiveAveragePooling.cu index b25bbb94e4ea5f..32d136b6247d2a 100644 --- a/aten/src/THCUNN/generic/SpatialAdaptiveAveragePooling.cu +++ b/aten/src/THCUNN/generic/SpatialAdaptiveAveragePooling.cu @@ -15,8 +15,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateOutput)( { THCUNN_assertSameGPU(state, 2, input, output); - real *output_data; - real *input_data; + scalar_t *output_data; + scalar_t *input_data; THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); @@ -89,8 +89,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)( THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; gradOutput = THCTensor_(newContiguous)(state, gradOutput); diff --git a/aten/src/THCUNN/generic/SpatialAdaptiveMaxPooling.cu b/aten/src/THCUNN/generic/SpatialAdaptiveMaxPooling.cu index 6ca5c9b42b827d..5e2d240fcc023e 100644 --- a/aten/src/THCUNN/generic/SpatialAdaptiveMaxPooling.cu +++ b/aten/src/THCUNN/generic/SpatialAdaptiveMaxPooling.cu @@ -17,8 +17,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateOutput)( THCUNN_assertSameGPU(state, 3, input, output, indices); THCIndex_t *indices_data; - real *output_data; - real *input_data; + scalar_t *output_data; + scalar_t *input_data; THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); @@ -101,8 +101,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)( THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput); THCIndex_t *indices_data; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; gradOutput = THCTensor_(newContiguous)(state, gradOutput); diff --git a/aten/src/THCUNN/generic/SpatialAveragePooling.cu b/aten/src/THCUNN/generic/SpatialAveragePooling.cu index 7811acc4247666..18268c72615096 100644 --- a/aten/src/THCUNN/generic/SpatialAveragePooling.cu +++ b/aten/src/THCUNN/generic/SpatialAveragePooling.cu @@ -120,22 +120,22 @@ void THNN_(SpatialAveragePooling_updateOutput)( } input = THCTensor_(newContiguous)(state, input); - real* input_data = THCTensor_(data)(state, input); + scalar_t* input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols); - real* output_data = THCTensor_(data)(state, output); + scalar_t* output_data = THCTensor_(data)(state, output); int count = THCTensor_(nElement)(state, output); if(count_include_pad) - AvePoolForward + 
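The adaptive pooling kernels above size each output cell's input window with the standard start/end rule (stated here from the adaptive-pooling definition, not lifted from the .cuh): output cell o over an axis of isize input and osize output elements covers input[start, end).

__host__ __device__ inline long start_index(long o, long osize, long isize) {
  return (o * isize) / osize;
}

__host__ __device__ inline long end_index(long o, long osize, long isize) {
  return ((o + 1) * isize + osize - 1) / osize;  // ceiling division
}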
AvePoolForward <<>>( count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, output_data); else - AvePoolForward + AvePoolForward <<>>( count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, @@ -212,7 +212,7 @@ void THNN_(SpatialAveragePooling_updateGradInput)( int count = THCTensor_(nElement)(state, input); if(count_include_pad) - AvePoolBackward + AvePoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCTensor_(data)(state, gradOutput), @@ -220,7 +220,7 @@ void THNN_(SpatialAveragePooling_updateGradInput)( kH, kW, dH, dW, padH, padW, THCTensor_(data)(state, gradInput)); else - AvePoolBackward + AvePoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCTensor_(data)(state, gradOutput), diff --git a/aten/src/THCUNN/generic/SpatialClassNLLCriterion.cu b/aten/src/THCUNN/generic/SpatialClassNLLCriterion.cu index ae211774a580db..2c7aff0b9c6829 100644 --- a/aten/src/THCUNN/generic/SpatialClassNLLCriterion.cu +++ b/aten/src/THCUNN/generic/SpatialClassNLLCriterion.cu @@ -77,12 +77,12 @@ void THNN_(SpatialClassNLLCriterion_updateOutput)( } int64_t count = batch_size * H * W; - SpatialClassNLLCriterion_updateOutput_no_reduce_kernel + SpatialClassNLLCriterion_updateOutput_no_reduce_kernel <<>>( count, - toDeviceTensor(state, input), + toDeviceTensor(state, input), toDeviceTensor(state, target), - toDeviceTensor(state, output), + toDeviceTensor(state, output), weights ? THCTensor_(data)(state, weights) : NULL, ignore_index); @@ -96,11 +96,11 @@ void THNN_(SpatialClassNLLCriterion_updateOutput)( weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); - real *input_data = THCTensor_(data)(state, input); - real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; + scalar_t *input_data = THCTensor_(data)(state, input); + scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; THCIndex_t *target_data = THCIndexTensor_(data)(state, target); - real *output_data = THCTensor_(data)(state, output); - real *total_weight_data = THCTensor_(data)(state, total_weight); + scalar_t *output_data = THCTensor_(data)(state, output); + scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0); THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size; @@ -108,10 +108,10 @@ void THNN_(SpatialClassNLLCriterion_updateOutput)( blocks_per_sample = (blocks_per_sample == 0) ? 
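count_include_pad in SpatialAveragePooling selects between two instantiations of the same kernel (presumably AvePoolForward<scalar_t, accreal, true> and the false counterpart), so the divisor choice costs nothing at runtime. A trivialized sketch of that boolean-template pattern; the real kernel computes the window sum this stub omits.

using scalar_t = float;
using accreal  = double;

template <typename T, typename AccT, bool CountIncludePad>
__global__ void avepool_like(const T* in, T* out, long n,
                             long poolSize, long validSize) {
  long i = blockIdx.x * (long)blockDim.x + threadIdx.x;
  if (i >= n) return;
  AccT divisor = CountIncludePad ? AccT(poolSize) : AccT(validSize);
  out[i] = T(AccT(in[i]) / divisor);  // stand-in for the pooled window sum
}

// dispatch mirroring the hunk:
//   count_include_pad
//     ? avepool_like<scalar_t, accreal, true><<<blocks, threads, 0, stream>>>(...)
//     : avepool_like<scalar_t, accreal, false><<<blocks, threads, 0, stream>>>(...);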
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; - THCTensor_(fill)(state, output, ScalarConvert::to(0)); - THCTensor_(fill)(state, total_weight, ScalarConvert::to(0)); + THCTensor_(fill)(state, output, ScalarConvert::to(0)); + THCTensor_(fill)(state, total_weight, ScalarConvert::to(0)); - cunn_SpatialClassNLLCriterion_updateOutput_kernel + cunn_SpatialClassNLLCriterion_updateOutput_kernel <<>>( output_data, total_weight_data, @@ -177,12 +177,12 @@ void THNN_(SpatialClassNLLCriterion_updateGradInput)( } int64_t count = batch_size * H * W; - SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel + SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel <<>>( count, toDeviceTensor(state, target), - toDeviceTensor(state, gradOutput), - toDeviceTensor(state, gradInput), + toDeviceTensor(state, gradOutput), + toDeviceTensor(state, gradInput), weights ? THCTensor_(data)(state, weights) : NULL, ignore_index); @@ -196,11 +196,11 @@ void THNN_(SpatialClassNLLCriterion_updateGradInput)( weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); - real *gradOutput_data = THCTensor_(data)(state, gradOutput); - real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; - real *gradInput_data = THCTensor_(data)(state, gradInput); + scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); + scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; + scalar_t *gradInput_data = THCTensor_(data)(state, gradInput); THCIndex_t *target_data = THCIndexTensor_(data)(state, target); - real *total_weight_data = THCTensor_(data)(state, total_weight); + scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0); THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size; diff --git a/aten/src/THCUNN/generic/SpatialConvolutionLocal.cu b/aten/src/THCUNN/generic/SpatialConvolutionLocal.cu index 8c1c15ddc0abfd..e917bc767b4b3a 100644 --- a/aten/src/THCUNN/generic/SpatialConvolutionLocal.cu +++ b/aten/src/THCUNN/generic/SpatialConvolutionLocal.cu @@ -154,8 +154,8 @@ void THNN_(SpatialConvolutionLocal_updateOutput)( // weight: oH*oW x nOutputPlane x nInputPlane*kH*kW // finput3d: oH*oW x nInputPlane*kH*kW x 1 - THCTensor_(baddbmm)(state, output3d, ScalarConvert::to(1), - output3d, ScalarConvert::to(1), + THCTensor_(baddbmm)(state, output3d, ScalarConvert::to(1), + output3d, ScalarConvert::to(1), weight, finput3d); // output3d: oH*oW x nOutputPlane x 1 @@ -259,13 +259,13 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)( // weight: oH*oW x nInputPlane*kH*kW x nOutputPlane // gradOutput3d: oH*oW x nOutputPlane x 1 THCTensor_(baddbmm)(state, fgradInput3d, - ScalarConvert::to(0), - fgradInput3d, ScalarConvert::to(1), + ScalarConvert::to(0), + fgradInput3d, ScalarConvert::to(1), tweight, gradOutput3d); // fgradInput3d: oH*oW x nInputPlane*kH*kW x 1 // Unpack columns back into input: - col2im( + col2im( THCState_getCurrentStream(state), THCTensor_(data)(state, fgradInput_n), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, @@ -312,7 +312,7 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)( int64_t outputWidth, int64_t outputHeight, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, gradBias, finput); @@ -379,7 +379,7 @@ void 
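SpatialConvolutionLocal treats every output location as its own tiny matmul: per the shape comments above, baddbmm contracts weight (oH*oW x nOutputPlane x nInputPlane*kH*kW) with finput3d (oH*oW x nInputPlane*kH*kW x 1) into output3d, with alpha = beta = 1 supplied through ScalarConvert. A host-side reference of the same contraction:

using scalar_t = float;

// weight: [L][mOut][kIn], finput: [L][kIn], output: [L][mOut], L = oH*oW
void local_conv_ref(const scalar_t* weight, const scalar_t* finput,
                    scalar_t* output, long L, long mOut, long kIn) {
  for (long l = 0; l < L; ++l)
    for (long m = 0; m < mOut; ++m) {
      scalar_t acc = output[l * mOut + m];          // beta == 1: accumulate
      for (long k = 0; k < kIn; ++k)
        acc += weight[(l * mOut + m) * kIn + k] * finput[l * kIn + k];
      output[l * mOut + m] = acc;
    }
}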
THNN_(SpatialConvolutionLocal_accGradParameters)( // gradOutput3d: oH*oW x nOutputPlane x 1 // finput3d: oH*oW x 1 x kW*kH*nInputPlane - THCTensor_(baddbmm)(state, gradWeight, ScalarConvert::to(1), + THCTensor_(baddbmm)(state, gradWeight, ScalarConvert::to(1), gradWeight, scale, gradOutput3d, finput3d); // gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane diff --git a/aten/src/THCUNN/generic/SpatialConvolutionMM.cu b/aten/src/THCUNN/generic/SpatialConvolutionMM.cu index 7860404b685f52..616e9db75b650a 100644 --- a/aten/src/THCUNN/generic/SpatialConvolutionMM.cu +++ b/aten/src/THCUNN/generic/SpatialConvolutionMM.cu @@ -146,7 +146,7 @@ void THNN_(SpatialConvolutionMM_updateOutput)( if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { // Resize plane and fill with ones... THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Helpers @@ -178,10 +178,10 @@ void THNN_(SpatialConvolutionMM_updateOutput)( state, 't', 'n', n_, m_, k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, output_n), n_ ); } else { @@ -215,10 +215,10 @@ void THNN_(SpatialConvolutionMM_updateOutput)( state, 'n', 'n', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, columns), n, THCTensor_(data)(state, weight), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, output_n), n ); } @@ -322,15 +322,15 @@ void THNN_(SpatialConvolutionMM_updateGradInput)( state, 'n', 't', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradOutput_n), n, THCTensor_(data)(state, weight), m, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, gradColumns), n ); // Unpack columns back into input: - col2im( + col2im( THCState_getCurrentStream(state), THCTensor_(data)(state, gradColumns), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, @@ -368,7 +368,7 @@ void THNN_(SpatialConvolutionMM_accGradParameters)( int padW, int padH, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, gradBias, columns, ones); if (gradWeight) { THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous"); @@ -416,7 +416,7 @@ void THNN_(SpatialConvolutionMM_accGradParameters)( if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { // Resize plane and fill with ones... 
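SpatialConvolutionMM_updateOutput is im2col followed by one gemm per sample; the hunk's call is THCBlas_(gemm)(state, 'n', 'n', n, m, k, ...) with columns (k x n) and weight (m x k) producing output_n (m x n), the usual column-major trick for row-major data. A sketch with plain cuBLAS standing in for THCBlas; alpha and beta play the roles the ScalarConvert-built 1/0 arguments fill.

#include <cublas_v2.h>

using scalar_t = float;

// m = nOutputPlane, n = outputHeight*outputWidth, k = nInputPlane*kH*kW
void conv_gemm(cublasHandle_t handle, const scalar_t* columns,
               const scalar_t* weight, scalar_t* output_n,
               int m, int n, int k, scalar_t beta) {
  const scalar_t alpha = 1.f;  // beta = 1 accumulates onto the bias pass
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
              &alpha, columns, n, weight, k, &beta, output_n, n);
}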
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Resize temporary columns @@ -466,7 +466,7 @@ void THNN_(SpatialConvolutionMM_accGradParameters)( scale, THCTensor_(data)(state, columns), k, THCTensor_(data)(state, gradOutput_n), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradWeight), n ); } @@ -491,7 +491,7 @@ void THNN_(SpatialConvolutionMM_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), 1, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), 1 ); #endif @@ -503,7 +503,7 @@ void THNN_(SpatialConvolutionMM_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), m_ ); #endif diff --git a/aten/src/THCUNN/generic/SpatialCrossMapLRN.cu b/aten/src/THCUNN/generic/SpatialCrossMapLRN.cu index b0e65ed8b8fbab..92834631946069 100644 --- a/aten/src/THCUNN/generic/SpatialCrossMapLRN.cu +++ b/aten/src/THCUNN/generic/SpatialCrossMapLRN.cu @@ -5,9 +5,9 @@ void THNN_(LRNforward)(THCState* state, THCTensor* input, THCTensor* output, THCTensor* scale, int local_size, accreal alpha_, accreal beta_, accreal k_) { - real alpha = ScalarConvert::to(alpha_); - real beta = ScalarConvert::to(beta_); - real k = ScalarConvert::to(k_); + scalar_t alpha = ScalarConvert::to(alpha_); + scalar_t beta = ScalarConvert::to(beta_); + scalar_t k = ScalarConvert::to(k_); THCTensor_(resizeAs)(state, output, input); THCTensor_(resizeAs)(state, scale, input); @@ -34,7 +34,7 @@ void THNN_(LRNforward)(THCState* state, THCTensor* input, THCTensor* output, input = THCTensor_(newContiguous)(state, input); int n_threads = batchSize * imsize_h * imsize_w; - LRNFillScale <<>>( + LRNFillScale <<>>( n_threads, THCTensor_(data)(state, input), batchSize, nInputPlane, imsize_h, imsize_w, local_size, alpha / local_size, k, THCTensor_(data)(state, scale)); n_threads *= nInputPlane; @@ -51,9 +51,9 @@ void THNN_(LRNbackward)(THCState* state, THCTensor* input, THCTensor* output, THCTensor* gradOutput, THCTensor* gradInput, THCTensor* scale, int local_size, accreal alpha_, accreal beta_, accreal k_) { - real alpha = ScalarConvert::to(alpha_); - real beta = ScalarConvert::to(beta_); - real k = ScalarConvert::to(k_); + scalar_t alpha = ScalarConvert::to(alpha_); + scalar_t beta = ScalarConvert::to(beta_); + scalar_t k = ScalarConvert::to(k_); (void) k; THCTensor_(resizeAs)(state, gradInput, input); @@ -80,10 +80,10 @@ void THNN_(LRNbackward)(THCState* state, THCTensor* input, THCTensor* output, gradOutput = THCTensor_(newContiguous)(state, gradOutput); int n_threads = batchSize * imsize_h * imsize_w; - LRNComputeDiff <<>>( + LRNComputeDiff <<>>( n_threads, THCTensor_(data)(state, input), THCTensor_(data)(state, output), THCTensor_(data)(state, scale), THCTensor_(data)(state, gradOutput), batchSize, nInputPlane, imsize_h, imsize_w, - local_size, -beta, ScalarConvert::to(2) * alpha * beta / local_size, + local_size, -beta, ScalarConvert::to(2) * alpha * beta / local_size, THCTensor_(data)(state, gradInput)); THCudaCheck(cudaGetLastError()); diff --git a/aten/src/THCUNN/generic/SpatialDepthwiseConvolution.cu b/aten/src/THCUNN/generic/SpatialDepthwiseConvolution.cu index 546ec2ae3c6185..068cad38b9b263 100644 --- a/aten/src/THCUNN/generic/SpatialDepthwiseConvolution.cu +++ 
b/aten/src/THCUNN/generic/SpatialDepthwiseConvolution.cu @@ -53,12 +53,12 @@ void THNN_(SpatialDepthwiseConvolution_updateOutput)( // Create THCDeviceTensor // Kernel currently relies upon all the Tensors to be contiguous, but we made // them contiguous above - THCDeviceTensor dInput = toDeviceTensor(state, input); - THCDeviceTensor dWeight = toDeviceTensor(state, weight); - THCDeviceTensor dOutput = toDeviceTensor(state, output); - THCDeviceTensor dBias; + THCDeviceTensor dInput = toDeviceTensor(state, input); + THCDeviceTensor dWeight = toDeviceTensor(state, weight); + THCDeviceTensor dOutput = toDeviceTensor(state, output); + THCDeviceTensor dBias; if (bias) { - dBias = toDeviceTensor(state, bias); + dBias = toDeviceTensor(state, bias); } int inputChannels = input->size(1); @@ -70,17 +70,17 @@ void THNN_(SpatialDepthwiseConvolution_updateOutput)( dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); if (kW == 3 && kH == 3) { - spatialDepthwiseConvolutionUpdateOutput<<>>( + spatialDepthwiseConvolutionUpdateOutput<<>>( dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (kW == 1 && kH == 1) { - spatialDepthwiseConvolutionUpdateOutput<<>>( + spatialDepthwiseConvolutionUpdateOutput<<>>( dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { - spatialDepthwiseConvolutionUpdateOutput<<>>( + spatialDepthwiseConvolutionUpdateOutput<<>>( dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); @@ -130,9 +130,9 @@ void THNN_(SpatialDepthwiseConvolution_updateGradInput)( int depthwiseMultiplier = outputChannels / inputChannels; - THCDeviceTensor dGradOutput = toDeviceTensor(state, gradOutput); - THCDeviceTensor dGradInput = toDeviceTensor(state, gradInput); - THCDeviceTensor dWeight = toDeviceTensor(state, weight); + THCDeviceTensor dGradOutput = toDeviceTensor(state, gradOutput); + THCDeviceTensor dGradInput = toDeviceTensor(state, gradInput); + THCDeviceTensor dWeight = toDeviceTensor(state, weight); // Kernel currently relies upon all the Tensors to be contiguous THAssert(dGradOutput.isContiguous()); @@ -146,43 +146,43 @@ void THNN_(SpatialDepthwiseConvolution_updateGradInput)( dim3 block(CUDA_NUM_THREADS); if (kW == 3 && kH == 3) if (dW == 1 && dH == 1){ - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (kW == 1 && kH == 1) if (dW == 1 && dH == 1){ - 
spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 1 && dH == 1){ - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { - spatialDepthwiseConvolutionUpdateGradInput<<>>( + spatialDepthwiseConvolutionUpdateGradInput<<>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } @@ -225,9 +225,9 @@ void THNN_(SpatialDepthwiseConvolution_accGradParameters)( int depthwiseMultiplier = outputChannels / inputChannels; - THCDeviceTensor dGradOutput = toDeviceTensor(state, gradOutput); - THCDeviceTensor dInput = toDeviceTensor(state, input); - THCDeviceTensor dGradWeight = toDeviceTensor(state, gradWeight); + THCDeviceTensor dGradOutput = toDeviceTensor(state, gradOutput); + THCDeviceTensor dInput = toDeviceTensor(state, input); + THCDeviceTensor dGradWeight = toDeviceTensor(state, gradWeight); // Kernel currently relies upon all the Tensors to be contiguous THAssert(dGradOutput.isContiguous()); @@ -244,7 +244,7 @@ void THNN_(SpatialDepthwiseConvolution_accGradParameters)( dim3 block(getGradParamsNumThreads(batchSize)); int smem = block.x * sizeof(accreal); - spatialDepthwiseConvolutionAccGradParameters<<>>( + spatialDepthwiseConvolutionAccGradParameters<<>>( dGradOutput, dInput, dGradWeight, batchSize, inputChannels, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); diff --git a/aten/src/THCUNN/generic/SpatialDilatedConvolution.cu b/aten/src/THCUNN/generic/SpatialDilatedConvolution.cu index 4225583735460e..34ecf3ea094e18 100644 --- a/aten/src/THCUNN/generic/SpatialDilatedConvolution.cu +++ b/aten/src/THCUNN/generic/SpatialDilatedConvolution.cu @@ -130,7 +130,7 @@ void THNN_(SpatialDilatedConvolution_updateOutput)( if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { // Resize plane and fill with ones... 
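// A hedged sketch of the dispatch pattern in the depthwise-convolution hunks
// above: the kernel is templated on the filter size so the compiler can
// unroll its inner loops, and the host picks a specialization for the common
// cases, falling back to a runtime-size instantiation. All names here
// (depthwiseSketch, launchDepthwiseSketch) are illustrative, not THCUNN API.
template <typename T, int KW, int KH>
__global__ void depthwiseSketch(const T* in, T* out, int kW, int kH) {
  const int kw = (KW > 0) ? KW : kW;  // compile-time constant when KW > 0
  const int kh = (KH > 0) ? KH : kH;
  // ... unrolled kw x kh accumulation per output element ...
  (void)in; (void)out; (void)kw; (void)kh;
}

template <typename T>
void launchDepthwiseSketch(const T* in, T* out, int kW, int kH,
                           dim3 grid, dim3 block, cudaStream_t stream) {
  if (kW == 3 && kH == 3)
    depthwiseSketch<T, 3, 3><<<grid, block, 0, stream>>>(in, out, kW, kH);
  else if (kW == 1 && kH == 1)
    depthwiseSketch<T, 1, 1><<<grid, block, 0, stream>>>(in, out, kW, kH);
  else
    depthwiseSketch<T, -1, -1><<<grid, block, 0, stream>>>(in, out, kW, kH);
}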
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Helpers @@ -162,10 +162,10 @@ void THNN_(SpatialDilatedConvolution_updateOutput)( state, 't', 'n', n_, m_, k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, output_n), n_ ); } else { @@ -200,10 +200,10 @@ void THNN_(SpatialDilatedConvolution_updateOutput)( state, 'n', 'n', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, columns), n, THCTensor_(data)(state, weight), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, output_n), n ); } @@ -298,15 +298,15 @@ void THNN_(SpatialDilatedConvolution_updateGradInput)( state, 'n', 't', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradOutput_n), n, THCTensor_(data)(state, weight), m, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, gradColumns), n ); // Unpack columns back into input: - col2im( + col2im( THCState_getCurrentStream(state), THCTensor_(data)(state, gradColumns), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, @@ -345,7 +345,7 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)( int dilationW, int dilationH, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones); if (gradBias) { THCUNN_assertSameGPU(state, 2, gradWeight, gradBias); @@ -387,7 +387,7 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)( if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { // Resize plane and fill with ones... 
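// Why a plane of ones is kept around: broadcasting a per-channel bias over
// every output location is a k == 1 GEMM, output(m, n) = beta * output(m, n)
// + bias(m) * ones(n), which is what the gemm calls against the ones buffer
// compute. Hedged host-side sketch of the same arithmetic (name illustrative):
void addBiasViaOnesSketch(float* output, const float* bias,
                          int m, int n, float beta) {
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      output[i * n + j] = beta * output[i * n + j] + bias[i] * 1.0f;  // ones(j) == 1
}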
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Resize temporary columns @@ -438,7 +438,7 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, columns), k, THCTensor_(data)(state, gradOutput_n), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradWeight), n ); } @@ -463,7 +463,7 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), 1, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), 1 ); #endif @@ -475,7 +475,7 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), m_ ); #endif diff --git a/aten/src/THCUNN/generic/SpatialDilatedMaxPooling.cu b/aten/src/THCUNN/generic/SpatialDilatedMaxPooling.cu index c1a0e9a7d425f7..a82ed9542990f1 100644 --- a/aten/src/THCUNN/generic/SpatialDilatedMaxPooling.cu +++ b/aten/src/THCUNN/generic/SpatialDilatedMaxPooling.cu @@ -135,17 +135,17 @@ void THNN_(SpatialDilatedMaxPooling_updateOutput)( } input = THCTensor_(newContiguous)(state, input); - real* input_data = THCTensor_(data)(state, input); + scalar_t* input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols); THCUNN_resizeAs_indices(state, indices, output); THCIndex_t* indices_data = THCIndexTensor_(data)(state, indices); - real* output_data = THCTensor_(data)(state, output); + scalar_t* output_data = THCTensor_(data)(state, output); int count = THCTensor_(nElement)(state, output); - MaxPoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> + MaxPoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); @@ -227,7 +227,7 @@ void THNN_(SpatialDilatedMaxPooling_updateGradInput)( uint64_t maxGridZ = THCState_getCurrentDeviceProperties(state)->maxGridSize[2]; if (maxGridY < grid.y) grid.y = maxGridY; if (maxGridZ < grid.z) grid.z = maxGridZ; - MaxPoolBackward <<< grid, BACKWARD_THREADS, 0, THCState_getCurrentStream(state) >>> + MaxPoolBackward <<< grid, BACKWARD_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCTensor_(data)(state, gradOutput), THCIndexTensor_(data)(state, indices), diff --git a/aten/src/THCUNN/generic/SpatialFractionalMaxPooling.cu b/aten/src/THCUNN/generic/SpatialFractionalMaxPooling.cu index a4a880b8808a63..b7b73cbbc8ec6d 100644 --- a/aten/src/THCUNN/generic/SpatialFractionalMaxPooling.cu +++ b/aten/src/THCUNN/generic/SpatialFractionalMaxPooling.cu @@ -39,11 +39,11 @@ void THNN_(SpatialFractionalMaxPooling_updateOutput)( "poolSizeW (%d) too large relative to input width (%d)", poolSizeW, inputW); - THCDeviceTensor devInput; - THCDeviceTensor devOutput; + THCDeviceTensor devInput; + THCDeviceTensor devOutput; THCDeviceTensor devIndices; - THCDeviceTensor devSamples = - toDeviceTensor(state, randomSamples); + THCDeviceTensor devSamples = + toDeviceTensor(state, randomSamples); if (numInputDims == 3) { /* resize output */ @@ -51,16 +51,16 @@ void 
THNN_(SpatialFractionalMaxPooling_updateOutput)( /* indices will contain the locations for each output point */ THCIndexTensor_(resize3d)(state, indices, numPlanes, outputH, outputW); - devInput = toDeviceTensor(state, input).upcastOuter<4>(); - devOutput = toDeviceTensor(state, output).upcastOuter<4>(); + devInput = toDeviceTensor(state, input).upcastOuter<4>(); + devOutput = toDeviceTensor(state, output).upcastOuter<4>(); devIndices = toDeviceTensor(state, indices).upcastOuter<4>(); } else { THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW); /* indices will contain the locations for each output point */ THCIndexTensor_(resize4d)(state, indices, numBatch, numPlanes, outputH, outputW); - devInput = toDeviceTensor(state, input); - devOutput = toDeviceTensor(state, output); + devInput = toDeviceTensor(state, input); + devOutput = toDeviceTensor(state, output); devIndices = toDeviceTensor(state, indices); } @@ -73,7 +73,7 @@ void THNN_(SpatialFractionalMaxPooling_updateOutput)( dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize); #define SFMP_UPDATE_OUTPUT(POOL_W) \ - SpatialFractionalMaxPooling_updateOutput \ + SpatialFractionalMaxPooling_updateOutput \ <<>>( \ devInput, devOutput, devIndices, devSamples, poolSizeW, poolSizeH); @@ -125,18 +125,18 @@ void THNN_(SpatialFractionalMaxPooling_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - THCDeviceTensor devGradInput; - THCDeviceTensor devGradOutput; + THCDeviceTensor devGradInput; + THCDeviceTensor devGradOutput; THCDeviceTensor devIndices; /* backprop */ if (numInputDims == 3) { - devGradInput = toDeviceTensor(state, gradInput).upcastOuter<4>(); - devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<4>(); + devGradInput = toDeviceTensor(state, gradInput).upcastOuter<4>(); + devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<4>(); devIndices = toDeviceTensor(state, indices).upcastOuter<4>(); } else { - devGradInput = toDeviceTensor(state, gradInput); - devGradOutput = toDeviceTensor(state, gradOutput); + devGradInput = toDeviceTensor(state, gradInput); + devGradOutput = toDeviceTensor(state, gradOutput); devIndices = toDeviceTensor(state, indices); } diff --git a/aten/src/THCUNN/generic/SpatialFullDilatedConvolution.cu b/aten/src/THCUNN/generic/SpatialFullDilatedConvolution.cu index 8d039d54068aaf..61993adc8bbd88 100644 --- a/aten/src/THCUNN/generic/SpatialFullDilatedConvolution.cu +++ b/aten/src/THCUNN/generic/SpatialFullDilatedConvolution.cu @@ -128,7 +128,7 @@ void THNN_(SpatialFullDilatedConvolution_updateOutput)( if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { // Resize plane and fill with ones... 
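// What upcastOuter<4>() in the fractional-max-pooling hunks accomplishes: a
// 3D (C, H, W) tensor is viewed as (1, C, H, W) so one 4D kernel serves both
// batched and unbatched inputs. Hedged sketch of such a view; the struct and
// function below are illustrative, not the THCDeviceTensor API:
struct DeviceTensor4Sketch {
  float* data;
  int size[4];
  int stride[4];
};

DeviceTensor4Sketch upcastOuterSketch(float* data, int c, int h, int w) {
  // The added leading dimension has size 1, so no data moves and indexing it
  // with 0 is a no-op.
  return DeviceTensor4Sketch{data, {1, c, h, w}, {c * h * w, h * w, w, 1}};
}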
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Helpers @@ -158,15 +158,15 @@ void THNN_(SpatialFullDilatedConvolution_updateOutput)( state, 'n', 't', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, input_n), n, THCTensor_(data)(state, weight), m, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, columns), n ); // Unpack columns back into input: - col2im( + col2im( THCState_getCurrentStream(state), THCTensor_(data)(state, columns), nOutputPlane, outputHeight, outputWidth, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, @@ -192,10 +192,10 @@ void THNN_(SpatialFullDilatedConvolution_updateOutput)( state, 't', 'n', n_, m_, k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, output_n), n_ ); } @@ -300,10 +300,10 @@ void THNN_(SpatialFullDilatedConvolution_updateGradInput)( state, 'n', 'n', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradColumns), n, THCTensor_(data)(state, weight), k, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, gradInput_n), n ); } @@ -340,7 +340,7 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( int adjW, int adjH, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones); THNN_(SpatialFullDilatedConvolution_shapeCheck) @@ -387,7 +387,7 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { // Resize plane and fill with ones... 
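// The full (transposed) convolution above runs the GEMM first and then
// col2im, the inverse of im2col: each column entry is accumulated back into
// the image location it overlaps. Hedged single-plane reference loop, with
// stride and padding but dilation omitted for brevity (name illustrative):
void col2imSketch(const float* columns,  // (kH*kW) x (outH*outW)
                  float* image,          // H x W, assumed pre-zeroed
                  int H, int W, int kH, int kW,
                  int padH, int padW, int dH, int dW, int outH, int outW) {
  for (int ky = 0; ky < kH; ++ky)
    for (int kx = 0; kx < kW; ++kx)
      for (int oy = 0; oy < outH; ++oy)
        for (int ox = 0; ox < outW; ++ox) {
          int iy = oy * dH - padH + ky;
          int ix = ox * dW - padW + kx;
          if (iy >= 0 && iy < H && ix >= 0 && ix < W)
            image[iy * W + ix] +=
                columns[(ky * kW + kx) * (outH * outW) + oy * outW + ox];
        }
}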
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Resize temporary columns @@ -437,7 +437,7 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, columns), k, THCTensor_(data)(state, input_n), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradWeight), n ); } @@ -462,7 +462,7 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), 1, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), 1 ); #endif @@ -474,7 +474,7 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), m_ ); #endif diff --git a/aten/src/THCUNN/generic/SpatialReflectionPadding.cu b/aten/src/THCUNN/generic/SpatialReflectionPadding.cu index 402a1126d7e71d..7d1d23a004fd66 100644 --- a/aten/src/THCUNN/generic/SpatialReflectionPadding.cu +++ b/aten/src/THCUNN/generic/SpatialReflectionPadding.cu @@ -48,19 +48,19 @@ void THNN_(SpatialReflectionPadding_updateOutput)(THCState *state, " Calculated output H: %d W: %d", inputH, inputW, outputH, outputW); - THCDeviceTensor devInput; - THCDeviceTensor devOutput; + THCDeviceTensor devInput; + THCDeviceTensor devOutput; if (numInputDims == 3) { THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW); - devInput = toDeviceTensor(state, input).upcastOuter<4>(); - devOutput = toDeviceTensor(state, output).upcastOuter<4>(); + devInput = toDeviceTensor(state, input).upcastOuter<4>(); + devOutput = toDeviceTensor(state, output).upcastOuter<4>(); } else { THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW); - devInput = toDeviceTensor(state, input); - devOutput = toDeviceTensor(state, output); + devInput = toDeviceTensor(state, input); + devOutput = toDeviceTensor(state, output); } int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3); @@ -112,15 +112,15 @@ void THNN_(SpatialReflectionPadding_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - THCDeviceTensor devGradInput; - THCDeviceTensor devGradOutput; + THCDeviceTensor devGradInput; + THCDeviceTensor devGradOutput; if (numInputDims == 3) { - devGradInput = toDeviceTensor(state, gradInput).upcastOuter<4>(); - devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<4>(); + devGradInput = toDeviceTensor(state, gradInput).upcastOuter<4>(); + devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<4>(); } else { - devGradInput = toDeviceTensor(state, gradInput); - devGradOutput = toDeviceTensor(state, gradOutput); + devGradInput = toDeviceTensor(state, gradInput); + devGradOutput = toDeviceTensor(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3); diff --git a/aten/src/THCUNN/generic/SpatialReplicationPadding.cu b/aten/src/THCUNN/generic/SpatialReplicationPadding.cu index 0475a1d668adb6..1801fa063936ce 100644 --- a/aten/src/THCUNN/generic/SpatialReplicationPadding.cu +++ b/aten/src/THCUNN/generic/SpatialReplicationPadding.cu @@ -38,19 +38,19 @@ void THNN_(SpatialReplicationPadding_updateOutput)( " Calculated output H: %d W: %d", inputH, inputW, outputH, outputW); - THCDeviceTensor devInput; - THCDeviceTensor 
devOutput; + THCDeviceTensor devInput; + THCDeviceTensor devOutput; if (numInputDims == 3) { THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW); - devInput = toDeviceTensor(state, input).upcastOuter<4>(); - devOutput = toDeviceTensor(state, output).upcastOuter<4>(); + devInput = toDeviceTensor(state, input).upcastOuter<4>(); + devOutput = toDeviceTensor(state, output).upcastOuter<4>(); } else { THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW); - devInput = toDeviceTensor(state, input); - devOutput = toDeviceTensor(state, output); + devInput = toDeviceTensor(state, input); + devOutput = toDeviceTensor(state, output); } int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3); @@ -102,15 +102,15 @@ void THNN_(SpatialReplicationPadding_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - THCDeviceTensor devGradInput; - THCDeviceTensor devGradOutput; + THCDeviceTensor devGradInput; + THCDeviceTensor devGradOutput; if (numInputDims == 3) { - devGradInput = toDeviceTensor(state, gradInput).upcastOuter<4>(); - devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<4>(); + devGradInput = toDeviceTensor(state, gradInput).upcastOuter<4>(); + devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<4>(); } else { - devGradInput = toDeviceTensor(state, gradInput); - devGradOutput = toDeviceTensor(state, gradOutput); + devGradInput = toDeviceTensor(state, gradInput); + devGradOutput = toDeviceTensor(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3); diff --git a/aten/src/THCUNN/generic/SpatialSubSampling.cu b/aten/src/THCUNN/generic/SpatialSubSampling.cu index d44168218e44c9..e65c570b0b8ebf 100644 --- a/aten/src/THCUNN/generic/SpatialSubSampling.cu +++ b/aten/src/THCUNN/generic/SpatialSubSampling.cu @@ -40,10 +40,10 @@ void THNN_(SpatialSubSampling_updateOutput)( int kW, int kH, int dW, int dH) { - real *weight_data = THCTensor_(data)(state, weight); - real *bias_data = THCTensor_(data)(state, bias); - real *output_data; - real *input_data; + scalar_t *weight_data = THCTensor_(data)(state, weight); + scalar_t *bias_data = THCTensor_(data)(state, bias); + scalar_t *output_data; + scalar_t *input_data; int nInputPlane = THCTensor_(size)(state, weight, 0); @@ -69,7 +69,7 @@ void THNN_(SpatialSubSampling_updateOutput)( dim3 threads(32,8); // run subsample kernel - subsample <<>> ( + subsample <<>> ( input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); THCudaCheck(cudaGetLastError()); @@ -93,7 +93,7 @@ void THNN_(SpatialSubSampling_updateOutput)( dim3 threads(32,8); // run subsample kernel - subsample <<>> ( + subsample <<>> ( input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); THCudaCheck(cudaGetLastError()); @@ -122,10 +122,10 @@ void THNN_(SpatialSubSampling_updateGradInput)( int64_t nInputCols = input->size(2); int64_t nInputRows = input->size(1); - real *weight_data = THCTensor_(data)(state, weight); + scalar_t *weight_data = THCTensor_(data)(state, weight); gradOutput = THCTensor_(newContiguous)(state, gradOutput); - real *gradOutput_data = THCTensor_(data)(state, gradOutput); - real *gradInput_data; + scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); + scalar_t *gradInput_data; THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); @@ -153,10 +153,10 @@ void THNN_(SpatialSubSampling_updateGradInput)( 
   int64_t nInputRows = input->size(2);
    int64_t nbatch = input->size(0);

-    real *weight_data = THCTensor_(data)(state, weight);
+    scalar_t *weight_data = THCTensor_(data)(state, weight);
     gradOutput = THCTensor_(newContiguous)(state, gradOutput);
-    real *gradOutput_data = THCTensor_(data)(state, gradOutput);
-    real *gradInput_data;
+    scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
+    scalar_t *gradInput_data;

     THCTensor_(resizeAs)(state, gradInput, input);
     THCTensor_(zero)(state, gradInput);
@@ -202,11 +202,11 @@ void THNN_(SpatialSubSampling_accGradParameters)(
     int64_t nInputCols = input->size(2);
     int64_t nInputRows = input->size(1);

-    real *gradWeight_data = THCTensor_(data)(state, gradWeight);
-    real *gradBias_data = THCTensor_(data)(state, gradBias);
+    scalar_t *gradWeight_data = THCTensor_(data)(state, gradWeight);
+    scalar_t *gradBias_data = THCTensor_(data)(state, gradBias);
     gradOutput = THCTensor_(newContiguous)(state, gradOutput);
-    real *gradOutput_data = THCTensor_(data)(state, gradOutput);
-    real *input_data;
+    scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
+    scalar_t *input_data;

     input = THCTensor_(newContiguous)(state, input);
     input_data = THCTensor_(data)(state, input);
@@ -216,7 +216,7 @@ void THNN_(SpatialSubSampling_accGradParameters)(
     dim3 threads(32,8);

     // run gradweight kernel
-    subgradweight<real, accreal> <<<blocks,threads>>> (
+    subgradweight<scalar_t, accreal> <<<blocks,threads>>> (
       input_data, gradOutput_data, gradWeight_data, gradBias_data,
       nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale);
     THCudaCheck(cudaGetLastError());
@@ -225,11 +225,11 @@ void THNN_(SpatialSubSampling_accGradParameters)(
     int64_t nInputRows = input->size(2);
     int64_t nbatch = input->size(0);

-    real *gradWeight_data = THCTensor_(data)(state, gradWeight);
-    real *gradBias_data = THCTensor_(data)(state, gradBias);
+    scalar_t *gradWeight_data = THCTensor_(data)(state, gradWeight);
+    scalar_t *gradBias_data = THCTensor_(data)(state, gradBias);
     gradOutput = THCTensor_(newContiguous)(state, gradOutput);
-    real *gradOutput_data = THCTensor_(data)(state, gradOutput);
-    real *input_data;
+    scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
+    scalar_t *input_data;

     input = THCTensor_(newContiguous)(state, input);
     input_data = THCTensor_(data)(state, input);
@@ -241,7 +241,7 @@ void THNN_(SpatialSubSampling_accGradParameters)(
     // run gradweight kernel
     int64_t sl;
     for (sl=0; sl<nbatch; sl++) {
-      subgradweight<real, accreal> <<<blocks,threads>>> (
+      subgradweight<scalar_t, accreal> <<<blocks,threads>>> (
         input_data + sl*input->stride(0),
         gradOutput_data + sl*gradOutput->stride(0),
         gradWeight_data,
         gradBias_data,
diff --git a/aten/src/THCUNN/generic/SpatialUpSamplingBilinear.cu b/aten/src/THCUNN/generic/SpatialUpSamplingBilinear.cu
index f9cc0a4452acd2..25a175fbbc2705 100644
--- a/aten/src/THCUNN/generic/SpatialUpSamplingBilinear.cu
+++ b/aten/src/THCUNN/generic/SpatialUpSamplingBilinear.cu
@@ -52,8 +52,8 @@ void THNN_(SpatialUpSamplingBilinear_updateOutput)(
                      THCTensor_(size)(state, input, 1),
                      outputHeight, outputWidth);
   THCTensor_(zero)(state, output);
-  THCDeviceTensor<real, 4> idata = toDeviceTensor<real, 4>(state, input);
-  THCDeviceTensor<real, 4> odata = toDeviceTensor<real, 4>(state, output);
+  THCDeviceTensor<scalar_t, 4> idata = toDeviceTensor<scalar_t, 4>(state, input);
+  THCDeviceTensor<scalar_t, 4> odata = toDeviceTensor<scalar_t, 4>(state, output);
   THAssert(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0);
   const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners);
   const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
@@ -61,7 +61,7 @@
const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - caffe_gpu_interp2_kernel << <<>>(num_kernels, rheight, rwidth, align_corners, idata, odata); THCudaCheck(cudaGetLastError()); } @@ -88,15 +88,15 @@ void THNN_(SpatialUpSamplingBilinear_updateGradInput)( THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THCTensor_(resize4d)(state, gradInput, nbatch, nchannels, inputHeight, inputWidth); THCTensor_(zero)(state, gradInput); - THCDeviceTensor data1 = toDeviceTensor(state, gradInput); - THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); + THCDeviceTensor data1 = toDeviceTensor(state, gradInput); + THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); const accreal rheight = linear_upsampling_compute_scale(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - caffe_gpu_interp2_kernel_backward << <<>>(num_kernels, rheight, rwidth, align_corners, data1, data2); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); diff --git a/aten/src/THCUNN/generic/SpatialUpSamplingNearest.cu b/aten/src/THCUNN/generic/SpatialUpSamplingNearest.cu index 022dc72935a5f3..433cbf91343353 100644 --- a/aten/src/THCUNN/generic/SpatialUpSamplingNearest.cu +++ b/aten/src/THCUNN/generic/SpatialUpSamplingNearest.cu @@ -54,13 +54,13 @@ void THNN_(SpatialUpSamplingNearest_updateOutput)( outputWidth); THCTensor_(zero)(state, output); - THCDeviceTensor idata = toDeviceTensor(state, input); - THCDeviceTensor odata = toDeviceTensor(state, output); + THCDeviceTensor idata = toDeviceTensor(state, input); + THCDeviceTensor odata = toDeviceTensor(state, output); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - nearest_neighbor_4d_kernel << <<>>(num_kernels, idata, odata); THCudaCheck(cudaGetLastError()); } @@ -85,14 +85,14 @@ void THNN_(SpatialUpSamplingNearest_updateGradInput)( THCTensor_(resize4d)(state, gradInput, nbatch, nchannels, inputHeight, inputWidth); THCTensor_(zero)(state, gradInput); - THCDeviceTensor data1 = toDeviceTensor(state, gradInput); - THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); + THCDeviceTensor data1 = toDeviceTensor(state, gradInput); + THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - nearest_neighbor_4d_kernel_backward << <<>>(num_kernels, data1, data2); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); diff --git a/aten/src/THCUNN/generic/Sqrt.cu b/aten/src/THCUNN/generic/Sqrt.cu index 57a6fc80818e49..7644310e7bbe4e 100644 --- a/aten/src/THCUNN/generic/Sqrt.cu +++ b/aten/src/THCUNN/generic/Sqrt.cu @@ -10,10 +10,10 @@ void THNN_(Sqrt_updateOutput)( THCTensor *output, accreal eps_) { - real eps = ScalarConvert::to(eps_); + scalar_t eps = ScalarConvert::to(eps_); THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, 
sqrtupdateOutput_functor(eps)); + THC_pointwiseApply2(state, output, input, sqrtupdateOutput_functor(eps)); } void THNN_(Sqrt_updateGradInput)( @@ -26,7 +26,7 @@ void THNN_(Sqrt_updateGradInput)( THCUNN_check_shape(state, output, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, output); - THC_pointwiseApply3(state, gradInput, output, gradOutput, sqrtupdateGradInput_functor()); + THC_pointwiseApply3(state, gradInput, output, gradOutput, sqrtupdateGradInput_functor()); } #endif diff --git a/aten/src/THCUNN/generic/Square.cu b/aten/src/THCUNN/generic/Square.cu index 745502bf0f1436..aa085b278fc579 100644 --- a/aten/src/THCUNN/generic/Square.cu +++ b/aten/src/THCUNN/generic/Square.cu @@ -11,7 +11,7 @@ void THNN_(Square_updateOutput)( { THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, squareupdateOutput_functor()); + THC_pointwiseApply2(state, output, input, squareupdateOutput_functor()); } void THNN_(Square_updateGradInput)( @@ -23,7 +23,7 @@ void THNN_(Square_updateGradInput)( THCUNN_check_shape(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, input); - THC_pointwiseApply3(state, gradInput, input, gradOutput, squareupdateGradInput_functor()); + THC_pointwiseApply3(state, gradInput, input, gradOutput, squareupdateGradInput_functor()); } #endif diff --git a/aten/src/THCUNN/generic/Tanh.cu b/aten/src/THCUNN/generic/Tanh.cu index 32abd4793cc617..8e23ec0d5cf2d3 100644 --- a/aten/src/THCUNN/generic/Tanh.cu +++ b/aten/src/THCUNN/generic/Tanh.cu @@ -23,7 +23,7 @@ void THNN_(Tanh_updateGradInput)( THCUNN_check_shape(state, output, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, output); - THC_pointwiseApply3(state, gradInput, output, gradOutput, tanh_updateGradInput_functor()); + THC_pointwiseApply3(state, gradInput, output, gradOutput, tanh_updateGradInput_functor()); } #endif diff --git a/aten/src/THCUNN/generic/TemporalConvolution.cu b/aten/src/THCUNN/generic/TemporalConvolution.cu index 886cf07b4e75e9..651118346c2da5 100644 --- a/aten/src/THCUNN/generic/TemporalConvolution.cu +++ b/aten/src/THCUNN/generic/TemporalConvolution.cu @@ -102,7 +102,7 @@ void THNN_(TemporalConvolution_updateOutput)( THCTensor *tweight = THCTensor_(new)(state); THCTensor_(transpose)(state, tweight, weight, 0, 1); - THCTensor_(addmm)(state, outputWindow, ScalarConvert::to(1), outputWindow, ScalarConvert::to(1), inputWindow, tweight); + THCTensor_(addmm)(state, outputWindow, ScalarConvert::to(1), outputWindow, ScalarConvert::to(1), inputWindow, tweight); THCTensor_(free)(state, tweight); } } @@ -150,7 +150,7 @@ void THNN_(TemporalConvolution_updateOutput)( THCTensor *tweight = THCTensor_(new)(state); THCTensor_(transpose)(state, tweight, weight, 0, 1); - THCTensor_(addmm)(state, outputWindow, ScalarConvert::to(1), outputWindow, ScalarConvert::to(1), inputWindow, tweight); + THCTensor_(addmm)(state, outputWindow, ScalarConvert::to(1), outputWindow, ScalarConvert::to(1), inputWindow, tweight); THCTensor_(free)(state, tweight); } } @@ -225,7 +225,7 @@ void THNN_(TemporalConvolution_updateGradInput)( nFrame, inputFrameStride*gradInput->size(1), kW*gradInput->size(1), 1); - THCTensor_(addmm)(state, gradInputWindow, ScalarConvert::to(1), gradInputWindow, ScalarConvert::to(1), gradOutputWindow, weight); + THCTensor_(addmm)(state, 
gradInputWindow, ScalarConvert::to(1), gradInputWindow, ScalarConvert::to(1), gradOutputWindow, weight); } } else @@ -257,7 +257,7 @@ void THNN_(TemporalConvolution_updateGradInput)( nFrame, inputFrameStride*gradInputSample->size(1), kW*gradInputSample->size(1), 1); - THCTensor_(addmm)(state, gradInputWindow, ScalarConvert::to(1), gradInputWindow, ScalarConvert::to(1), gradOutputWindow, weight); + THCTensor_(addmm)(state, gradInputWindow, ScalarConvert::to(1), gradInputWindow, ScalarConvert::to(1), gradOutputWindow, weight); } } THCTensor_(free)(state, gradOutputSample); @@ -280,7 +280,7 @@ void THNN_(TemporalConvolution_accGradParameters)( int kW, int dW, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); int64_t nInputFrame; int64_t nOutputFrame; @@ -336,7 +336,7 @@ void THNN_(TemporalConvolution_accGradParameters)( THCTensor *tgradOutputWindow = THCTensor_(new)(state); THCTensor_(transpose)(state, tgradOutputWindow, gradOutputWindow, 0, 1); - THCTensor_(addmm)(state, gradWeight, ScalarConvert::to(1), gradWeight, scale, tgradOutputWindow, inputWindow); + THCTensor_(addmm)(state, gradWeight, ScalarConvert::to(1), gradWeight, scale, tgradOutputWindow, inputWindow); THCTensor_(free)(state, tgradOutputWindow); } } @@ -379,7 +379,7 @@ void THNN_(TemporalConvolution_accGradParameters)( THCTensor *tgradOutputWindow = THCTensor_(new)(state); THCTensor_(transpose)(state, tgradOutputWindow, gradOutputWindow, 0, 1); - THCTensor_(addmm)(state, gradWeight, ScalarConvert::to(1), gradWeight, scale, tgradOutputWindow, inputWindow); + THCTensor_(addmm)(state, gradWeight, ScalarConvert::to(1), gradWeight, scale, tgradOutputWindow, inputWindow); THCTensor_(free)(state, tgradOutputWindow); } } diff --git a/aten/src/THCUNN/generic/TemporalMaxPooling.cu b/aten/src/THCUNN/generic/TemporalMaxPooling.cu index a950aa730afb59..3889c74dddc2e9 100644 --- a/aten/src/THCUNN/generic/TemporalMaxPooling.cu +++ b/aten/src/THCUNN/generic/TemporalMaxPooling.cu @@ -61,8 +61,8 @@ void THNN_(TemporalMaxPooling_updateOutput)( int output_w; int nthreads; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THCIndex_t *indices_data; THCUNN_assertSameGPU(state, 3, input, output, indices); @@ -133,8 +133,8 @@ void THNN_(TemporalMaxPooling_updateGradInput)( int output_w; int nthreads; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; THCIndex_t *indices_data; THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, indices); diff --git a/aten/src/THCUNN/generic/TemporalReflectionPadding.cu b/aten/src/THCUNN/generic/TemporalReflectionPadding.cu index 310f22d03e5dfa..908d3a76b8f770 100644 --- a/aten/src/THCUNN/generic/TemporalReflectionPadding.cu +++ b/aten/src/THCUNN/generic/TemporalReflectionPadding.cu @@ -38,19 +38,19 @@ void THNN_(TemporalReflectionPadding_updateOutput)(THCState *state, " Calculated output W: %d", inputW, outputW); - THCDeviceTensor devInput; - THCDeviceTensor devOutput; + THCDeviceTensor devInput; + THCDeviceTensor devOutput; if (numInputDims == 2) { THCTensor_(resize2d)(state, output, numPlanes, outputW); - devInput = toDeviceTensor(state, input).upcastOuter<3>(); - devOutput = toDeviceTensor(state, output).upcastOuter<3>(); + devInput = toDeviceTensor(state, input).upcastOuter<3>(); + devOutput = toDeviceTensor(state, output).upcastOuter<3>(); } else { THCTensor_(resize3d)(state, output, numBatch, numPlanes, outputW); - devInput = toDeviceTensor(state, input); - 
devOutput = toDeviceTensor(state, output); + devInput = toDeviceTensor(state, input); + devOutput = toDeviceTensor(state, output); } int outputPlaneSize = devOutput.getSize(2); @@ -94,15 +94,15 @@ void THNN_(TemporalReflectionPadding_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - THCDeviceTensor devGradInput; - THCDeviceTensor devGradOutput; + THCDeviceTensor devGradInput; + THCDeviceTensor devGradOutput; if (numInputDims == 2) { - devGradInput = toDeviceTensor(state, gradInput).upcastOuter<3>(); - devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<3>(); + devGradInput = toDeviceTensor(state, gradInput).upcastOuter<3>(); + devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<3>(); } else { - devGradInput = toDeviceTensor(state, gradInput); - devGradOutput = toDeviceTensor(state, gradOutput); + devGradInput = toDeviceTensor(state, gradInput); + devGradOutput = toDeviceTensor(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2); diff --git a/aten/src/THCUNN/generic/TemporalReplicationPadding.cu b/aten/src/THCUNN/generic/TemporalReplicationPadding.cu index e9e560a765606f..88ca5f80c4a071 100644 --- a/aten/src/THCUNN/generic/TemporalReplicationPadding.cu +++ b/aten/src/THCUNN/generic/TemporalReplicationPadding.cu @@ -33,19 +33,19 @@ void THNN_(TemporalReplicationPadding_updateOutput)( " Calculated output W: %d", inputW, outputW); - THCDeviceTensor devInput; - THCDeviceTensor devOutput; + THCDeviceTensor devInput; + THCDeviceTensor devOutput; if (numInputDims == 2) { THCTensor_(resize2d)(state, output, numPlanes, outputW); - devInput = toDeviceTensor(state, input).upcastOuter<3>(); - devOutput = toDeviceTensor(state, output).upcastOuter<3>(); + devInput = toDeviceTensor(state, input).upcastOuter<3>(); + devOutput = toDeviceTensor(state, output).upcastOuter<3>(); } else { THCTensor_(resize3d)(state, output, numBatch, numPlanes, outputW); - devInput = toDeviceTensor(state, input); - devOutput = toDeviceTensor(state, output); + devInput = toDeviceTensor(state, input); + devOutput = toDeviceTensor(state, output); } int outputPlaneSize = devOutput.getSize(2); @@ -89,15 +89,15 @@ void THNN_(TemporalReplicationPadding_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - THCDeviceTensor devGradInput; - THCDeviceTensor devGradOutput; + THCDeviceTensor devGradInput; + THCDeviceTensor devGradOutput; if (numInputDims == 2) { - devGradInput = toDeviceTensor(state, gradInput).upcastOuter<3>(); - devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<3>(); + devGradInput = toDeviceTensor(state, gradInput).upcastOuter<3>(); + devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<3>(); } else { - devGradInput = toDeviceTensor(state, gradInput); - devGradOutput = toDeviceTensor(state, gradOutput); + devGradInput = toDeviceTensor(state, gradInput); + devGradOutput = toDeviceTensor(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2); diff --git a/aten/src/THCUNN/generic/TemporalRowConvolution.cu b/aten/src/THCUNN/generic/TemporalRowConvolution.cu index 208016e93fe960..9b64c5b986ef7c 100644 --- a/aten/src/THCUNN/generic/TemporalRowConvolution.cu +++ b/aten/src/THCUNN/generic/TemporalRowConvolution.cu @@ -107,7 +107,7 @@ void THNN_(TemporalRowConvolution_updateOutput)( if (ones->dim() != 2 || ones->size(0) * ones->size(1) < nOutputFrame) { // Resize plane and fill with ones... 
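// The Sqrt/Square/Tanh hunks earlier in the patch only retype a functor's
// template argument; the underlying pattern is an element-wise apply driven
// by a device functor. Hedged sketch under assumed semantics (sqrt(x + eps)
// for the Sqrt forward); the Sketch-suffixed names are illustrative:
template <typename T>
struct sqrtUpdateOutputSketch {
  T eps;
  explicit sqrtUpdateOutputSketch(T e) : eps(e) {}
  __device__ void operator()(T* out, const T* in) const {
    *out = static_cast<T>(sqrtf(static_cast<float>(*in) +
                                static_cast<float>(eps)));
  }
};

template <typename T, typename Op>
__global__ void pointwiseApply2Sketch(T* a, const T* b, long n, Op op) {
  for (long i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += (long)gridDim.x * blockDim.x)
    op(a + i, b + i);  // grid-stride loop over both tensors
}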
THCTensor_(resize2d)(state, ones, 1, nOutputFrame); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Helpers @@ -137,9 +137,9 @@ void THNN_(TemporalRowConvolution_updateOutput)( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm( #endif - state, 't', 'n', n_, m_, k_, ScalarConvert::to(1), + state, 't', 'n', n_, m_, k_, ScalarConvert::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, - ScalarConvert::to(0), THCTensor_(data)(state, output_n), + ScalarConvert::to(0), THCTensor_(data)(state, output_n), n_); } else { THCTensor_(zero)(state, output_n); @@ -156,8 +156,8 @@ void THNN_(TemporalRowConvolution_updateOutput)( // weight: inputFrameSize x 1 x kW // columns: inputFrameSize x kW x nOutputFrame - THCTensor_(baddbmm)(state, output3d, ScalarConvert::to(1), - output3d, ScalarConvert::to(1), weight, + THCTensor_(baddbmm)(state, output3d, ScalarConvert::to(1), + output3d, ScalarConvert::to(1), weight, columns); // output3d: inputFrameSize x 1 x nOutputFrame @@ -256,13 +256,13 @@ void THNN_(TemporalRowConvolution_updateGradInput)( // weight: inputFrameSize x kW x 1 // gradOutput3d: inputFrameSize x 1 x nOutputFrame - THCTensor_(baddbmm)(state, gradColumns, ScalarConvert::to(0), - gradColumns, ScalarConvert::to(1), tweight, + THCTensor_(baddbmm)(state, gradColumns, ScalarConvert::to(0), + gradColumns, ScalarConvert::to(1), tweight, gradOutput3d); // gradColumns: inputFrameSize x kW x nOutputFrame // Unpack columns back into input: - col2row(THCState_getCurrentStream(state), + col2row(THCState_getCurrentStream(state), THCTensor_(data)(state, gradColumns), inputFrameSize, nInputFrame, kW, padW, dW, 1, THCTensor_(data)(state, gradInput_n)); @@ -299,7 +299,7 @@ void THNN_(TemporalRowConvolution_accGradParameters)( THCTensor *fgradInput, int kW, int dW, int padW, bool featFirst, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); // Aliases THCTensor *columns = finput; THCTensor *ones = fgradInput; @@ -348,7 +348,7 @@ void THNN_(TemporalRowConvolution_accGradParameters)( if (ones->dim() != 2 || ones->size(0) * ones->size(1) < nOutputFrame) { // Resize plane and fill with ones... 
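// baddbmm, which the row-convolution hunks above retype, is a batch of
// scaled matrix multiplies: out[b] = beta * out[b] + alpha * A[b] x B[b].
// Hedged host-side reference loop (row-major, name illustrative):
void baddbmmSketch(float beta, float* out, float alpha, const float* A,
                   const float* B, int batch, int M, int K, int N) {
  for (int b = 0; b < batch; ++b)
    for (int i = 0; i < M; ++i)
      for (int j = 0; j < N; ++j) {
        float acc = 0.f;
        for (int k = 0; k < K; ++k)
          acc += A[(b * M + i) * K + k] * B[(b * K + k) * N + j];
        float& o = out[(b * M + i) * N + j];
        o = beta * o + alpha * acc;
      }
}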
THCTensor_(resize2d)(state, ones, 1, nOutputFrame); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // // Resize temporary columns @@ -378,7 +378,7 @@ void THNN_(TemporalRowConvolution_accGradParameters)( // gradOutput3d: inputFrameSize x 1 x nOutputFrame // columns: inputFrameSize x nOutputFrame x kW - THCTensor_(baddbmm)(state, gradWeight, ScalarConvert::to(1), + THCTensor_(baddbmm)(state, gradWeight, ScalarConvert::to(1), gradWeight, scale, gradOutput3d, tcolumns); // gradWeight: inputFrameSize x 1 x kW @@ -395,14 +395,14 @@ void THNN_(TemporalRowConvolution_accGradParameters)( THCudaBlas_Dgemv( #endif state, 't', k_, m_, scale, THCTensor_(data)(state, gradOutput_n), k_, - THCTensor_(data)(state, ones), 1, ScalarConvert::to(1), + THCTensor_(data)(state, ones), 1, ScalarConvert::to(1), THCTensor_(data)(state, gradBias), 1); #endif #ifdef THC_REAL_IS_HALF // half not supported due to baddbmm THCudaBlas_Hgemm(state, 't', 'n', m_, 1, k_, scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), m_); #endif } diff --git a/aten/src/THCUNN/generic/TemporalUpSamplingLinear.cu b/aten/src/THCUNN/generic/TemporalUpSamplingLinear.cu index 6199eef92826db..8561a95f0422cd 100644 --- a/aten/src/THCUNN/generic/TemporalUpSamplingLinear.cu +++ b/aten/src/THCUNN/generic/TemporalUpSamplingLinear.cu @@ -47,15 +47,15 @@ void THNN_(TemporalUpSamplingLinear_updateOutput)( THCTensor_(size)(state, input, 1), outputWidth); THCTensor_(zero)(state, output); - THCDeviceTensor idata = toDeviceTensor(state, input); - THCDeviceTensor odata = toDeviceTensor(state, output); + THCDeviceTensor idata = toDeviceTensor(state, input); + THCDeviceTensor odata = toDeviceTensor(state, output); THAssert(inputWidth > 0 && outputWidth > 0); const accreal rwidth = linear_upsampling_compute_scale(inputWidth, outputWidth, align_corners); const int num_kernels = outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - caffe_gpu_interp2_kernel << <<>>(num_kernels, rwidth, align_corners, idata, odata); THCudaCheck(cudaGetLastError()); } @@ -79,14 +79,14 @@ void THNN_(TemporalUpSamplingLinear_updateGradInput)( THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THCTensor_(resize3d)(state, gradInput, nbatch, nchannels, inputWidth); THCTensor_(zero)(state, gradInput); - THCDeviceTensor data1 = toDeviceTensor(state, gradInput); - THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); + THCDeviceTensor data1 = toDeviceTensor(state, gradInput); + THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); const accreal rwidth = linear_upsampling_compute_scale(inputWidth, outputWidth, align_corners); const int num_kernels = outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - caffe_gpu_interp2_kernel_backward << <<>>(num_kernels, rwidth, align_corners, data1, data2); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); diff --git a/aten/src/THCUNN/generic/TemporalUpSamplingNearest.cu b/aten/src/THCUNN/generic/TemporalUpSamplingNearest.cu index 91535f28d6ca0c..250db2d9e67583 100644 --- a/aten/src/THCUNN/generic/TemporalUpSamplingNearest.cu +++ b/aten/src/THCUNN/generic/TemporalUpSamplingNearest.cu @@ -46,13 +46,13 @@ void 
THNN_(TemporalUpSamplingNearest_updateOutput)( outputWidth); THCTensor_(zero)(state, output); - THCDeviceTensor idata = toDeviceTensor(state, input); - THCDeviceTensor odata = toDeviceTensor(state, output); + THCDeviceTensor idata = toDeviceTensor(state, input); + THCDeviceTensor odata = toDeviceTensor(state, output); const int num_kernels = outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - nearest_neighbor_3d_kernel << <<>>(num_kernels, idata, odata); THCudaCheck(cudaGetLastError()); } @@ -73,14 +73,14 @@ void THNN_(TemporalUpSamplingNearest_updateGradInput)( THCTensor_(resize3d)(state, gradInput, nbatch, nchannels, inputWidth); THCTensor_(zero)(state, gradInput); - THCDeviceTensor data1 = toDeviceTensor(state, gradInput); - THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); + THCDeviceTensor data1 = toDeviceTensor(state, gradInput); + THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); const int num_kernels = outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - nearest_neighbor_3d_kernel_backward << <<>>(num_kernels, data1, data2); THCudaCheck(cudaGetLastError()); diff --git a/aten/src/THCUNN/generic/Threshold.cu b/aten/src/THCUNN/generic/Threshold.cu index 794ad45ca187e5..1fac3f998b07ba 100644 --- a/aten/src/THCUNN/generic/Threshold.cu +++ b/aten/src/THCUNN/generic/Threshold.cu @@ -12,22 +12,22 @@ void THNN_(Threshold_updateOutput)( accreal val_, bool inplace) { - real threshold = ScalarConvert::to(threshold_); - real val = ScalarConvert::to(val_); + scalar_t threshold = ScalarConvert::to(threshold_); + scalar_t val = ScalarConvert::to(val_); THCUNN_assertSameGPU(state, 2, input, output); if (inplace) { - THC_pointwiseApply1(state, input, - ThresholdUpdateOutputIP(threshold, val) + THC_pointwiseApply1(state, input, + ThresholdUpdateOutputIP(threshold, val) ); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); - THC_pointwiseApply2(state, output, input, - ThresholdUpdateOutput(threshold, val) + THC_pointwiseApply2(state, output, input, + ThresholdUpdateOutput(threshold, val) ); } @@ -43,24 +43,24 @@ void THNN_(Threshold_updateGradInput)( accreal val_, bool inplace) { - real threshold = ScalarConvert::to(threshold_); - real val = ScalarConvert::to(val_); + scalar_t threshold = ScalarConvert::to(threshold_); + scalar_t val = ScalarConvert::to(val_); (void) val; THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradInput, gradOutput); if (inplace) { - THC_pointwiseApply2(state, gradOutput, input, - ThresholdUpdateGradInputIP(threshold) + THC_pointwiseApply2(state, gradOutput, input, + ThresholdUpdateGradInputIP(threshold) ); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); - THC_pointwiseApply3(state, gradInput, input, gradOutput, - ThresholdUpdateGradInput(threshold) + THC_pointwiseApply3(state, gradInput, input, gradOutput, + ThresholdUpdateGradInput(threshold) ); } diff --git a/aten/src/THCUNN/generic/VolumetricAdaptiveAveragePooling.cu b/aten/src/THCUNN/generic/VolumetricAdaptiveAveragePooling.cu index 3b34077d1cee84..3a84ba432c7d0e 100644 --- a/aten/src/THCUNN/generic/VolumetricAdaptiveAveragePooling.cu +++ b/aten/src/THCUNN/generic/VolumetricAdaptiveAveragePooling.cu @@ -20,8 +20,8 @@ void 
THNN_(VolumetricAdaptiveAveragePooling_updateOutput)( "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s"); - real *output_data; - real *input_data; + scalar_t *output_data; + scalar_t *input_data; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; @@ -99,8 +99,8 @@ void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; diff --git a/aten/src/THCUNN/generic/VolumetricAdaptiveMaxPooling.cu b/aten/src/THCUNN/generic/VolumetricAdaptiveMaxPooling.cu index adc23e15dabedc..f3754c846e5314 100644 --- a/aten/src/THCUNN/generic/VolumetricAdaptiveMaxPooling.cu +++ b/aten/src/THCUNN/generic/VolumetricAdaptiveMaxPooling.cu @@ -21,8 +21,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)( "4D or 5D (batch mode) tensor expected for input, but got: %s"); THCIndex_t *indices_data; - real *output_data; - real *input_data; + scalar_t *output_data; + scalar_t *input_data; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; @@ -105,8 +105,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput)( THCTensor_(zero)(state, gradInput); THCIndex_t *indices_data; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; diff --git a/aten/src/THCUNN/generic/VolumetricAveragePooling.cu b/aten/src/THCUNN/generic/VolumetricAveragePooling.cu index 62b93f953c95f6..e730a79be2bca5 100644 --- a/aten/src/THCUNN/generic/VolumetricAveragePooling.cu +++ b/aten/src/THCUNN/generic/VolumetricAveragePooling.cu @@ -211,10 +211,10 @@ void THNN_(VolumetricAveragePooling_updateOutput)( THCTensor_(retain)(state, output); } - THCDeviceTensor cudaInput; - THCDeviceTensor cudaOutput; - cudaInput = toDeviceTensor(state, input); - cudaOutput = toDeviceTensor(state, output); + THCDeviceTensor cudaInput; + THCDeviceTensor cudaOutput; + cudaInput = toDeviceTensor(state, input); + cudaOutput = toDeviceTensor(state, output); int totalZ = outputTime * inputSlices * batchSize; int offsetZ = 0; @@ -234,7 +234,7 @@ void THNN_(VolumetricAveragePooling_updateOutput)( LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7); default: - cuda_VolumetricAveragePooling_updateOutput + cuda_VolumetricAveragePooling_updateOutput <<>>( cudaInput, cudaOutput, @@ -322,10 +322,10 @@ void THNN_(VolumetricAveragePooling_updateGradInput)( THCTensor_(retain)(state, gradInput); } - THCDeviceTensor cudaGradInput; - THCDeviceTensor cudaGradOutput; - cudaGradInput = toDeviceTensor(state, gradInput); - cudaGradOutput = toDeviceTensor(state, gradOutput); + THCDeviceTensor cudaGradInput; + THCDeviceTensor cudaGradOutput; + cudaGradInput = toDeviceTensor(state, gradInput); + cudaGradOutput = toDeviceTensor(state, gradOutput); dim3 block(32, 8); @@ -340,7 +340,7 @@ void THNN_(VolumetricAveragePooling_updateGradInput)( dim3 grid(THCCeilDiv(inputWidth, static_cast(block.x)), THCCeilDiv(inputHeight, static_cast(block.y)), totalZ > 65535 ? 
65535 : totalZ); - cuda_VolumetricAveragePooling_updateGradInput_Stride1 + cuda_VolumetricAveragePooling_updateGradInput_Stride1 <<>>( cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW), offsetZ); THCudaCheck(cudaGetLastError()); @@ -358,14 +358,14 @@ void THNN_(VolumetricAveragePooling_updateGradInput)( totalZ > 65535 ? 65535 : totalZ); if (kernelsOverlap) { - cuda_VolumetricAveragePooling_updateGradInput_atomicAdd + cuda_VolumetricAveragePooling_updateGradInput_atomicAdd <<>>( cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); } else { - cuda_VolumetricAveragePooling_updateGradInput + cuda_VolumetricAveragePooling_updateGradInput <<>>( cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); diff --git a/aten/src/THCUNN/generic/VolumetricConvolution.cu b/aten/src/THCUNN/generic/VolumetricConvolution.cu index 94fc702ff124e9..827ee4002c5874 100644 --- a/aten/src/THCUNN/generic/VolumetricConvolution.cu +++ b/aten/src/THCUNN/generic/VolumetricConvolution.cu @@ -169,7 +169,7 @@ void THNN_(VolumetricConvolution_updateOutput)( { // Resize plane and fill with ones... THCTensor_(resize3d)(state, ones, outputHeight, outputWidth, outputDepth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Helpers @@ -202,10 +202,10 @@ void THNN_(VolumetricConvolution_updateOutput)( state, 't', 'n', n_, m_, k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, output_n), n_ ); } else { @@ -237,10 +237,10 @@ void THNN_(VolumetricConvolution_updateOutput)( state, 'n', 'n', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, columns), n, THCTensor_(data)(state, weight), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, output_n), n ); } @@ -337,15 +337,15 @@ void THNN_(VolumetricConvolution_updateGradInput)( state, 'n', 't', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradOutput_n), n, THCTensor_(data)(state, weight), m, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, gradColumns), n ); // Unpack columns back into input: - col2im3d( + col2im3d( THCState_getCurrentStream(state), THCTensor_(data)(state, gradColumns), nInputPlane, inputHeight, inputWidth, inputDepth, kT, kH, kW, padT, padH, padW, dT, dH, dW, @@ -381,7 +381,7 @@ void THNN_(VolumetricConvolution_accGradParameters)( int padT, int padW, int padH, accreal scale_) { - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCTensor *columns = finput; THCTensor *ones = fgradInput; THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones); @@ -422,7 +422,7 @@ void THNN_(VolumetricConvolution_accGradParameters)( { // Resize plane and fill with ones... 
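/*
 * Every kernel launch in these THCUNN hunks follows one pattern: a __global__
 * kernel templated on the element type, launched with CUDA's triple-chevron
 * syntax on the current THC stream.  A minimal self-contained sketch of that
 * pattern (the kernel `scale_elements` is hypothetical, not part of this
 * patch; THCCeilDiv is THC's ceiling-division helper used throughout):
 *
 *   template <typename scalar_t>
 *   __global__ void scale_elements(int n, scalar_t* data, scalar_t alpha) {
 *     int i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per element
 *     if (i < n) data[i] *= alpha;
 *   }
 *
 *   // enough blocks of num_threads threads to cover n, queued on stream:
 *   scale_elements<scalar_t>
 *     <<<THCCeilDiv(n, num_threads), num_threads, 0, stream>>>(n, ptr, alpha);
 */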
THCTensor_(resize3d)(state, ones, outputHeight, outputWidth, outputDepth);
-    THCTensor_(fill)(state, ones, ScalarConvert<int, real>::to(1));
+    THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
   }
 
   // Resize temporary columns
@@ -467,7 +467,7 @@ void THNN_(VolumetricConvolution_accGradParameters)(
       scale,
       THCTensor_(data)(state, columns), k,
       THCTensor_(data)(state, gradOutput_n), k,
-      ScalarConvert<int, real>::to(1),
+      ScalarConvert<int, scalar_t>::to(1),
       THCTensor_(data)(state, gradWeight), n
     );
 
@@ -491,7 +491,7 @@ void THNN_(VolumetricConvolution_accGradParameters)(
       scale,
       THCTensor_(data)(state, gradOutput_n), k_,
       THCTensor_(data)(state, ones), 1,
-      ScalarConvert<int, real>::to(1),
+      ScalarConvert<int, scalar_t>::to(1),
       THCTensor_(data)(state, gradBias), 1
     );
     #endif
@@ -503,7 +503,7 @@ void THNN_(VolumetricConvolution_accGradParameters)(
       scale,
       THCTensor_(data)(state, gradOutput_n), k_,
       THCTensor_(data)(state, ones), k_,
-      ScalarConvert<int, real>::to(1),
+      ScalarConvert<int, scalar_t>::to(1),
       THCTensor_(data)(state, gradBias), m_
     );
     #endif
diff --git a/aten/src/THCUNN/generic/VolumetricDilatedConvolution.cu b/aten/src/THCUNN/generic/VolumetricDilatedConvolution.cu
index d6ffba3519553c..266af372019771 100644
--- a/aten/src/THCUNN/generic/VolumetricDilatedConvolution.cu
+++ b/aten/src/THCUNN/generic/VolumetricDilatedConvolution.cu
@@ -143,7 +143,7 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(
   if (ones->dim() != 2 || ones->size(0)*ones->size(1)*ones->size(2) < outputDepth*outputHeight*outputWidth) {
     // Resize plane and fill with ones...
     THCTensor_(resize3d)(state, ones, outputDepth, outputHeight, outputWidth);
-    THCTensor_(fill)(state, ones, ScalarConvert<int, real>::to(1));
+    THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
   }
 
   // Helpers
@@ -175,10 +175,10 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(
         state,
         't', 'n',
         n_, m_, k_,
-        ScalarConvert<int, real>::to(1),
+        ScalarConvert<int, scalar_t>::to(1),
         THCTensor_(data)(state, ones), k_,
         THCTensor_(data)(state, bias), k_,
-        ScalarConvert<int, real>::to(0),
+        ScalarConvert<int, scalar_t>::to(0),
         THCTensor_(data)(state, output_n), n_
       );
     } else {
@@ -213,10 +213,10 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(
       state,
       'n', 'n',
       n, m, k,
-      ScalarConvert<int, real>::to(1),
+      ScalarConvert<int, scalar_t>::to(1),
       THCTensor_(data)(state, columns), n,
       THCTensor_(data)(state, weight), k,
-      ScalarConvert<int, real>::to(1),
+      ScalarConvert<int, scalar_t>::to(1),
       THCTensor_(data)(state, output_n), n
     );
   }
@@ -314,15 +314,15 @@ void THNN_(VolumetricDilatedConvolution_updateGradInput)(
       state,
       'n', 't',
       n, m, k,
-      ScalarConvert<int, real>::to(1),
+      ScalarConvert<int, scalar_t>::to(1),
       THCTensor_(data)(state, gradOutput_n), n,
       THCTensor_(data)(state, weight), m,
-      ScalarConvert<int, real>::to(0),
+      ScalarConvert<int, scalar_t>::to(0),
       THCTensor_(data)(state, gradColumns), n
     );
 
     // Unpack columns back into input:
-    col2vol<real, accreal>(
+    col2vol<scalar_t, accreal>(
       THCState_getCurrentStream(state),
       THCTensor_(data)(state, gradColumns),
       nInputPlane, inputDepth, inputHeight, inputWidth,
@@ -363,7 +363,7 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)(
     int dilationT, int dilationW, int dilationH,
     accreal scale_)
 {
-  real scale = ScalarConvert<accreal, real>::to(scale_);
+  scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
   THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, gradBias, columns, ones);
   THNN_(VolumetricDilatedConvolution_shapeCheck)(
       state, input, gradOutput, gradWeight, gradBias,
@@ -397,7 +397,7 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)(
   if (ones->dim() != 3 || ones->size(0)*ones->size(1)*ones->size(2) < outputDepth*outputHeight*outputWidth) {
     // Resize plane and fill with ones...
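/*
 * ScalarConvert is THCUNN's explicit conversion helper between the storage
 * type (`real`, renamed `scalar_t` in this series) and other scalar types
 * such as the accumulation type `accreal`.  Calls of the form
 * ScalarConvert<int, scalar_t>::to(1) and ScalarConvert<int, scalar_t>::to(0)
 * build the gemm alpha/beta constants in the tensor's element type.  A rough
 * sketch of the idea -- not the exact THCNumerics implementation, which also
 * special-cases the half type:
 *
 *   template <typename In, typename Out>
 *   struct ScalarConvert {
 *     static __host__ __device__ Out to(const In v) {
 *       return static_cast<Out>(v);  // e.g. accreal (double) -> scalar_t (float)
 *     }
 *   };
 */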
THCTensor_(resize3d)(state, ones, outputDepth, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Resize temporary columns @@ -447,7 +447,7 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, columns), k, THCTensor_(data)(state, gradOutput_n), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradWeight), n ); } @@ -472,7 +472,7 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), 1, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), 1 ); #endif @@ -484,7 +484,7 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), m_ ); #endif diff --git a/aten/src/THCUNN/generic/VolumetricDilatedMaxPooling.cu b/aten/src/THCUNN/generic/VolumetricDilatedMaxPooling.cu index 09380645390d3d..9cf2c00ec3079d 100644 --- a/aten/src/THCUNN/generic/VolumetricDilatedMaxPooling.cu +++ b/aten/src/THCUNN/generic/VolumetricDilatedMaxPooling.cu @@ -235,10 +235,10 @@ void THNN_(VolumetricDilatedMaxPooling_updateOutput)( THCTensor_(retain)(state, output); } - real* inputData = THCTensor_(data)(state, input); + scalar_t* inputData = THCTensor_(data)(state, input); - THCDeviceTensor cudaOutput; - cudaOutput = toDeviceTensor(state, output); + THCDeviceTensor cudaOutput; + cudaOutput = toDeviceTensor(state, output); THCIndexTensor *indices1 = THCIndexTensor_(newWithStorage)( state, THCIndexTensor_(storage)(state, indices), @@ -354,9 +354,9 @@ void THNN_(VolumetricDilatedMaxPooling_updateGradInput)( THCTensor_(retain)(state, gradInput); } - THCDeviceTensor cudaGradOutput; - cudaGradOutput = toDeviceTensor(state, gradOutput); - real* gradInputData = THCTensor_(data)(state, gradInput); + THCDeviceTensor cudaGradOutput; + cudaGradOutput = toDeviceTensor(state, gradOutput); + scalar_t* gradInputData = THCTensor_(data)(state, gradInput); THCIndexTensor *indices1 = THCIndexTensor_(newWithStorage)( state, THCIndexTensor_(storage)(state, indices), diff --git a/aten/src/THCUNN/generic/VolumetricFractionalMaxPooling.cu b/aten/src/THCUNN/generic/VolumetricFractionalMaxPooling.cu index 9aae36fb3f281b..eedb8e681bd0d9 100644 --- a/aten/src/THCUNN/generic/VolumetricFractionalMaxPooling.cu +++ b/aten/src/THCUNN/generic/VolumetricFractionalMaxPooling.cu @@ -45,11 +45,11 @@ void THNN_(VolumetricFractionalMaxPooling_updateOutput)( "poolSizeT (%d) too large relative to input time (%d)", poolSizeT, inputT); - THCDeviceTensor devInput; - THCDeviceTensor devOutput; + THCDeviceTensor devInput; + THCDeviceTensor devOutput; THCDeviceTensor devIndices; - THCDeviceTensor devSamples = - toDeviceTensor(state, randomSamples); + THCDeviceTensor devSamples = + toDeviceTensor(state, randomSamples); if (numInputDims == 4) { /* resize output */ @@ -57,16 +57,16 @@ void THNN_(VolumetricFractionalMaxPooling_updateOutput)( /* indices will contain the locations for each output point */ THCIndexTensor_(resize4d)(state, indices, numPlanes, outputH, outputW, outputT); - devInput = toDeviceTensor(state, input).upcastOuter<5>(); - devOutput = toDeviceTensor(state, output).upcastOuter<5>(); + devInput = toDeviceTensor(state, input).upcastOuter<5>(); + devOutput = toDeviceTensor(state, output).upcastOuter<5>(); devIndices = 
toDeviceTensor(state, indices).upcastOuter<5>(); } else { THCTensor_(resize5d)(state, output, numBatch, numPlanes, outputH, outputW, outputT); /* indices will contain the locations for each output point */ THCIndexTensor_(resize5d)(state, indices, numBatch, numPlanes, outputH, outputW, outputT); - devInput = toDeviceTensor(state, input); - devOutput = toDeviceTensor(state, output); + devInput = toDeviceTensor(state, input); + devOutput = toDeviceTensor(state, output); devIndices = toDeviceTensor(state, indices); } @@ -79,7 +79,7 @@ void THNN_(VolumetricFractionalMaxPooling_updateOutput)( dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize); #define SFMP_UPDATE_OUTPUT(POOL_W) \ - VolumetricFractionalMaxPooling_updateOutput \ + VolumetricFractionalMaxPooling_updateOutput \ <<>>( \ devInput, devOutput, devIndices, devSamples, poolSizeT, poolSizeW, poolSizeH); @@ -136,18 +136,18 @@ void THNN_(VolumetricFractionalMaxPooling_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - THCDeviceTensor devGradInput; - THCDeviceTensor devGradOutput; + THCDeviceTensor devGradInput; + THCDeviceTensor devGradOutput; THCDeviceTensor devIndices; /* backprop */ if (numInputDims == 4) { - devGradInput = toDeviceTensor(state, gradInput).upcastOuter<5>(); - devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<5>(); + devGradInput = toDeviceTensor(state, gradInput).upcastOuter<5>(); + devGradOutput = toDeviceTensor(state, gradOutput).upcastOuter<5>(); devIndices = toDeviceTensor(state, indices).upcastOuter<5>(); } else { - devGradInput = toDeviceTensor(state, gradInput); - devGradOutput = toDeviceTensor(state, gradOutput); + devGradInput = toDeviceTensor(state, gradInput); + devGradOutput = toDeviceTensor(state, gradOutput); devIndices = toDeviceTensor(state, indices); } diff --git a/aten/src/THCUNN/generic/VolumetricFullDilatedConvolution.cu b/aten/src/THCUNN/generic/VolumetricFullDilatedConvolution.cu index 10a5fdc2643193..32534d3e6461e8 100644 --- a/aten/src/THCUNN/generic/VolumetricFullDilatedConvolution.cu +++ b/aten/src/THCUNN/generic/VolumetricFullDilatedConvolution.cu @@ -147,7 +147,7 @@ void THNN_(VolumetricFullDilatedConvolution_updateOutput)( if (ones->dim() != 3 || ones->size(0)*ones->size(1)*ones->size(2) < outputDepth*outputHeight*outputWidth) { // Resize plane and fill with ones... 
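/*
 * The fractional max pooling hunks above reuse one 5D kernel for both the
 * batch and non-batch layouts: a 4D tensor is viewed as a batch of one via
 * toDeviceTensor(...).upcastOuter<5>(), which prepends a size-1 outer
 * dimension.  A sketch of the equivalent size/stride transform, as an
 * illustration only (not the THCDeviceTensor code itself):
 *
 *   struct View4 { int size[4]; int stride[4]; };
 *   struct View5 { int size[5]; int stride[5]; };
 *
 *   View5 upcastOuter(const View4& t) {
 *     View5 r;
 *     r.size[0]   = 1;                          // new outermost batch dim
 *     r.stride[0] = t.size[0] * t.stride[0];    // any stride works for size 1
 *     for (int i = 0; i < 4; ++i) {
 *       r.size[i + 1]   = t.size[i];
 *       r.stride[i + 1] = t.stride[i];
 *     }
 *     return r;
 *   }
 */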
THCTensor_(resize3d)(state, ones, outputDepth, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Helpers @@ -177,15 +177,15 @@ void THNN_(VolumetricFullDilatedConvolution_updateOutput)( state, 'n', 't', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, input_n), n, THCTensor_(data)(state, weight), m, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, columns), n ); // Unpack columns back into input: - col2vol( + col2vol( THCState_getCurrentStream(state), THCTensor_(data)(state, columns), nOutputPlane, outputDepth, outputHeight, outputWidth, @@ -214,10 +214,10 @@ void THNN_(VolumetricFullDilatedConvolution_updateOutput)( state, 't', 'n', n_, m_, k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, output_n), n_ ); } @@ -331,10 +331,10 @@ void THNN_(VolumetricFullDilatedConvolution_updateGradInput)( state, 'n', 'n', n, m, k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradColumns), n, THCTensor_(data)(state, weight), k, - ScalarConvert::to(0), + ScalarConvert::to(0), THCTensor_(data)(state, gradInput_n), n ); } @@ -375,7 +375,7 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( THCTensor *columns = finput; THCTensor *ones = fgradInput; - real scale = ScalarConvert::to(scale_); + scalar_t scale = ScalarConvert::to(scale_); THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones); THNN_(VolumetricFullDilatedConvolution_shapeCheck)( @@ -425,7 +425,7 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( if (ones->dim() != 3 || ones->size(0)*ones->size(1)*ones->size(2) < outputDepth*outputHeight*outputWidth) { // Resize plane and fill with ones... 
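/*
 * The accGradParameters entry points take their scalars as `accreal` (the
 * accumulation type: double for float/double tensors, float for half) and
 * narrow them exactly once, at the boundary, via
 *   scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
 * Accumulating in the wider type before narrowing keeps reductions stable.
 * A minimal illustration with float storage and double accumulation:
 *
 *   float sum_narrow_once(const float* x, long n) {
 *     double acc = 0;               // accreal: avoids float round-off drift
 *     for (long i = 0; i < n; ++i) acc += x[i];
 *     return (float) acc;           // scalar_t: narrowed once, at the end
 *   }
 */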
THCTensor_(resize3d)(state, ones, outputDepth, outputHeight, outputWidth); - THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); } // Resize temporary columns @@ -476,7 +476,7 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, columns), k, THCTensor_(data)(state, input_n), k, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradWeight), n ); } @@ -501,7 +501,7 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), 1, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), 1 ); #endif @@ -513,7 +513,7 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, - ScalarConvert::to(1), + ScalarConvert::to(1), THCTensor_(data)(state, gradBias), m_ ); #endif diff --git a/aten/src/THCUNN/generic/VolumetricMaxUnpooling.cu b/aten/src/THCUNN/generic/VolumetricMaxUnpooling.cu index 708b1f512fc7b7..bd68f220b5451c 100644 --- a/aten/src/THCUNN/generic/VolumetricMaxUnpooling.cu +++ b/aten/src/THCUNN/generic/VolumetricMaxUnpooling.cu @@ -137,12 +137,12 @@ void THNN_(VolumetricMaxUnpooling_updateOutput)( THCIndexTensor_(free)(state, old_indices); } - real* outputData = THCTensor_(data)(state, output); + scalar_t* outputData = THCTensor_(data)(state, output); - THCDeviceTensor cudaInput; + THCDeviceTensor cudaInput; THCDeviceTensor cudaIndices; - cudaInput = toDeviceTensor(state, input); + cudaInput = toDeviceTensor(state, input); cudaIndices = toDeviceTensor(state, indices); int totalZ = inputTime * inputSlices * batchSize; @@ -231,12 +231,12 @@ void THNN_(VolumetricMaxUnpooling_updateGradInput)( THCTensor_(retain)(state, gradInput); } - real* gradOutputData = THCTensor_(data)(state, gradOutput); + scalar_t* gradOutputData = THCTensor_(data)(state, gradOutput); - THCDeviceTensor cudaGradInput; + THCDeviceTensor cudaGradInput; THCDeviceTensor cudaIndices; - cudaGradInput = toDeviceTensor(state, gradInput); + cudaGradInput = toDeviceTensor(state, gradInput); cudaIndices = toDeviceTensor(state, indices); int totalZ = inputTime * inputSlices * batchSize; diff --git a/aten/src/THCUNN/generic/VolumetricReplicationPadding.cu b/aten/src/THCUNN/generic/VolumetricReplicationPadding.cu index a78cbc6a9815cc..17421e3bd1a073 100644 --- a/aten/src/THCUNN/generic/VolumetricReplicationPadding.cu +++ b/aten/src/THCUNN/generic/VolumetricReplicationPadding.cu @@ -93,20 +93,20 @@ void THNN_(VolumetricReplicationPadding_updateOutput)( int outputH = inputH + ptop + pbottom; int outputW = inputW + pleft + pright; - THCDeviceTensor devInput; - THCDeviceTensor devOutput; + THCDeviceTensor devInput; + THCDeviceTensor devOutput; if (numInputDims == 4) { THCTensor_(resize4d)(state, output, numPlanes, outputD, outputH, outputW); - devInput = toDeviceTensor(state, input).upcastOuter<5>(); - devOutput = toDeviceTensor(state, output).upcastOuter<5>(); + devInput = toDeviceTensor(state, input).upcastOuter<5>(); + devOutput = toDeviceTensor(state, output).upcastOuter<5>(); } else { THCTensor_(resize5d)(state, output, numBatch, numPlanes, outputD, outputH, outputW); - devInput = toDeviceTensor(state, input); - devOutput = toDeviceTensor(state, output); + devInput = toDeviceTensor(state, input); + devOutput = toDeviceTensor(state, output); } int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3) * 
@@ -116,7 +116,7 @@ void THNN_(VolumetricReplicationPadding_updateOutput)( devOutput.getSize(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); - VolumetricReplicationPadding_updateOutput<<>>( + VolumetricReplicationPadding_updateOutput<<>>( devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright); } @@ -148,16 +148,16 @@ void THNN_(VolumetricReplicationPadding_updateGradInput)( THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); - THCDeviceTensor devGradInput; - THCDeviceTensor devGradOutput; + THCDeviceTensor devGradInput; + THCDeviceTensor devGradOutput; if (numInputDims == 4) { - devGradInput = toDeviceTensor(state, gradInput).upcastOuter<5>(); + devGradInput = toDeviceTensor(state, gradInput).upcastOuter<5>(); devGradOutput = - toDeviceTensor(state, gradOutput).upcastOuter<5>(); + toDeviceTensor(state, gradOutput).upcastOuter<5>(); } else { - devGradInput = toDeviceTensor(state, gradInput); - devGradOutput = toDeviceTensor(state, gradOutput); + devGradInput = toDeviceTensor(state, gradInput); + devGradOutput = toDeviceTensor(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3) * diff --git a/aten/src/THCUNN/generic/VolumetricUpSamplingNearest.cu b/aten/src/THCUNN/generic/VolumetricUpSamplingNearest.cu index cd289a021cd2f1..36534a15e4b3b3 100644 --- a/aten/src/THCUNN/generic/VolumetricUpSamplingNearest.cu +++ b/aten/src/THCUNN/generic/VolumetricUpSamplingNearest.cu @@ -59,13 +59,13 @@ void THNN_(VolumetricUpSamplingNearest_updateOutput)( outputWidth); THCTensor_(zero)(state, output); - THCDeviceTensor idata = toDeviceTensor(state, input); - THCDeviceTensor odata = toDeviceTensor(state, output); + THCDeviceTensor idata = toDeviceTensor(state, input); + THCDeviceTensor odata = toDeviceTensor(state, output); const int num_kernels = outputDepth * outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - nearest_neighbor_5d_kernel << <<>>(num_kernels, idata, odata); THCudaCheck(cudaGetLastError()); } @@ -93,12 +93,12 @@ void THNN_(VolumetricUpSamplingNearest_updateGradInput)( THCTensor_(resize5d)(state, gradInput, nbatch, nchannels, inputDepth, inputHeight, inputWidth); THCTensor_(zero)(state, gradInput); - THCDeviceTensor data1 = toDeviceTensor(state, gradInput); - THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); + THCDeviceTensor data1 = toDeviceTensor(state, gradInput); + THCDeviceTensor data2 = toDeviceTensor(state, gradOutput); const int num_kernels = outputDepth * outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); - nearest_neighbor_5d_kernel_backward << <<>>(num_kernels, data1, data2); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); diff --git a/aten/src/THCUNN/generic/VolumetricUpSamplingTrilinear.cu b/aten/src/THCUNN/generic/VolumetricUpSamplingTrilinear.cu index 1dbad86a7a91b9..9e230e1cb07e71 100644 --- a/aten/src/THCUNN/generic/VolumetricUpSamplingTrilinear.cu +++ b/aten/src/THCUNN/generic/VolumetricUpSamplingTrilinear.cu @@ -55,8 +55,8 @@ void THNN_(VolumetricUpSamplingTrilinear_updateOutput)( THCTensor_(size)(state, input, 1), outputDepth, outputHeight, outputWidth); THCTensor_(zero)(state, output); - THCDeviceTensor idata = toDeviceTensor(state, input); - THCDeviceTensor odata = toDeviceTensor(state, output); + 
THCDeviceTensor<scalar_t, 5> idata = toDeviceTensor<scalar_t, 5>(state, input);
+  THCDeviceTensor<scalar_t, 5> odata = toDeviceTensor<scalar_t, 5>(state, output);
   THAssert(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 &&
            outputDepth > 0 && outputHeight > 0 && outputWidth > 0);
   const accreal rdepth = linear_upsampling_compute_scale<accreal>(inputDepth, outputDepth, align_corners);
   const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners);
@@ -65,7 +65,7 @@ void THNN_(VolumetricUpSamplingTrilinear_updateOutput)(
   const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
   cudaStream_t stream = THCState_getCurrentStream(state);
-  caffe_gpu_interp2_kernel<real, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata);
+  caffe_gpu_interp2_kernel<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata);
   THCudaCheck(cudaGetLastError());
 }
@@ -94,8 +94,8 @@ void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)(
   THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
   THCTensor_(resize5d)(state, gradInput, nbatch, nchannels, inputDepth, inputHeight, inputWidth);
   THCTensor_(zero)(state, gradInput);
-  THCDeviceTensor<real, 5> data1 = toDeviceTensor<real, 5>(state, gradInput);
-  THCDeviceTensor<real, 5> data2 = toDeviceTensor<real, 5>(state, gradOutput);
+  THCDeviceTensor<scalar_t, 5> data1 = toDeviceTensor<scalar_t, 5>(state, gradInput);
+  THCDeviceTensor<scalar_t, 5> data2 = toDeviceTensor<scalar_t, 5>(state, gradOutput);
   const accreal rdepth = linear_upsampling_compute_scale<accreal>(inputDepth, outputDepth, align_corners);
   const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners);
   const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
@@ -103,7 +103,7 @@ void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)(
   const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
   cudaStream_t stream = THCState_getCurrentStream(state);
-  caffe_gpu_interp2_kernel_backward<real, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rdepth, rheight, rwidth, align_corners, data1, data2);
+  caffe_gpu_interp2_kernel_backward<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rdepth, rheight, rwidth, align_corners, data1, data2);
   THCudaCheck(cudaGetLastError());
   THCTensor_(free)(state, gradOutput);
diff --git a/aten/src/THNN/generic/Abs.c b/aten/src/THNN/generic/Abs.c
index 28721ec8eaf247..61e9a519bd431d 100644
--- a/aten/src/THNN/generic/Abs.c
+++ b/aten/src/THNN/generic/Abs.c
@@ -19,8 +19,8 @@ void THNN_(Abs_updateGradInput)(
 {
   THNN_CHECK_NELEMENT(input, gradOutput);
   THTensor_(resizeAs)(gradInput, input);
-  TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
-    real z = *input_data;
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input,
+    scalar_t z = *input_data;
     *gradInput_data = *gradOutput_data * (z >= 0 ?
1 : -1); ); } diff --git a/aten/src/THNN/generic/AbsCriterion.c b/aten/src/THNN/generic/AbsCriterion.c index 05f14773ef5658..2125224246397f 100644 --- a/aten/src/THNN/generic/AbsCriterion.c +++ b/aten/src/THNN/generic/AbsCriterion.c @@ -13,15 +13,15 @@ void THNN_(AbsCriterion_updateOutput)( if (reduction == Reduction::None) { THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY3(real, input, real, target, real, output, + TH_TENSOR_APPLY3(scalar_t, input, scalar_t, target, scalar_t, output, *output_data = fabs(*input_data - *target_data); ); return; } - real sum = 0; + scalar_t sum = 0; THTensor_(resize1d)(output, 1); - TH_TENSOR_APPLY2(real, input, real, target, + TH_TENSOR_APPLY2(scalar_t, input, scalar_t, target, sum += fabs(*input_data - *target_data); ); @@ -44,19 +44,19 @@ void THNN_(AbsCriterion_updateGradInput)( if (reduction == Reduction::None) { THNN_CHECK_SHAPE(gradOutput, input); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target, *gradInput_data = ((*input_data - *target_data) >= 0 ? 1 : -1); ); - TH_TENSOR_APPLY2(real, gradInput, real, gradOutput, + TH_TENSOR_APPLY2(scalar_t, gradInput, scalar_t, gradOutput, *gradInput_data *= *gradOutput_data; ); return; } THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1); - real norm = (reduction == Reduction::ElementwiseMean ? 1./((real)THTensor_(nElement)(input)) : 1.) * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0); + scalar_t norm = (reduction == Reduction::ElementwiseMean ? 1./((scalar_t)THTensor_(nElement)(input)) : 1.) * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target, *gradInput_data = (*input_data - *target_data) >= 0 ? norm : -norm; ); } diff --git a/aten/src/THNN/generic/BCECriterion.c b/aten/src/THNN/generic/BCECriterion.c index 079493e3056500..109368541c5916 100644 --- a/aten/src/THNN/generic/BCECriterion.c +++ b/aten/src/THNN/generic/BCECriterion.c @@ -4,7 +4,7 @@ #define EPS 1e-12 -static inline real safe_log(real a) { +static inline scalar_t safe_log(scalar_t a) { if (a == 0.) { return log(EPS); } @@ -24,9 +24,9 @@ void THNN_(BCECriterion_updateOutput)( if (reduction == Reduction::None) { THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY3(real, input, real, target, real, output, - real x = *input_data; - real y = *target_data; + TH_TENSOR_APPLY3(scalar_t, input, scalar_t, target, scalar_t, output, + scalar_t x = *input_data; + scalar_t y = *target_data; THAssertMsg(x >= 0. && x <= 1., "input value should be between 0~1, but got %f", (double) x); @@ -39,22 +39,22 @@ void THNN_(BCECriterion_updateOutput)( } THTensor_(resize1d)(output, 1); - real sum = 0; + scalar_t sum = 0; if (weights) { - TH_TENSOR_APPLY3(real, input, real, target, real, weights, - real x = *input_data; - real y = *target_data; - real w = *weights_data; + TH_TENSOR_APPLY3(scalar_t, input, scalar_t, target, scalar_t, weights, + scalar_t x = *input_data; + scalar_t y = *target_data; + scalar_t w = *weights_data; THAssertMsg(x >= 0. && x <= 1., "input value should be between 0~1, but got %f", (double) x); sum -= (safe_log(x) * y + safe_log(1. - x) * (1. - y)) * w; ); } else { - TH_TENSOR_APPLY2(real, input, real, target, - real x = *input_data; - real y = *target_data; + TH_TENSOR_APPLY2(scalar_t, input, scalar_t, target, + scalar_t x = *input_data; + scalar_t y = *target_data; THAssertMsg(x >= 0. 
&& x <= 1., "input value should be between 0~1, but got %f", (double) x); @@ -84,14 +84,14 @@ void THNN_(BCECriterion_updateGradInput)( if (reduction == Reduction::None) { THNN_CHECK_NELEMENT(gradOutput, input); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, - real x = *input_data; - real y = *target_data; + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target, + scalar_t x = *input_data; + scalar_t y = *target_data; *gradInput_data = -(y - x) / ((1. - x + EPS) * (x + EPS)); ); if (weights) { - TH_TENSOR_APPLY3(real, gradInput, real, weights, real, gradOutput, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, weights, scalar_t, gradOutput, *gradInput_data = *gradInput_data * *weights_data * *gradOutput_data; ); } else { @@ -101,11 +101,11 @@ void THNN_(BCECriterion_updateGradInput)( } THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1); - real norm = (reduction == Reduction::ElementwiseMean ? 1./((real)THTensor_(nElement)(input)) : 1.); + scalar_t norm = (reduction == Reduction::ElementwiseMean ? 1./((scalar_t)THTensor_(nElement)(input)) : 1.); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, - real x = *input_data; - real y = *target_data; + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target, + scalar_t x = *input_data; + scalar_t y = *target_data; *gradInput_data = - norm * (y - x) / ((1. - x + EPS) * (x + EPS)) * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0); ); diff --git a/aten/src/THNN/generic/BatchNormalization.c b/aten/src/THNN/generic/BatchNormalization.c index 3f4e13f2bd1b21..1d481cb59ff4e8 100644 --- a/aten/src/THNN/generic/BatchNormalization.c +++ b/aten/src/THNN/generic/BatchNormalization.c @@ -24,37 +24,37 @@ void THNN_(BatchNormalization_updateOutput)( THTensor *in = THTensor_(newSelect)(input, 1, f); THTensor *out = THTensor_(newSelect)(output, 1, f); - real mean, invstd; + scalar_t mean, invstd; if (train) { // compute mean per input accreal sum = 0; - TH_TENSOR_APPLY(real, in, sum += *in_data;); + TH_TENSOR_APPLY(scalar_t, in, sum += *in_data;); - mean = (real) sum / n; - THTensor_(set1d)(save_mean, f, (real) mean); + mean = (scalar_t) sum / n; + THTensor_(set1d)(save_mean, f, (scalar_t) mean); // compute variance per input sum = 0; - TH_TENSOR_APPLY(real, in, + TH_TENSOR_APPLY(scalar_t, in, sum += (*in_data - mean) * (*in_data - mean);); if (sum == 0 && eps == 0.0) { invstd = 0; } else { - invstd = (real) (1 / sqrt(sum/n + eps)); + invstd = (scalar_t) (1 / sqrt(sum/n + eps)); } - THTensor_(set1d)(save_std, f, (real) invstd); + THTensor_(set1d)(save_std, f, (scalar_t) invstd); // update running averages if (running_mean) { THTensor_(set1d)(running_mean, f, - (real) (momentum * mean + (1 - momentum) * THTensor_(get1d)(running_mean, f))); + (scalar_t) (momentum * mean + (1 - momentum) * THTensor_(get1d)(running_mean, f))); } if (running_var) { accreal unbiased_var = sum / (n - 1); THTensor_(set1d)(running_var, f, - (real) (momentum * unbiased_var + (1 - momentum) * THTensor_(get1d)(running_var, f))); + (scalar_t) (momentum * unbiased_var + (1 - momentum) * THTensor_(get1d)(running_var, f))); } } else { mean = THTensor_(get1d)(running_mean, f); @@ -62,11 +62,11 @@ void THNN_(BatchNormalization_updateOutput)( } // compute output - real w = weight ? THTensor_(get1d)(weight, f) : 1; - real b = bias ? THTensor_(get1d)(bias, f) : 0; + scalar_t w = weight ? THTensor_(get1d)(weight, f) : 1; + scalar_t b = bias ? 
THTensor_(get1d)(bias, f) : 0; - TH_TENSOR_APPLY2(real, in, real, out, - *out_data = (real) (((*in_data - mean) * invstd) * w + b);); + TH_TENSOR_APPLY2(scalar_t, in, scalar_t, out, + *out_data = (scalar_t) (((*in_data - mean) * invstd) * w + b);); c10::raw::intrusive_ptr::decref(out); c10::raw::intrusive_ptr::decref(in); @@ -93,8 +93,8 @@ void THNN_(BatchNormalization_backward)( for (f = 0; f < nInput; ++f) { THTensor *in = THTensor_(newSelect)(input, 1, f); THTensor *gradOut = THTensor_(newSelect)(gradOutput, 1, f); - real w = weight ? THTensor_(get1d)(weight, f) : 1; - real mean, invstd; + scalar_t w = weight ? THTensor_(get1d)(weight, f) : 1; + scalar_t mean, invstd; if (train) { mean = THTensor_(get1d)(save_mean, f); invstd = THTensor_(get1d)(save_std, f); @@ -105,11 +105,11 @@ void THNN_(BatchNormalization_backward)( // sum over all gradOutput in feature plane accreal sum = 0; - TH_TENSOR_APPLY(real, gradOut, sum += *gradOut_data;); + TH_TENSOR_APPLY(scalar_t, gradOut, sum += *gradOut_data;); // dot product of the Q(X) and gradOuput accreal dotp = 0; - TH_TENSOR_APPLY2(real, in, real, gradOut, + TH_TENSOR_APPLY2(scalar_t, in, scalar_t, gradOut, dotp += (*in_data - mean) * (*gradOut_data);); if (gradInput) { @@ -122,12 +122,12 @@ void THNN_(BatchNormalization_backward)( // dL/dX = (Q(dL/dY) - dot(Y, dL/dY) * Y) / σ * w // projection of gradOutput on to output scaled by std - real k = (real) dotp * invstd * invstd / n; - TH_TENSOR_APPLY2(real, gradIn, real, in, + scalar_t k = (scalar_t) dotp * invstd * invstd / n; + TH_TENSOR_APPLY2(scalar_t, gradIn, scalar_t, in, *gradIn_data = (*in_data - mean) * k;); accreal gradMean = sum / n; - TH_TENSOR_APPLY2(real, gradIn, real, gradOut, + TH_TENSOR_APPLY2(scalar_t, gradIn, scalar_t, gradOut, *gradIn_data = (*gradOut_data - gradMean - *gradIn_data) * invstd * w;); } else { @@ -135,7 +135,7 @@ void THNN_(BatchNormalization_backward)( // Q(X) = X - running_mean ; i.e. input centered to zero mean // Y = Q(X) / running_std ; i.e. BN output before weight and bias // dL/dX = w / running_std - TH_TENSOR_APPLY2(real, gradIn, real, gradOut, + TH_TENSOR_APPLY2(scalar_t, gradIn, scalar_t, gradOut, *gradIn_data = *gradOut_data * invstd * w;); } @@ -143,12 +143,12 @@ void THNN_(BatchNormalization_backward)( } if (gradWeight) { - real val = THTensor_(get1d)(gradWeight, f); + scalar_t val = THTensor_(get1d)(gradWeight, f); THTensor_(set1d)(gradWeight, f, val + scale * dotp * invstd); } if (gradBias) { - real val = THTensor_(get1d)(gradBias, f); + scalar_t val = THTensor_(get1d)(gradBias, f); THTensor_(set1d)(gradBias, f, val + scale * sum); } diff --git a/aten/src/THNN/generic/ClassNLLCriterion.c b/aten/src/THNN/generic/ClassNLLCriterion.c index af6d73a23f1315..a76d8d04e605c3 100644 --- a/aten/src/THNN/generic/ClassNLLCriterion.c +++ b/aten/src/THNN/generic/ClassNLLCriterion.c @@ -44,7 +44,7 @@ void THNN_(ClassNLLCriterion_updateOutput)( THTensor_(fastSet1d)(output, i, 0.0f); continue; } - real cur_weight = weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f; + scalar_t cur_weight = weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f; THTensor_(fastSet1d)(output, i, -THTensor_(fastGet2d)(input, i, cur_target) * cur_weight); } else { int tmp = -1; @@ -65,11 +65,11 @@ void THNN_(ClassNLLCriterion_updateOutput)( target = THIndexTensor_(newContiguous)(target); weights = weights ? 
THTensor_(newContiguous)(weights) : NULL; - real *input_data = input->data(); + scalar_t *input_data = input->data(); THIndex_t *target_data = THIndexTensor_(data)(target); - real *weights_data = weights ? weights->data() : NULL; - real *output_data = output->data(); - real *total_weight_data = total_weight->data(); + scalar_t *weights_data = weights ? weights->data() : NULL; + scalar_t *output_data = output->data(); + scalar_t *total_weight_data = total_weight->data(); output_data[0] = total_weight_data[0] = 0.0; @@ -92,7 +92,7 @@ void THNN_(ClassNLLCriterion_updateOutput)( if (cur_target != ignore_index) { THAssert(cur_target >= 0 && cur_target < n_classes); - real cur_weight = weights ? weights_data[cur_target] : 1.0f; + scalar_t cur_weight = weights ? weights_data[cur_target] : 1.0f; total_weight_data[0] += cur_weight; output_data[0] -= input_data[i * n_target + cur_target] * cur_weight; } @@ -155,13 +155,13 @@ void THNN_(ClassNLLCriterion_updateGradInput)( if (cur_target == ignore_index) { continue; } - real weight = weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f; + scalar_t weight = weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f; THTensor_(fastSet2d)(gradInput, i, cur_target, -weight * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, i)); } return; } - real *total_weight_data = total_weight->data(); + scalar_t *total_weight_data = total_weight->data(); if (*total_weight_data <= 0) { return; } @@ -172,10 +172,10 @@ void THNN_(ClassNLLCriterion_updateGradInput)( weights = weights ? THTensor_(newContiguous)(weights) : NULL; THIndex_t *target_data = THIndexTensor_(data)(target); - real *weights_data = weights ? weights->data() : NULL; - real *gradInput_data = gradInput->data(); + scalar_t *weights_data = weights ? 
weights->data() : NULL; + scalar_t *gradInput_data = gradInput->data(); - real gradOutput_value = THTensor_(get1d)(gradOutput, 0); + scalar_t gradOutput_value = THTensor_(get1d)(gradOutput, 0); if (THTensor_(nDimensionLegacyAll)(input) == 1) { int cur_target = target_data[0] - TH_INDEX_BASE; diff --git a/aten/src/THNN/generic/Col2Im.c b/aten/src/THNN/generic/Col2Im.c index 115bc53cc87f91..ddc512eb8e1721 100644 --- a/aten/src/THNN/generic/Col2Im.c +++ b/aten/src/THNN/generic/Col2Im.c @@ -56,14 +56,14 @@ // // ALSO do vol2col -static void THNN_(im2col)(const real* data_im, const int64_t channels, +static void THNN_(im2col)(const scalar_t* data_im, const int64_t channels, const int64_t height, const int64_t width, const int64_t output_height, const int64_t output_width, const int64_t kernel_h, const int64_t kernel_w, const int64_t pad_h, const int64_t pad_w, const int64_t stride_h, const int64_t stride_w, const int64_t dilation_h, const int64_t dilation_w, - real* data_col) { + scalar_t* data_col) { const int64_t height_col = output_height; const int64_t width_col = output_width; const int64_t channels_col = channels * kernel_h * kernel_w; @@ -83,15 +83,15 @@ static void THNN_(im2col)(const real* data_im, const int64_t channels, } } -static void THNN_(col2im)(const real* data_col, const int64_t channels, +static void THNN_(col2im)(const scalar_t* data_col, const int64_t channels, const int64_t height, const int64_t width, const int64_t output_height, const int64_t output_width, const int64_t kernel_h, const int64_t kernel_w, const int64_t pad_h, const int64_t pad_w, const int64_t stride_h, const int64_t stride_w, const int64_t dilation_h, const int64_t dilation_w, - real* data_im) { - memset(data_im, 0, sizeof(real) * height * width * channels); + scalar_t* data_im) { + memset(data_im, 0, sizeof(scalar_t) * height * width * channels); const int64_t height_col = output_height; const int64_t width_col = output_width; const int64_t channels_col = channels * kernel_h * kernel_w; @@ -199,14 +199,14 @@ void THNN_(Col2Im_updateOutput)( THTensor_(select)(output_n, output, 0, elt); THNN_(col2im)( - input_n->data(), + input_n->data(), nOutputPlane, outputHeight, outputWidth, height_col, width_col, kH, kW, padH, padW, sH, sW, - dH, dW, output_n->data()); + dH, dW, output_n->data()); } c10::raw::intrusive_ptr::decref(input_n); diff --git a/aten/src/THNN/generic/DistKLDivCriterion.c b/aten/src/THNN/generic/DistKLDivCriterion.c index 64dfb345fe7620..b4e3b9ca6f3d65 100644 --- a/aten/src/THNN/generic/DistKLDivCriterion.c +++ b/aten/src/THNN/generic/DistKLDivCriterion.c @@ -13,7 +13,7 @@ void THNN_(DistKLDivCriterion_updateOutput)( if (reduction == Reduction::None) { THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY3(real, input, real, target, real, output, + TH_TENSOR_APPLY3(scalar_t, input, scalar_t, target, scalar_t, output, *output_data = *target_data > 0 ? *target_data * (log(*target_data) - *input_data) : 0; ); return; @@ -21,9 +21,9 @@ void THNN_(DistKLDivCriterion_updateOutput)( THTensor_(resize1d)(output, 1); - real sum = 0; + scalar_t sum = 0; - TH_TENSOR_APPLY2(real, input, real, target, + TH_TENSOR_APPLY2(scalar_t, input, scalar_t, target, sum += *target_data > 0 ? 
*target_data * (log(*target_data) - *input_data) : 0; ); @@ -46,7 +46,7 @@ void THNN_(DistKLDivCriterion_updateGradInput)( if (reduction == Reduction::None) { THNN_CHECK_SHAPE(input, gradOutput); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, target, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, target, *gradInput_data = *target_data > 0 ? (-*target_data) * *gradOutput_data : 0; ); return; @@ -54,9 +54,9 @@ void THNN_(DistKLDivCriterion_updateGradInput)( THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1); - real norm = (reduction == Reduction::ElementwiseMean ? 1./((real)THTensor_(nElement)(input)) : 1.); + scalar_t norm = (reduction == Reduction::ElementwiseMean ? 1./((scalar_t)THTensor_(nElement)(input)) : 1.); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target, *gradInput_data = *target_data > 0 ? norm * (-*target_data) * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0) : 0; ); } diff --git a/aten/src/THNN/generic/ELU.c b/aten/src/THNN/generic/ELU.c index 62111ebbf4d7c2..00f211a3806836 100644 --- a/aten/src/THNN/generic/ELU.c +++ b/aten/src/THNN/generic/ELU.c @@ -11,17 +11,17 @@ void THNN_(ELU_updateOutput)( accreal input_scale, bool inplace) { - real negcoef = TH_CONVERT_ACCREAL_TO_REAL(alpha_ * scale); - real poscoef = TH_CONVERT_ACCREAL_TO_REAL(scale * input_scale); - real negiptcoef = TH_CONVERT_ACCREAL_TO_REAL(input_scale); + scalar_t negcoef = TH_CONVERT_ACCREAL_TO_REAL(alpha_ * scale); + scalar_t poscoef = TH_CONVERT_ACCREAL_TO_REAL(scale * input_scale); + scalar_t negiptcoef = TH_CONVERT_ACCREAL_TO_REAL(input_scale); if (inplace) { - TH_TENSOR_APPLY(real, input, + TH_TENSOR_APPLY(scalar_t, input, *input_data = *input_data <= 0 ? (exp(*input_data * negiptcoef)-1) * negcoef : *input_data * poscoef; ); THTensor_(set)(output, input); } else { THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY2(real, input, real, output, + TH_TENSOR_APPLY2(scalar_t, input, scalar_t, output, *output_data = *input_data <= 0 ? (exp(*input_data * negiptcoef)-1) * negcoef : *input_data * poscoef; ); } @@ -36,12 +36,12 @@ void THNN_(ELU_updateGradInput)( accreal scale, accreal input_scale) { - real negcoef = TH_CONVERT_ACCREAL_TO_REAL(alpha_ * scale); - real poscoef = TH_CONVERT_ACCREAL_TO_REAL(scale * input_scale); - real negiptcoef = TH_CONVERT_ACCREAL_TO_REAL(input_scale); + scalar_t negcoef = TH_CONVERT_ACCREAL_TO_REAL(alpha_ * scale); + scalar_t poscoef = TH_CONVERT_ACCREAL_TO_REAL(scale * input_scale); + scalar_t negiptcoef = TH_CONVERT_ACCREAL_TO_REAL(input_scale); THNN_CHECK_NELEMENT(output, gradOutput); THTensor_(resizeAs)(gradInput, output); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, output, *gradInput_data = *output_data <= 0 ? 
*gradOutput_data * negiptcoef * (*output_data + negcoef) : *gradOutput_data * poscoef; ); } diff --git a/aten/src/THNN/generic/FeatureLPPooling.c b/aten/src/THNN/generic/FeatureLPPooling.c index bc6453fb4875ac..52f60dc8041fa7 100644 --- a/aten/src/THNN/generic/FeatureLPPooling.c +++ b/aten/src/THNN/generic/FeatureLPPooling.c @@ -215,8 +215,8 @@ THNN_(FeatureLPPooling_updateOutput)( FeatureLPPoolingSizes outputDesc = THNN_(FeatureLPPooling_upcastCPU)(output, batchMode); - real* inputP = input->data(); - real* outputP = output->data(); + scalar_t* inputP = input->data(); + scalar_t* outputP = output->data(); FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i; @@ -309,10 +309,10 @@ THNN_(FeatureLPPooling_updateGradInput)( FeatureLPPoolingSizes gradInputDesc = THNN_(FeatureLPPooling_upcastCPU)(gradInput, batchMode); - real* gradOutputP = gradOutput->data(); - real* gradInputP = gradInput->data(); - real* outputP = output->data(); - real* inputP = input->data(); + scalar_t* gradOutputP = gradOutput->data(); + scalar_t* gradInputP = gradInput->data(); + scalar_t* outputP = output->data(); + scalar_t* inputP = input->data(); FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i; @@ -325,11 +325,11 @@ THNN_(FeatureLPPooling_updateGradInput)( // Load output (f(x_is)). It is possible that this is zero, in // which case we'll ignore this point. - real outputV = + scalar_t outputV = outputP[ flpGetOffset(&outputDesc, batch, outputFeature, opt1, opt2)]; - if (outputV == (real) 0) { + if (outputV == (scalar_t) 0) { continue; } @@ -337,15 +337,15 @@ THNN_(FeatureLPPooling_updateGradInput)( FEATURE_LP_SIZE_TYPE inputFeature = outputFeature * stride + i; THAssert(inputFeature < inputDesc.size[1]); - real gradOutputV = + scalar_t gradOutputV = gradOutputP[ flpGetOffset(&gradOutputDesc, batch, outputFeature, opt1, opt2)]; - real inputV = + scalar_t inputV = inputP[ flpGetOffset(&inputDesc, batch, inputFeature, opt1, opt2)]; // Calculate grad * (x_i / f(x_is))^(p - 1) - real v = gradOutputV * pow(inputV / outputV, power - (accreal) 1); + scalar_t v = gradOutputV * pow(inputV / outputV, power - (accreal) 1); gradInputP[ flpGetOffset(&gradInputDesc, batch, inputFeature, opt1, opt2)] diff --git a/aten/src/THNN/generic/GatedLinearUnit.c b/aten/src/THNN/generic/GatedLinearUnit.c index 9aaa6f66b7f483..068aca25dd6817 100644 --- a/aten/src/THNN/generic/GatedLinearUnit.c +++ b/aten/src/THNN/generic/GatedLinearUnit.c @@ -53,8 +53,8 @@ void THNN_(GatedLinear_updateGradInput)( THTensor_(sigmoid)(gradInputfirstHalf, secondHalf); - TH_TENSOR_APPLY2(real, gradInputsecondHalf, real, gradInputfirstHalf, - real z = *gradInputfirstHalf_data; + TH_TENSOR_APPLY2(scalar_t, gradInputsecondHalf, scalar_t, gradInputfirstHalf, + scalar_t z = *gradInputfirstHalf_data; *gradInputsecondHalf_data = (1. 
- z) * z; ); diff --git a/aten/src/THNN/generic/HardShrink.c b/aten/src/THNN/generic/HardShrink.c index 18dea95a049118..387655c90603d3 100644 --- a/aten/src/THNN/generic/HardShrink.c +++ b/aten/src/THNN/generic/HardShrink.c @@ -8,10 +8,10 @@ void THNN_(HardShrink_updateOutput)( THTensor *output, accreal lambda_) { - real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); + scalar_t lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY2(real, output, real, input, + TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input, if (*input_data > lambda) *output_data = *input_data; else if (*input_data >= -lambda) @@ -28,10 +28,10 @@ void THNN_(HardShrink_updateGradInput)( THTensor *gradInput, accreal lambda_) { - real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); + scalar_t lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); THNN_CHECK_NELEMENT(input, gradOutput); THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input, if (*input_data >= -lambda && *input_data <= lambda) *gradInput_data = 0; else diff --git a/aten/src/THNN/generic/HardTanh.c b/aten/src/THNN/generic/HardTanh.c index 2cc95fcff2ca8c..e0bd9eb67d1842 100644 --- a/aten/src/THNN/generic/HardTanh.c +++ b/aten/src/THNN/generic/HardTanh.c @@ -10,8 +10,8 @@ void THNN_(HardTanh_updateOutput)( accreal max_val_, bool inplace) { - real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_); - real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_); + scalar_t min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_); + scalar_t max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_); if (inplace) THTensor_(set)(output, input); else @@ -21,7 +21,7 @@ void THNN_(HardTanh_updateOutput)( { if (inplace) { - TH_TENSOR_APPLY(real, input, + TH_TENSOR_APPLY(scalar_t, input, if (*input_data < min_val) *input_data = min_val; else if (*input_data > max_val) @@ -30,7 +30,7 @@ void THNN_(HardTanh_updateOutput)( } else { - TH_TENSOR_APPLY2(real, output, real, input, + TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input, if (*input_data < min_val) *output_data = min_val; else if (*input_data > max_val) @@ -42,8 +42,8 @@ void THNN_(HardTanh_updateOutput)( } else { - real* ptr_input = input->data(); - real* ptr_output = output->data(); + scalar_t* ptr_input = input->data(); + scalar_t* ptr_output = output->data(); ptrdiff_t i; ptrdiff_t n = THTensor_(nElement)(input); @@ -79,8 +79,8 @@ void THNN_(HardTanh_updateGradInput)( accreal max_val_, bool inplace) { - real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_); - real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_); + scalar_t min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_); + scalar_t max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_); THNN_CHECK_NELEMENT(input, gradOutput); if (inplace) @@ -95,13 +95,13 @@ void THNN_(HardTanh_updateGradInput)( { if (inplace) { - TH_TENSOR_APPLY2(real, gradOutput, real, input, + TH_TENSOR_APPLY2(scalar_t, gradOutput, scalar_t, input, if (*input_data <= min_val || *input_data >= max_val) *gradOutput_data = 0; ); } else - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input, if (*input_data <= min_val || *input_data >= max_val) *gradInput_data = 0; else @@ -110,9 +110,9 @@ void THNN_(HardTanh_updateGradInput)( } else { - real* ptr_gradOutput = gradOutput->data(); - real* ptr_gradInput = gradInput->data(); - real* ptr_input = input->data(); + scalar_t* ptr_gradOutput = 
gradOutput->data(); + scalar_t* ptr_gradInput = gradInput->data(); + scalar_t* ptr_input = input->data(); ptrdiff_t i; ptrdiff_t n = THTensor_(nElement)(input); diff --git a/aten/src/THNN/generic/Im2Col.c b/aten/src/THNN/generic/Im2Col.c index 9723716e54d815..b788254070568b 100644 --- a/aten/src/THNN/generic/Im2Col.c +++ b/aten/src/THNN/generic/Im2Col.c @@ -83,12 +83,12 @@ void THNN_(Im2Col_updateOutput)( THTensor_(select)(output_n, output, 0, elt); THNN_(im2col)( - input_n->data(), + input_n->data(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, sH, sW, - dH, dW, output_n->data()); + dH, dW, output_n->data()); } c10::raw::intrusive_ptr::decref(input_n); diff --git a/aten/src/THNN/generic/IndexLinear.c b/aten/src/THNN/generic/IndexLinear.c index 2cd9aa343deadc..ed708bf5a235cd 100644 --- a/aten/src/THNN/generic/IndexLinear.c +++ b/aten/src/THNN/generic/IndexLinear.c @@ -51,22 +51,22 @@ void THNN_(IndexLinear_updateOutput)( int64_t* cumSumSizesData = THLongTensor_data(cumSumSizes); /* Define/resize the normalized values tensor if maxNormalize is > 0 */ - real* normalizedValuesData = NULL; + scalar_t* normalizedValuesData = NULL; if (maxNormalize) { THTensor_(resize1d)(normalizedValues, keysSize); - normalizedValuesData = normalizedValues->data(); + normalizedValuesData = normalizedValues->data(); } /* Resize the output */ THTensor_(resize2d)(output, batchSize, outDim); /* Access the storage data/strides */ - real* outputData = output->data(); - real* valuesData = values->data(); - real* weightData = weight->data(); + scalar_t* outputData = output->data(); + scalar_t* valuesData = values->data(); + scalar_t* weightData = weight->data(); int64_t weightStride0 = weight->stride(0); - real* biasData = bias->data(); + scalar_t* biasData = bias->data(); int64_t* keysData = THLongTensor_data(keys); /* Make sure these inputs are contiguous to accelerate computations */ @@ -97,9 +97,9 @@ void THNN_(IndexLinear_updateOutput)( if(keysSize*outDim > THNN_SPARSE_OMP_THRESHOLD && batchSize > 1) for (j = 0; j < batchSize; j++) { - real* loutputData = outputData + j; - real val = 0; - real absVal = 0; + scalar_t* loutputData = outputData + j; + scalar_t val = 0; + scalar_t absVal = 0; int64_t offset = j == 0 ? 0 : cumSumSizesData[j - 1]; for (i = 0; i < sizesData[j]; i++) @@ -143,8 +143,8 @@ void THNN_(IndexLinear_updateOutput)( for (j = 0; j < batchSize; j++) { int64_t offset = j == 0 ? 0 : cumSumSizesData[j - 1]; - real* loutputData = outputData + j; - real val = 0; + scalar_t* loutputData = outputData + j; + scalar_t val = 0; for (i = 0; i < sizesData[j]; i++) { @@ -167,17 +167,17 @@ void THNN_(IndexLinear_updateOutput)( for (j = 0; j < batchSize; j++) { int64_t offset = j == 0 ? 
0 : cumSumSizesData[j - 1]; - real val; - real* loutputData = outputData + j*outDim; - real* lweightData = weightData; - memcpy(loutputData, biasData, outDim*sizeof(real)); + scalar_t val; + scalar_t* loutputData = outputData + j*outDim; + scalar_t* lweightData = weightData; + memcpy(loutputData, biasData, outDim*sizeof(scalar_t)); for (i = 0; i < sizesData[j]; i++) { int64_t woffset = weightStride0*(keysData[offset] + keysOffset); if (maxNormalize) { val = valuesData[offset]; - real absVal = fabs(val); + scalar_t absVal = fabs(val); if (train) { if (absVal > weightData[woffset]) @@ -193,10 +193,10 @@ void THNN_(IndexLinear_updateOutput)( * *``` * weightData[woffset+2] = weightData[woffset+2]==0?1:(weightData[woffset+2] / (weightData[woffset+2] + 1)); - * real alpha = 1; - * real beta = 0.01; - * real gamma = 1 - 0.000001; - * real l = weightData[woffset+2]==0?1/gamma:(weightData[woffset+2] - beta) / (alpha - beta); + * scalar_t alpha = 1; + * scalar_t beta = 0.01; + * scalar_t gamma = 1 - 0.000001; + * scalar_t l = weightData[woffset+2]==0?1/gamma:(weightData[woffset+2] - beta) / (alpha - beta); * l = gamma*l; * weightData[woffset+2] = (alpha-beta)*l + beta; * ``` @@ -247,8 +247,8 @@ void THNN_(IndexLinear_updateParameters)( accreal weightDecay_, accreal learningRate_) { - real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_); - real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_); + scalar_t weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_); + scalar_t learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_); /* Retrieve all the dimensions of the problem */ int64_t outDim = THTensor_(size)(bias, 0); int64_t woutDim = THTensor_(size)(weight, 1); @@ -256,11 +256,11 @@ void THNN_(IndexLinear_updateParameters)( int64_t keysSize = THLongTensor_size(runningKeys, 0); /* Access the storage data/strides */ - real* gradWeightData = gradWeight->data(); - real* weightData = weight->data(); + scalar_t* gradWeightData = gradWeight->data(); + scalar_t* weightData = weight->data(); int64_t weightStride0 = weight->stride(0); - real* gradBiasData = gradBias->data(); - real* biasData = bias->data(); + scalar_t* gradBiasData = gradBias->data(); + scalar_t* biasData = bias->data(); int64_t* keysData = THLongTensor_data(runningKeys); /* Make sure these inputs are contiguous to accelerate computations */ @@ -288,7 +288,7 @@ void THNN_(IndexLinear_updateParameters)( for (j = 0; j < keysSize; j++) { int64_t woffset = weightStride0*(keysData[j] + keysOffset) + maxNormalize; - real lr = learningRate*weightData[woffset-2]; + scalar_t lr = learningRate*weightData[woffset-2]; weightData[woffset-1] -= weightData[woffset]*gradWeightData[2*j]*lr; weightData[woffset] -= gradWeightData[2*j+1]*lr - weightDecay * weightData[woffset-2] * weightData[woffset]; } @@ -298,7 +298,7 @@ void THNN_(IndexLinear_updateParameters)( for (j = 0; j < keysSize; j++) { int64_t woffset = weightStride0*(keysData[j] + keysOffset) + maxNormalize; - real lr = learningRate*weightData[woffset-2]; + scalar_t lr = learningRate*weightData[woffset-2]; weightData[woffset-1] -= weightData[woffset]*gradWeightData[2*j]*lr; weightData[woffset] -= gradWeightData[2*j+1]*lr; } @@ -327,11 +327,11 @@ void THNN_(IndexLinear_updateParameters)( { for (j = 0; j < keysSize; j++) { - real lr = learningRate; - real wd = weightDecay; - real* lweightData; + scalar_t lr = learningRate; + scalar_t wd = weightDecay; + scalar_t* lweightData; int64_t woffset = weightStride0*(keysData[j] + keysOffset); - real* lgradWeightData = gradWeightData + j*outDim; + 
+      scalar_t* lgradWeightData = gradWeightData + j*outDim;
       if (maxNormalize)
       {
         lgradWeightData += j*outDim;
@@ -392,8 +392,8 @@ void THNN_(IndexLinear_accUpdateGradParameters)(
           accreal weightDecay_,
           accreal scale_)
 {
-  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   /* Retrieve all the dimensions of the problem */
   int64_t batchSize = THLongTensor_size(sizes, 0);
   int64_t outDim = THTensor_(size)(bias, 0);
@@ -402,10 +402,10 @@ void THNN_(IndexLinear_accUpdateGradParameters)(
   THArgCheck(THNN_(checkKeysValues)(keys, values), 1,
              "Keys and values should have the same number of elements");
   /* Access the storage data/strides */
-  real* gradOutputData = gradOutput->data<real>();
-  real* valuesData =values->data<real>();
-  real* weightData = weight->data<real>();
-  real* biasData = bias->data<real>();
+  scalar_t* gradOutputData = gradOutput->data<scalar_t>();
+  scalar_t* valuesData =values->data<scalar_t>();
+  scalar_t* weightData = weight->data<scalar_t>();
+  scalar_t* biasData = bias->data<scalar_t>();
   int64_t weightStride0 = weight->stride(0);
   int64_t* keysData = THLongTensor_data(keys);
   int64_t* sizesData = THLongTensor_data(sizes);
@@ -430,9 +430,9 @@ void THNN_(IndexLinear_accUpdateGradParameters)(
       int64_t offset = 0;
      for (j = 0; j < batchSize; j++)
       {
-        real* lgradOutputData = gradOutputData + j;
+        scalar_t* lgradOutputData = gradOutputData + j;
         *biasData -= *lgradOutputData * scale;
-        real val = *lgradOutputData * scale;
+        scalar_t val = *lgradOutputData * scale;
         for (i = 0; i < sizesData[j]; i++)
         {
           int64_t idx = weightStride0*(keysData[offset] + keysOffset) + maxNormalize;
@@ -460,9 +460,9 @@ void THNN_(IndexLinear_accUpdateGradParameters)(
       int64_t offset = 0;
       for (j = 0; j < batchSize; j++)
       {
-        real* lgradOutputData = gradOutputData + j;
+        scalar_t* lgradOutputData = gradOutputData + j;
         *biasData -= *lgradOutputData * scale;
-        real val = *lgradOutputData * scale;
+        scalar_t val = *lgradOutputData * scale;
         for (i = 0; i < sizesData[j]; i++)
         {
           int64_t idx = weightStride0*(keysData[offset] + keysOffset);
@@ -476,7 +476,7 @@ void THNN_(IndexLinear_accUpdateGradParameters)(
       int64_t offset = 0;
       for (j = 0; j < batchSize; j++)
       {
-        real val = gradOutputData[j] * scale;
+        scalar_t val = gradOutputData[j] * scale;
         for (i = 0; i < sizesData[j]; i++)
         {
           weightData[(keysData[offset] + keysOffset)*weightStride0] -= val * valuesData[offset];
@@ -491,13 +491,13 @@ void THNN_(IndexLinear_accUpdateGradParameters)(
     int64_t offset = 0;
     for (j = 0; j < batchSize; j++)
     {
-      real* lgradOutputData = gradOutputData + j*outDim;
-      real* lweightData = weightData;
+      scalar_t* lgradOutputData = gradOutputData + j*outDim;
+      scalar_t* lweightData = weightData;
       THVector_(cadd)(biasData, biasData, lgradOutputData, -scale, outDim);
       for (i = 0; i < sizesData[j]; i++)
       {
-        real val = valuesData[offset] * scale;
-        real wd = weightDecay;
+        scalar_t val = valuesData[offset] * scale;
+        scalar_t wd = weightDecay;

         // Max normalize case
         if (maxNormalize)
@@ -561,11 +561,11 @@ void THNN_(IndexLinear_accUpdateGradParameters)(
     offset = 0;
     for (j = 0; j < batchSize; j++)
     {
-      real* lweightData = weightData;
+      scalar_t* lweightData = weightData;
       for (i = 0; i < sizesData[j]; i++)
       {
-        real val = valuesData[offset] * scale;
-        real wd = weightDecay;
+        scalar_t val = valuesData[offset] * scale;
+        scalar_t wd = weightDecay;

         lweightData = weightData + weightStride0*(keysData[offset] + keysOffset) + (maxNormalize-2);
         lweightData[0] = 0;
@@ -594,7 +594,7 @@ void THNN_(IndexLinear_accGradParameters)(
           accreal weightDecay_,
           accreal scale_)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   /* Retrieve all the dimensions of the problem */
   int64_t batchSize = THLongTensor_size(sizes, 0);
   int64_t keysSize = THLongTensor_size(keys, 0);
@@ -614,10 +614,10 @@ void THNN_(IndexLinear_accGradParameters)(
     THTensor_(resize2d)(gradWeight, keysSize, outDim * (maxNormalize>0?2:1));

   /* Access the storage data/strides */
-  real* gradOutputData = gradOutput->data<real>();
-  real* valuesData =values->data<real>();
-  real* gradWeightData = gradWeight->data<real>();
-  real* gradBiasData = gradBias->data<real>();
+  scalar_t* gradOutputData = gradOutput->data<scalar_t>();
+  scalar_t* valuesData =values->data<scalar_t>();
+  scalar_t* gradWeightData = gradWeight->data<scalar_t>();
+  scalar_t* gradBiasData = gradBias->data<scalar_t>();

   /* Make sure these inputs are contiguous to accelerate computations */
   THArgCheck(THLongTensor_isContiguous(keys), 1, "keys vector must be contiguous");
@@ -640,9 +640,9 @@ void THNN_(IndexLinear_accGradParameters)(
     for (j = 0; j < batchSize; j++)
     {
       int64_t offset = j==0?0:cumSizesData[j-1];
-      real val = gradOutputData[j] * scale;
-      real* lgradWeightData = gradWeightData + offset;
-      real* lvaluesData = valuesData + offset;
+      scalar_t val = gradOutputData[j] * scale;
+      scalar_t* lgradWeightData = gradWeightData + offset;
+      scalar_t* lvaluesData = valuesData + offset;
       int64_t end = sizesData[j];

       if (maxNormalize)
@@ -679,12 +679,12 @@ void THNN_(IndexLinear_accGradParameters)(
     for (j = 0; j < batchSize; j++)
     {
       int64_t offset = j==0?0:cumSizesData[j-1];
-      real* lgradOutputData = gradOutputData + j*outDim;
-      real* lgradWeightData = gradWeightData;
+      scalar_t* lgradOutputData = gradOutputData + j*outDim;
+      scalar_t* lgradWeightData = gradWeightData;
       THVector_(cadd)(gradBiasData, gradBiasData, lgradOutputData, scale, outDim);
       for (i = 0; i < sizesData[j]; i++)
       {
-        real val = valuesData[offset] * scale;
+        scalar_t val = valuesData[offset] * scale;
         lgradWeightData = gradWeightData + offset*outDim;
         if (maxNormalize)
         {
diff --git a/aten/src/THNN/generic/L1Cost.c b/aten/src/THNN/generic/L1Cost.c
index 8f5eb174f9103d..5bec8df713aaa9 100644
--- a/aten/src/THNN/generic/L1Cost.c
+++ b/aten/src/THNN/generic/L1Cost.c
@@ -10,7 +10,7 @@ void THNN_(L1Cost_updateOutput)(
   THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
   accreal sum = 0;

-  TH_TENSOR_APPLY(real, input,
+  TH_TENSOR_APPLY(scalar_t, input,
     sum += fabs(*input_data);
   );

@@ -25,7 +25,7 @@ void THNN_(L1Cost_updateGradInput)(
 {
   THNN_CHECK_NELEMENT(input, gradOutput);
   THTensor_(resizeAs)(gradInput, input);
-  TH_TENSOR_APPLY2(real, gradInput, real, input,
+  TH_TENSOR_APPLY2(scalar_t, gradInput, scalar_t, input,
     if (*input_data > 0)
       *gradInput_data = 1;
     else if (*input_data < 0)
diff --git a/aten/src/THNN/generic/LeakyReLU.c b/aten/src/THNN/generic/LeakyReLU.c
index abca9fbc8bba09..3818aaad4cad3f 100644
--- a/aten/src/THNN/generic/LeakyReLU.c
+++ b/aten/src/THNN/generic/LeakyReLU.c
@@ -9,10 +9,10 @@ void THNN_(LeakyReLU_updateOutput)(
           accreal negval_,
           bool inplace)
 {
-  real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
+  scalar_t negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
   if (inplace)
   {
-    TH_TENSOR_APPLY(real, input,
+    TH_TENSOR_APPLY(scalar_t, input,
       if (*input_data <= 0)
         *input_data *= negval;
     );
@@ -21,8 +21,8 @@ void THNN_(LeakyReLU_updateOutput)(
   else
   {
     THTensor_(resizeAs)(output, input);
-    TH_TENSOR_APPLY2(real, output, real, input,
-      const real r = (*input_data > 0) ? 1 : negval;
+    TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input,
+      const scalar_t r = (*input_data > 0) ? 1 : negval;
       *output_data = *input_data * r;
     );
   }
@@ -36,11 +36,11 @@ void THNN_(LeakyReLU_updateGradInput)(
           accreal negval_,
           bool inplace)
 {
-  real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
+  scalar_t negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   if (inplace)
   {
-    TH_TENSOR_APPLY2(real, gradOutput, real, input,
+    TH_TENSOR_APPLY2(scalar_t, gradOutput, scalar_t, input,
       if (*input_data <= 0)
         *gradOutput_data *= negval;
     );
@@ -49,7 +49,7 @@ void THNN_(LeakyReLU_updateGradInput)(
   else
   {
     THTensor_(resizeAs)(gradInput, input);
-    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
+    TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input,
       *gradInput_data = *input_data > 0 ? *gradOutput_data : *gradOutput_data * negval;
     );
   }
diff --git a/aten/src/THNN/generic/Linear.c b/aten/src/THNN/generic/Linear.c
index ef74077aeae1c1..419cd6f9007c53 100644
--- a/aten/src/THNN/generic/Linear.c
+++ b/aten/src/THNN/generic/Linear.c
@@ -91,7 +91,7 @@ void THNN_(Linear_accGradParameters)(
           THTensor *addBuffer,
           accreal scale_)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   int64_t dim = THTensor_(nDimensionLegacyAll)(input);
   if (dim == 1) {
     THTensor_(addr)(gradWeight,1,gradWeight,scale,gradOutput,input);
diff --git a/aten/src/THNN/generic/LogSigmoid.c b/aten/src/THNN/generic/LogSigmoid.c
index 556af4f5a75af4..14ca6dc3ea87b9 100644
--- a/aten/src/THNN/generic/LogSigmoid.c
+++ b/aten/src/THNN/generic/LogSigmoid.c
@@ -11,9 +11,9 @@ void THNN_(LogSigmoid_updateOutput)(
   THTensor_(resizeAs)(output, input);
   THTensor_(resizeAs)(buffer, input);
   //Use the LogSumExp trick to make this stable against overflow
-  TH_TENSOR_APPLY3(real, output, real, input, real, buffer,
-    real max_elem = fmax(0, -*input_data);
-    real z = exp(-max_elem) + exp(-*input_data - max_elem);
+  TH_TENSOR_APPLY3(scalar_t, output, scalar_t, input, scalar_t, buffer,
+    scalar_t max_elem = fmax(0, -*input_data);
+    scalar_t z = exp(-max_elem) + exp(-*input_data - max_elem);
     *buffer_data = z;
     *output_data = -(max_elem + log(z));
   );
@@ -35,10 +35,10 @@ void THNN_(LogSigmoid_updateGradInput)(
    * -max_deriv - (z-1)/z if x is >= 0 or
    * -max_deriv + (z-1)/z if x is < 0
    */
-  TH_TENSOR_APPLY3(real, input, real, gradInput, real, buffer,
-    real z = *buffer_data;
-    real max_deriv = 0.0;
-    real sign = -1.0;
+  TH_TENSOR_APPLY3(scalar_t, input, scalar_t, gradInput, scalar_t, buffer,
+    scalar_t z = *buffer_data;
+    scalar_t max_deriv = 0.0;
+    scalar_t sign = -1.0;
     if (*input_data < 0){
       max_deriv = -1.0;
       sign = 1.0;
diff --git a/aten/src/THNN/generic/LookupTable.c b/aten/src/THNN/generic/LookupTable.c
index cf6325b5370e65..454c44993799bf 100644
--- a/aten/src/THNN/generic/LookupTable.c
+++ b/aten/src/THNN/generic/LookupTable.c
@@ -34,7 +34,7 @@ void THNN_(LookupTable_accGradParameters)(
           int paddingValue,
           accreal ascale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(ascale);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(ascale);
   ptrdiff_t i;
   THInteger_t *count_data = NULL;

@@ -67,8 +67,8 @@ void THNN_(LookupTable_accGradParameters)(

   gradOutput = THTensor_(newContiguous)(gradOutput);

-  real *gw = gradWeight->data<real>();
-  real *go = gradOutput->data<real>();
+  scalar_t *gw = gradWeight->data<scalar_t>();
+  scalar_t *go = gradOutput->data<scalar_t>();
   int64_t stride = THTensor_(stride)(gradWeight, 0);

   if (count_data)
@@ -95,7 +95,7 @@ void THNN_(LookupTable_accGradParameters)(
         int64_t k = input_data[i] - TH_INDEX_BASE;
         if (k >= start && k < end)
         {
-          real scale_ = scale;
+          scalar_t scale_ = scale;
           if (count_data) scale_ /= count_data[k];
           THBlas_(axpy)(stride, scale_, go + i*stride, 1, gw + k*stride, 1);
         }
@@ -113,7 +113,7 @@ void THNN_(LookupTable_accGradParameters)(
       if (input_data[i] != paddingValue)
       {
        int64_t k = input_data[i] - TH_INDEX_BASE;
-        real scale_ = scale;
+        scalar_t scale_ = scale;
         if (count_data) scale_ /= count_data[k];
         THBlas_(axpy)(stride, scale_, go + i*stride, 1, gw + k*stride, 1);
       }
@@ -127,13 +127,13 @@
  */
 static void THNN_(LookupTable_renormRow)(
-  real *row_data,
+  scalar_t *row_data,
   int64_t stride,
-  real maxNorm,
-  real normType)
+  scalar_t maxNorm,
+  scalar_t normType)
 {
-  real norm = 0;
-  real new_norm;
+  scalar_t norm = 0;
+  scalar_t new_norm;
   int64_t j;
   for (j=0; j<stride; j++)
@@ ... @@ void THNN_(LookupTable_renorm)(
-  real *gw = weight->data<real>();
+  scalar_t *gw = weight->data<scalar_t>();
   for (i=0; i<numel; i++)
   {
     if (row_idx[i] < TH_INDEX_BASE || row_idx[i] >= numw + TH_INDEX_BASE) {
       THError("input need to be in the range %ld <= input < %ld, "
diff --git a/aten/src/THNN/generic/MSECriterion.c b/aten/src/THNN/generic/MSECriterion.c
index b7c6e07d0d0398..f8612b0777b615 100644
--- a/aten/src/THNN/generic/MSECriterion.c
+++ b/aten/src/THNN/generic/MSECriterion.c
@@ -16,7 +16,7 @@ void THNN_(MSECriterion_updateOutput)(

     accreal sum = 0;

-    TH_TENSOR_APPLY2(real, input, real, target,
+    TH_TENSOR_APPLY2(scalar_t, input, scalar_t, target,
       accreal z = (*input_data - *target_data);
       sum += z*z;
     );
@@ -24,13 +24,13 @@ void THNN_(MSECriterion_updateOutput)(
     if (reduction == Reduction::ElementwiseMean)
       sum /= THTensor_(nElement)(input);

-    THTensor_(set1d)(output, 0, (real)sum);
+    THTensor_(set1d)(output, 0, (scalar_t)sum);

     return;
   }

   THTensor_(resizeAs)(output, input);
-  TH_TENSOR_APPLY3(real, input, real, target, real, output,
-    real z = (*input_data - *target_data);
+  TH_TENSOR_APPLY3(scalar_t, input, scalar_t, target, scalar_t, output,
+    scalar_t z = (*input_data - *target_data);
     *output_data = z*z;
   );
 }
@@ -48,19 +48,19 @@ void THNN_(MSECriterion_updateGradInput)(
   if (reduction != Reduction::None) {
     THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1);
-    real norm = reduction == Reduction::ElementwiseMean ? 2./((real)THTensor_(nElement)(input)) : 2.;
+    scalar_t norm = reduction == Reduction::ElementwiseMean ? 2./((scalar_t)THTensor_(nElement)(input)) : 2.;
     norm *= THTensor_(get1d)(gradOutput, 0);
-    TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
+    TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target,
       *gradInput_data = norm * (*input_data - *target_data);
     );
     return;
   }

   THNN_CHECK_SHAPE(input, gradOutput);
-  TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target,
     *gradInput_data = 2. * (*input_data - *target_data);
   );
-  TH_TENSOR_APPLY2(real, gradInput, real, gradOutput,
+  TH_TENSOR_APPLY2(scalar_t, gradInput, scalar_t, gradOutput,
     *gradInput_data *= *gradOutput_data;
   );
 }
diff --git a/aten/src/THNN/generic/MarginCriterion.c b/aten/src/THNN/generic/MarginCriterion.c
index d6d9b60b9973a6..46838cb60f655a 100644
--- a/aten/src/THNN/generic/MarginCriterion.c
+++ b/aten/src/THNN/generic/MarginCriterion.c
@@ -10,13 +10,13 @@ void THNN_(MarginCriterion_updateOutput)(
           bool sizeAverage,
           accreal margin_)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
+  scalar_t margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
   THNN_CHECK_NELEMENT(input, target);
   THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
-  real sum = 0;
+  scalar_t sum = 0;

-  TH_TENSOR_APPLY2(real, input, real, target,
-    real z = (margin - *input_data * *target_data);
+  TH_TENSOR_APPLY2(scalar_t, input, scalar_t, target,
+    scalar_t z = (margin - *input_data * *target_data);
     sum += z>0 ? z : 0;
   );

@@ -34,12 +34,12 @@ void THNN_(MarginCriterion_updateGradInput)(
           bool sizeAverage,
           accreal margin_)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
+  scalar_t margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
   THNN_CHECK_NELEMENT(input, target);
-  real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.);
+  scalar_t norm = (sizeAverage ? 1./((scalar_t)THTensor_(nElement)(input)) : 1.);

   THTensor_(resizeAs)(gradInput, input);
-  TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target,
     *gradInput_data = (*input_data * *target_data) < margin ? -norm * *target_data : 0;
   );
 }
diff --git a/aten/src/THNN/generic/MultiLabelMarginCriterion.c b/aten/src/THNN/generic/MultiLabelMarginCriterion.c
index db60397337cd76..36d953889a3f01 100644
--- a/aten/src/THNN/generic/MultiLabelMarginCriterion.c
+++ b/aten/src/THNN/generic/MultiLabelMarginCriterion.c
@@ -11,11 +11,11 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)(
           THTensor *isTarget,
           int64_t reduction)
 {
-  real *input_data, *isTarget_data;
+  scalar_t *input_data, *isTarget_data;
   THIndex_t *target_data;
   int64_t nframe, dim;
   int64_t t, d, dt, ddt;
-  real sum;
+  scalar_t sum;

   AT_CHECK(!input->is_empty() && input->dim() <= 2,
            "non-empty vector or matrix expected, got size: ", input->sizes());
@@ -40,14 +40,14 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)(

   target = THIndexTensor_(newContiguous)(target);
   input = THTensor_(newContiguous)(input);
-  input_data = input->data<real>();
+  input_data = input->data<scalar_t>();
   target_data = THIndexTensor_(data)(target);

   if (!isTarget->sizes().equals(target->sizes())) {
     THTensor_(resizeNd)(isTarget, target->dim(), THTensor_getSizePtr(target), nullptr);
   }
   THTensor_(zero)(isTarget);
-  isTarget_data = isTarget->data<real>();
+  isTarget_data = isTarget->data<scalar_t>();

   if (reduction != Reduction::None)
   {
@@ -66,7 +66,7 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)(
       for (dt = 0; dt < dim; dt++)
       {
         THIndex_t target_idx = target_data[dt] - TH_INDEX_BASE;
-        real input_target;
+        scalar_t input_target;
         if (target_idx < 0)
           break;

@@ -75,7 +75,7 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)(
         {
           if (!isTarget_data[d])
           {
-            real z = 1 - input_target + input_data[d];
+            scalar_t z = 1 - input_target + input_data[d];
             if (z > 0)
               sum += z;
           }
@@ -112,7 +112,7 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)(
       for (dt = 0; dt < dim; dt++)
      {
         THIndex_t target_idx = target_data[dt] - TH_INDEX_BASE;
-        real input_target;
+        scalar_t input_target;
         if (target_idx < 0)
           break;

@@ -121,7 +121,7 @@ void THNN_(MultiLabelMarginCriterion_updateOutput)(
         {
           if (!isTarget_data[d])
           {
-            real z = 1 - input_target + input_data[d];
+            scalar_t z = 1 - input_target + input_data[d];
             if (z > 0)
               sum += z;
           }
@@ -149,13 +149,13 @@ void THNN_(MultiLabelMarginCriterion_updateGradInput)(
           THTensor *isTarget,
           int64_t reduction)
 {
-  real *input_data;
-  real *gradInput_data;
+  scalar_t *input_data;
+  scalar_t *gradInput_data;
   THIndex_t *target_data;
-  real *isTarget_data;
+  scalar_t *isTarget_data;
   int64_t nframe, dim;
   int64_t t, d, dt;
-  real g;
+  scalar_t g;

   AT_CHECK(!input->is_empty() && input->dim() <= 2,
            "vector or matrix expected, got size: ", input->sizes());
@@ -188,23 +188,23 @@ void THNN_(MultiLabelMarginCriterion_updateGradInput)(
   target = THIndexTensor_(newContiguous)(target);
   input = THTensor_(newContiguous)(input);
   isTarget = THTensor_(newContiguous)(isTarget);
-  input_data = input->data<real>();
+  input_data = input->data<scalar_t>();
   target_data = THIndexTensor_(data)(target);
-  isTarget_data = isTarget->data<real>();
+  isTarget_data = isTarget->data<scalar_t>();

   THTensor_(resizeAs)(gradInput, input);
   gradInput = THTensor_(newContiguous)(gradInput);
   THTensor_(zero)(gradInput);
-  gradInput_data = gradInput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();

-  g = reduction == Reduction::ElementwiseMean ? (1./((real)(nframe*dim))) : (1./((real)dim));
+  g = reduction == Reduction::ElementwiseMean ? (1./((scalar_t)(nframe*dim))) : (1./((scalar_t)dim));

   for (t = 0; t < nframe; t++)
   {
     for (dt = 0; dt < dim; dt++)
     {
       THIndex_t target_idx = target_data[dt] - TH_INDEX_BASE;
-      real input_target;
+      scalar_t input_target;
       if (target_idx < 0)
         break;

@@ -213,7 +213,7 @@ void THNN_(MultiLabelMarginCriterion_updateGradInput)(
       {
         if (!isTarget_data[d])
         {
-          real z = 1 - input_target + input_data[d];
+          scalar_t z = 1 - input_target + input_data[d];
           if (z > 0)
           {
             gradInput_data[target_idx] -= g;
@@ -227,7 +227,7 @@ void THNN_(MultiLabelMarginCriterion_updateGradInput)(
     isTarget_data += dim;
     gradInput_data += dim;
   }

-  gradInput_data = gradInput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();

   if (reduction != Reduction::None)
   {
diff --git a/aten/src/THNN/generic/MultiMarginCriterion.c b/aten/src/THNN/generic/MultiMarginCriterion.c
index 088bba9d0708ad..940ce69320e29f 100644
--- a/aten/src/THNN/generic/MultiMarginCriterion.c
+++ b/aten/src/THNN/generic/MultiMarginCriterion.c
@@ -13,12 +13,12 @@ void THNN_(MultiMarginCriterion_updateOutput)(
           THTensor *weights,
           accreal margin_)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
-  real *input_data, *weights_data;
+  scalar_t margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
+  scalar_t *input_data, *weights_data;
   THIndex_t *target_data;
   int64_t nframe, dim;
   int64_t t, d;
-  real sum;
+  scalar_t sum;

   AT_CHECK(!input->is_empty() && input->dim() <= 2,
            "non-empty vector or matrix expected, got size: ", input->sizes());
@@ -46,9 +46,9 @@ void THNN_(MultiMarginCriterion_updateOutput)(
   input = THTensor_(newContiguous)(input);
   target = THIndexTensor_(newContiguous)(target);
   weights = weights ? THTensor_(newContiguous)(weights) : NULL;
-  input_data = input->data<real>();
+  input_data = input->data<scalar_t>();
   target_data = THIndexTensor_(data)(target);
-  weights_data = weights ? weights->data<real>() : NULL;
+  weights_data = weights ? weights->data<scalar_t>() : NULL;

   if (reduction == Reduction::None)
   {
@@ -58,15 +58,15 @@ void THNN_(MultiMarginCriterion_updateOutput)(
     {
       sum = 0;
       THIndex_t target_idx = target_data[t] - TH_INDEX_BASE;
-      real input_target = input_data[target_idx];
+      scalar_t input_target = input_data[target_idx];
       for (d = 0; d < dim; d++)
       {
-        real z = margin - input_target + input_data[d];
+        scalar_t z = margin - input_target + input_data[d];
         if (d == target_idx)
           continue;

         if (z > 0) {
-          real h = (p==1) ? z : z*z;
+          scalar_t h = (p==1) ? z : z*z;
           if(weights_data)
             h *= weights_data[target_idx];
           sum += h;
@@ -86,15 +86,15 @@ void THNN_(MultiMarginCriterion_updateOutput)(
     for (t = 0; t < nframe; t++)
     {
       THIndex_t target_idx = target_data[t] - TH_INDEX_BASE;
-      real input_target = input_data[target_idx];
+      scalar_t input_target = input_data[target_idx];
       for (d = 0; d < dim; d++)
       {
-        real z = margin - input_target + input_data[d];
+        scalar_t z = margin - input_target + input_data[d];
         if (d == target_idx)
           continue;

         if (z > 0) {
-          real h = (p==1) ? z : z*z;
+          scalar_t h = (p==1) ? z : z*z;
           if(weights_data)
             h *= weights_data[target_idx];
           sum += h;
@@ -127,14 +127,14 @@ void THNN_(MultiMarginCriterion_updateGradInput)(
           THTensor *weights,
           accreal margin_)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
-  real *input_data;
-  real *gradInput_data;
+  scalar_t margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
+  scalar_t *input_data;
+  scalar_t *gradInput_data;
   THIndex_t *target_data;
-  real *weights_data;
+  scalar_t *weights_data;
   int64_t nframe, dim;
   int64_t t, d;
-  real g;
+  scalar_t g;

   AT_CHECK(!input->is_empty() && (input->dim() <= 2),
            "non-empty vector or matrix expected, got size: ", input->sizes());
@@ -152,34 +152,34 @@ void THNN_(MultiMarginCriterion_updateGradInput)(
              "inconsistent target size, got: ", target->sizes());
   }

-  g = (reduction == Reduction::ElementwiseMean ? 1./((real)(nframe*dim)) : 1./((real)dim));
+  g = (reduction == Reduction::ElementwiseMean ? 1./((scalar_t)(nframe*dim)) : 1./((scalar_t)dim));

   input = THTensor_(newContiguous)(input);
   target = THIndexTensor_(newContiguous)(target);
-  input_data = input->data<real>();
+  input_data = input->data<scalar_t>();

   THTensor_(resizeAs)(gradInput, input);
   THArgCheck(THTensor_(isContiguous)(gradInput), 5, "gradInput must be contiguous");
-  gradInput_data = gradInput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();

   target_data = THIndexTensor_(data)(target);
   weights = weights ? THTensor_(newContiguous)(weights) : NULL;
-  weights_data = weights ? weights->data<real>() : NULL;
+  weights_data = weights ? weights->data<scalar_t>() : NULL;

   for (t = 0; t < nframe; t++)
   {
     THIndex_t target_idx = target_data[t] - TH_INDEX_BASE;
-    real input_target = input_data[target_idx];
-    real gradInput_target = 0;
+    scalar_t input_target = input_data[target_idx];
+    scalar_t gradInput_target = 0;
     for (d = 0; d < dim; d++)
     {
-      real z = margin - input_target + input_data[d];
+      scalar_t z = margin - input_target + input_data[d];
       if (d == target_idx)
         continue;

       if (z > 0) {
-        real h = (p == 1) ? g : 2*g*z;
+        scalar_t h = (p == 1) ? g : 2*g*z;
         if(weights_data)
           h *= weights_data[target_idx];
         gradInput_target -= h;
@@ -193,7 +193,7 @@ void THNN_(MultiMarginCriterion_updateGradInput)(
     input_data += dim;
     gradInput_data += dim;
   }

-  gradInput_data = gradInput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();

   if (reduction != Reduction::None)
   {
diff --git a/aten/src/THNN/generic/PReLU.c b/aten/src/THNN/generic/PReLU.c
index 7a47008bf862df..6bb3687d4c87d6 100644
--- a/aten/src/THNN/generic/PReLU.c
+++ b/aten/src/THNN/generic/PReLU.c
@@ -14,9 +14,9 @@ void THNN_(PReLU_updateOutput)(
   if (nOutputPlane == 1)
   {
     // handle shared parameter case
-    real w = *weight->data<real>();
-    TH_TENSOR_APPLY2(real, output, real, input,
-      const real r = (*input_data > 0) ? 1 : w;
+    scalar_t w = *weight->data<scalar_t>();
+    TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input,
+      const scalar_t r = (*input_data > 0) ? 1 : w;
       *output_data = *input_data * r;
     );
     return;
@@ -37,15 +37,15 @@ void THNN_(PReLU_updateOutput)(
     }
   }

-  real *output_data = output->data<real>();
-  real *input_data = input->data<real>();
-  real *weight_data = weight->data<real>();
+  scalar_t *output_data = output->data<scalar_t>();
+  scalar_t *input_data = input->data<scalar_t>();
+  scalar_t *weight_data = weight->data<scalar_t>();
   THIndex_t i, j, k;
 #pragma omp parallel for private(j,k)
   for (i = 0; i < bs; ++i)
   {
-    real* n_input_data = input_data + i*nOutputPlane*ks;
-    real* n_output_data = output_data + i*nOutputPlane*ks;
+    scalar_t* n_input_data = input_data + i*nOutputPlane*ks;
+    scalar_t* n_output_data = output_data + i*nOutputPlane*ks;
     for (j = 0; j < nOutputPlane; ++j)
     {
       for (k = 0; k < ks; ++k)
@@ -70,8 +70,8 @@ void THNN_(PReLU_updateGradInput)(

   if (nOutputPlane == 1)
   {
-    real w = weight->data<real>()[0];
-    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
+    scalar_t w = weight->data<scalar_t>()[0];
+    TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input,
       if ((*input_data) > 0)
         *gradInput_data = *gradOutput_data;
       else
@@ -83,10 +83,10 @@ void THNN_(PReLU_updateGradInput)(
   input = THTensor_(newContiguous)(input);
   gradOutput = THTensor_(newContiguous)(gradOutput);
   weight = THTensor_(newContiguous)(weight);
-  const real *input_data = input->data<real>();
-  const real *gradOutput_data = gradOutput->data<real>();
-  const real *weight_data = weight->data<real>();
-  real *gradInput_data = gradInput->data<real>();
+  const scalar_t *input_data = input->data<scalar_t>();
+  const scalar_t *gradOutput_data = gradOutput->data<scalar_t>();
+  const scalar_t *weight_data = weight->data<scalar_t>();
+  scalar_t *gradInput_data = gradInput->data<scalar_t>();

   int64_t bs = 1, ks = 1;
   {
@@ -106,13 +106,13 @@ void THNN_(PReLU_updateGradInput)(
 #pragma omp parallel for private(j,k)
   for (i = 0; i < bs; ++i)
   {
-    const real *n_input_data = input_data + i*nOutputPlane*ks;
-    const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
-    real *n_gradInput_data = gradInput_data + i*nOutputPlane*ks;
+    const scalar_t *n_input_data = input_data + i*nOutputPlane*ks;
+    const scalar_t *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
+    scalar_t *n_gradInput_data = gradInput_data + i*nOutputPlane*ks;

     for (j = 0; j < nOutputPlane; ++j)
     {
-      real w = weight_data[j];
+      scalar_t w = weight_data[j];
       for (k = 0; k < ks; ++k)
       {
         if (n_input_data[k] > 0)
@@ -139,15 +139,15 @@ void THNN_(PReLU_accGradParameters)(
           THTensor *gradWeight,
           accreal scale_)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   int64_t nOutputPlane = THTensor_(numel)(weight);

   if (nOutputPlane == 1)
   {
-    real *gradWeight_data = gradWeight->data<real>();
-    real sum = 0;
-    TH_TENSOR_APPLY2(real, input, real, gradOutput,
+    scalar_t *gradWeight_data = gradWeight->data<scalar_t>();
+    scalar_t sum = 0;
+    TH_TENSOR_APPLY2(scalar_t, input, scalar_t, gradOutput,
       if ((*input_data) <= 0)
         sum += (*input_data) * (*gradOutput_data);
     );
@@ -173,19 +173,19 @@ void THNN_(PReLU_accGradParameters)(
     }
   }

-  const real *input_data = input->data<real>();
-  const real *gradOutput_data = gradOutput->data<real>();
-  real *gradWeight_data = gradWeight->data<real>();
+  const scalar_t *input_data = input->data<scalar_t>();
+  const scalar_t *gradOutput_data = gradOutput->data<scalar_t>();
+  scalar_t *gradWeight_data = gradWeight->data<scalar_t>();

   THIndex_t i, j, k;
   for (i = 0; i < bs; ++i)
   {
-    const real *n_input_data = input_data + i*nOutputPlane*ks;
-    const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
+    const scalar_t *n_input_data = input_data + i*nOutputPlane*ks;
+    const scalar_t *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;

     for (j = 0; j < nOutputPlane; ++j)
     {
-      real sum = 0;
+      scalar_t sum = 0;
       for (k = 0; k < ks; ++k)
         if (n_input_data[k] <= 0)
           sum += n_gradOutput_data[k] * n_input_data[k];
diff --git a/aten/src/THNN/generic/RReLU.c b/aten/src/THNN/generic/RReLU.c
index 8fd46d3c2996c2..ee1cd1e9dfb1b6 100644
--- a/aten/src/THNN/generic/RReLU.c
+++ b/aten/src/THNN/generic/RReLU.c
@@ -13,18 +13,18 @@ void THNN_(RReLU_updateOutput)(
           bool inplace,
           THGenerator *generator)
 {
-  real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
-  real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
+  scalar_t lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
+  scalar_t upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
   if (train)
   {
     // get default random generator
     THTensor_(resizeAs)(noise, input);
     if (inplace)
     {
-      TH_TENSOR_APPLY2(real, input, real, noise,
+      TH_TENSOR_APPLY2(scalar_t, input, scalar_t, noise,
         if (*input_data <= 0)
         {
-          const real r = (real)THRandom_uniform(generator, lower, upper);
+          const scalar_t r = (scalar_t)THRandom_uniform(generator, lower, upper);
           *input_data = (*input_data) * r;
           *noise_data = r;
         }
@@ -38,10 +38,10 @@ void THNN_(RReLU_updateOutput)(
     else
     {
       THTensor_(resizeAs)(output, input);
-      TH_TENSOR_APPLY3(real, input, real, output, real, noise,
+      TH_TENSOR_APPLY3(scalar_t, input, scalar_t, output, scalar_t, noise,
         if (*input_data <= 0)
         {
-          const real r = (real)THRandom_uniform(generator, lower, upper);
+          const scalar_t r = (scalar_t)THRandom_uniform(generator, lower, upper);
           *output_data = (*input_data) * r;
           *noise_data = r;
         }
@@ -55,10 +55,10 @@ void THNN_(RReLU_updateOutput)(
   }
   else
   {
-    const real negSlope = (lower + upper) / 2;
+    const scalar_t negSlope = (lower + upper) / 2;
     if (inplace)
     {
-      TH_TENSOR_APPLY(real, input,
+      TH_TENSOR_APPLY(scalar_t, input,
         if (*input_data <= 0)
         {
           *input_data = *input_data * negSlope;
@@ -69,8 +69,8 @@ void THNN_(RReLU_updateOutput)(
     else
     {
       THTensor_(resizeAs)(output, input);
-      TH_TENSOR_APPLY2(real, input, real, output,
-        const real r = (*input_data) <= 0 ? negSlope : 1;
+      TH_TENSOR_APPLY2(scalar_t, input, scalar_t, output,
+        const scalar_t r = (*input_data) <= 0 ? negSlope : 1;
         *output_data = *input_data * r;
       );
     }
@@ -88,8 +88,8 @@ void THNN_(RReLU_updateGradInput)(
           bool train,
           bool inplace)
 {
-  real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
-  real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
+  scalar_t lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
+  scalar_t upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
   THNN_CHECK_NELEMENT(input, gradOutput);

   if (train && upper - lower > 1E-6)    // e.g. if upper == lower, RReLU behaves like LeakyReLU
   {
@@ -108,10 +108,10 @@ void THNN_(RReLU_updateGradInput)(
   else
   {
     // use constant factor for negative input values
-    const real negSlope = (lower + upper) / 2;
+    const scalar_t negSlope = (lower + upper) / 2;
     if (inplace)
     {
-      TH_TENSOR_APPLY2(real, gradOutput, real, input,
+      TH_TENSOR_APPLY2(scalar_t, gradOutput, scalar_t, input,
         if (*input_data <= 0)
         {
           *gradOutput_data = (*gradOutput_data) * negSlope;
@@ -122,7 +122,7 @@ void THNN_(RReLU_updateGradInput)(
     else
     {
       THTensor_(resizeAs)(gradInput, input);
-      TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
+      TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input,
         *gradInput_data = (*input_data) <= 0 ? (*gradOutput_data) * negSlope : (*gradOutput_data);
       );
     }
diff --git a/aten/src/THNN/generic/Sigmoid.c b/aten/src/THNN/generic/Sigmoid.c
index 2b218ddf455764..704df9b9992cda 100644
--- a/aten/src/THNN/generic/Sigmoid.c
+++ b/aten/src/THNN/generic/Sigmoid.c
@@ -18,8 +18,8 @@ void THNN_(Sigmoid_updateGradInput)(
 {
   THNN_CHECK_NELEMENT(output, gradOutput);
   THTensor_(resizeAs)(gradInput, output);
-  TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output,
-    real z = *output_data;
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, output,
+    scalar_t z = *output_data;
     *gradInput_data = *gradOutput_data * (1. - z) * z;
   );
 }
diff --git a/aten/src/THNN/generic/SmoothL1Criterion.c b/aten/src/THNN/generic/SmoothL1Criterion.c
index e8b2398483c209..4006b127f5271a 100644
--- a/aten/src/THNN/generic/SmoothL1Criterion.c
+++ b/aten/src/THNN/generic/SmoothL1Criterion.c
@@ -13,8 +13,8 @@ void THNN_(SmoothL1Criterion_updateOutput)(
   if (reduction == Reduction::None) {
     THTensor_(resizeAs)(output, input);
-    TH_TENSOR_APPLY3(real, input, real, target, real, output,
-      real z = fabs(*input_data - *target_data);
+    TH_TENSOR_APPLY3(scalar_t, input, scalar_t, target, scalar_t, output,
+      scalar_t z = fabs(*input_data - *target_data);
       *output_data = z < 1 ? 0.5 * z * z : z - 0.5;
     );
     return;
@@ -22,9 +22,9 @@ void THNN_(SmoothL1Criterion_updateOutput)(

   THTensor_(resize1d)(output, 1);

-  real sum = 0;
-  TH_TENSOR_APPLY2(real, input, real, target,
-    real z = fabs(*input_data - *target_data);
+  scalar_t sum = 0;
+  TH_TENSOR_APPLY2(scalar_t, input, scalar_t, target,
+    scalar_t z = fabs(*input_data - *target_data);
     sum += z < 1 ? 0.5*z*z : z - 0.5;
   );

@@ -47,8 +47,8 @@ void THNN_(SmoothL1Criterion_updateGradInput)(
   if (reduction == Reduction::None) {
     THNN_CHECK_SHAPE(gradOutput, input);
-    TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
-      real x = *input_data - *target_data;
+    TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target,
+      scalar_t x = *input_data - *target_data;
       if (x < -1.) {
         *gradInput_data = -1.;
       } else if (x > 1.) {
@@ -57,17 +57,17 @@ void THNN_(SmoothL1Criterion_updateGradInput)(
         *gradInput_data = x;
       }
     );
-    TH_TENSOR_APPLY2(real, gradInput, real, gradOutput,
+    TH_TENSOR_APPLY2(scalar_t, gradInput, scalar_t, gradOutput,
       *gradInput_data *= *gradOutput_data;
     );
     return;
   }

   THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1);
-  real norm = (reduction == Reduction::ElementwiseMean ? 1./((real)THTensor_(nElement)(input)) : 1.) * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0);
+  scalar_t norm = (reduction == Reduction::ElementwiseMean ? 1./((scalar_t)THTensor_(nElement)(input)) : 1.) * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0);

-  TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
-    real x = *input_data - *target_data;
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target,
+    scalar_t x = *input_data - *target_data;
     if (x < -1.)
       *gradInput_data = - norm;
     else if (x > 1.)
diff --git a/aten/src/THNN/generic/SoftMarginCriterion.c b/aten/src/THNN/generic/SoftMarginCriterion.c
index 08c879e169e11b..9e65d983cb81ba 100644
--- a/aten/src/THNN/generic/SoftMarginCriterion.c
+++ b/aten/src/THNN/generic/SoftMarginCriterion.c
@@ -14,18 +14,18 @@ void THNN_(SoftMarginCriterion_updateOutput)(
   if (reduction == Reduction::None) {
     THTensor_(resizeAs)(output, input);
-    TH_TENSOR_APPLY3(real, input, real, target, real, output,
+    TH_TENSOR_APPLY3(scalar_t, input, scalar_t, target, scalar_t, output,
       *output_data = log(1. + exp(-*input_data * *target_data));)
     return;
   }

   THTensor_(resize1d)(output, 1);

-  real sum;
+  scalar_t sum;

   sum = 0;
-  TH_TENSOR_APPLY2(real, input, real, target,
-    real z = log(1. + exp(-*input_data* *target_data));
+  TH_TENSOR_APPLY2(scalar_t, input, scalar_t, target,
+    scalar_t z = log(1. + exp(-*input_data* *target_data));
     sum += z;)

   if (reduction == Reduction::ElementwiseMean)
@@ -48,17 +48,17 @@ void THNN_(SoftMarginCriterion_updateGradInput)(
   if (!reduction)
   {
     THNN_CHECK_SHAPE(gradOutput, input);
-    TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
-      real z = exp(-*target_data * *input_data);
+    TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target,
+      scalar_t z = exp(-*target_data * *input_data);
       *gradInput_data = -*target_data * z/(1. + z);)
     THTensor_(cmul)(gradInput, gradInput, gradOutput);
     return;
   }

-  real norm = (reduction == Reduction::ElementwiseMean ? 1./((real)THTensor_(nElement)(input)) : 1.);
+  scalar_t norm = (reduction == Reduction::ElementwiseMean ? 1./((scalar_t)THTensor_(nElement)(input)) : 1.);

-  TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
-    real z = exp(-*target_data * *input_data);
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, input, scalar_t, target,
+    scalar_t z = exp(-*target_data * *input_data);
     *gradInput_data = -norm*(*target_data)*z/(1. + z) * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0);)
 }
diff --git a/aten/src/THNN/generic/SoftPlus.c b/aten/src/THNN/generic/SoftPlus.c
index 6491e66d63f18c..cd6c5db02dd6cd 100644
--- a/aten/src/THNN/generic/SoftPlus.c
+++ b/aten/src/THNN/generic/SoftPlus.c
@@ -9,12 +9,12 @@ void THNN_(SoftPlus_updateOutput)(
           accreal beta_,
           accreal threshold_)
 {
-  real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
-  real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
+  scalar_t beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
+  scalar_t threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
   THTensor_(resizeAs)(output, input);

   // f(x) = 1/beta * log(1 + exp(beta * x))
-  TH_TENSOR_APPLY2(real, output, real, input,               \
+  TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input,               \
     *output_data = (*input_data * beta) > threshold ? *input_data : THLog1p(exp(*input_data * beta)) / beta;
   );
 }

@@ -28,8 +28,8 @@ void THNN_(SoftPlus_updateGradInput)(
           accreal beta_,
           accreal threshold_)
 {
-  real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
-  real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
+  scalar_t beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
+  scalar_t threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   THTensor_(resizeAs)(gradInput, output);

@@ -38,8 +38,8 @@ void THNN_(SoftPlus_updateGradInput)(
   // y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1)
   // THEREFORE:
   // d/dx(f(x)) = (exp(k*y) - 1) / exp(k*y)
-  TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output,
-    real z = exp(*output_data * beta);
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, output,
+    scalar_t z = exp(*output_data * beta);
     *gradInput_data = (*output_data * beta) > threshold ? *gradOutput_data : *gradOutput_data * (z - 1.)/z;
   );
 }
diff --git a/aten/src/THNN/generic/SoftShrink.c b/aten/src/THNN/generic/SoftShrink.c
index e7795086892c4e..86051db1392aea 100644
--- a/aten/src/THNN/generic/SoftShrink.c
+++ b/aten/src/THNN/generic/SoftShrink.c
@@ -8,10 +8,10 @@ void THNN_(SoftShrink_updateOutput)(
           THTensor *output,
           accreal lambda_)
 {
-  real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
+  scalar_t lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
   THTensor_(resizeAs)(output, input);

-  TH_TENSOR_APPLY2(real, output, real, input,
+  TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input,
     if ((*input_data) > lambda)
       *output_data = *input_data - lambda;
     else if ((*input_data) < -lambda)
@@ -28,10 +28,10 @@ void THNN_(SoftShrink_updateGradInput)(
           THTensor *gradInput,
           accreal lambda_)
 {
-  real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
+  scalar_t lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   THTensor_(resizeAs)(gradInput, input);
-  TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
+  TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input,
     if ((*input_data) > lambda || (*input_data) < -lambda)
       *gradInput_data = (*gradOutput_data);
     else
diff --git a/aten/src/THNN/generic/SparseLinear.c b/aten/src/THNN/generic/SparseLinear.c
index e9ea4a9d330a35..8d1077e49ea451 100644
--- a/aten/src/THNN/generic/SparseLinear.c
+++ b/aten/src/THNN/generic/SparseLinear.c
@@ -6,8 +6,8 @@
 #include <omp.h>
 #endif

-#define ROW_PTR2(t, r) (t->data<real>() + (r) * (t)->stride(0))
-#define COL_PTR2(t, c) (t->data<real>() + (c) * (t)->stride(1))
+#define ROW_PTR2(t, r) (t->data<scalar_t>() + (r) * (t)->stride(0))
+#define COL_PTR2(t, c) (t->data<scalar_t>() + (c) * (t)->stride(1))

 static bool THNN_(checkLegacyInput)(THTensor* t)
 {
@@ -29,14 +29,14 @@ static bool THNN_(checkSize1D)(THTensor* t, int64_t size0)
 {
   return !t->is_empty() && THTensor_nDimensionLegacyNoScalars(t) == 1 && THTensor_sizeLegacyNoScalars(t, 0) == size0;
 }

-static void THNN_(set1d)(THTensor *t, int64_t x0, real value) {
+static void THNN_(set1d)(THTensor *t, int64_t x0, scalar_t value) {
   THStorage_(set)(THTensor_getStoragePtr(t), t->storage_offset() + x0*t->stride(0), value);
 }
-static real THNN_(get3d)(const THTensor *t, int64_t x0, int64_t x1, int64_t x2) {
+static scalar_t THNN_(get3d)(const THTensor *t, int64_t x0, int64_t x1, int64_t x2) {
   return THStorage_(get)(THTensor_getStoragePtr(t), t->storage_offset() +
                          x0*t->stride(0) + x1*t->stride(1) + x2*t->stride(2));
 }
-static real THNN_(get2d)(const THTensor *t, int64_t x0, int64_t x1) {
+static scalar_t THNN_(get2d)(const THTensor *t, int64_t x0, int64_t x1) {
   return THStorage_(get)(THTensor_getStoragePtr(t), t->storage_offset() +
                          x0*t->stride(0) + x1*t->stride(1));
 }
@@ -83,7 +83,7 @@ void THNN_(SparseLinear_updateOutput)(
     int64_t i_start = THLongTensor_get1d(csr, h);
     int64_t i_end = THLongTensor_get1d(csr, h+1);
     for (i = i_start; i < i_end; i++) {
-      real val = THNN_(get2d)(input, i, 2);
+      scalar_t val = THNN_(get2d)(input, i, 2);
       if (val == 0) {
         continue;
       }
@@ -138,7 +138,7 @@ void THNN_(SparseLinear_legacyUpdateOutput)(
           batchSize > 1 && batchSize * nnz * outDim > 10000)
   for (h = 0; h < batchSize; h++) {
     for (i = 0; i < nnz; i++) {
-      real val = THNN_(get3d)(input, h, i, 1);
+      scalar_t val = THNN_(get3d)(input, h, i, 1);
       if (val == 0) {
         continue;
       }
@@ -176,8 +176,8 @@ void THNN_(SparseLinear_accGradParameters)(
           accreal weightDecay_,
           accreal scale_)
 {
-  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   int64_t h, i, col, hp0, hp1;
   int64_t outDim = THTensor_(size)(weight, 0);
   int64_t inDim = THTensor_(size)(weight, 1);
@@ -214,7 +214,7 @@ void THNN_(SparseLinear_accGradParameters)(
     int64_t i_start = THLongTensor_get1d(csc, col);
     int64_t i_end = THLongTensor_get1d(csc, col+1);
     for (i = i_start; i < i_end; i++) {
-      real val = scale * THNN_(get2d)(input, i, 2);
+      scalar_t val = scale * THNN_(get2d)(input, i, 2);

       h = (int64_t)(THNN_(get2d)(input, i, 0)) - 1;
       int64_t offset = (int64_t)(THNN_(get2d)(input, i, 1)) - 1;
@@ -256,8 +256,8 @@ void THNN_(SparseLinear_legacyAccGradParameters)(
           accreal weightDecay_,
           accreal scale_)
 {
-  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   int64_t h, i;
   int64_t outDim = THTensor_(size)(weight, 0);
   int64_t inDim = THTensor_(size)(weight, 1);
@@ -280,7 +280,7 @@ void THNN_(SparseLinear_legacyAccGradParameters)(
           batchSize * nnz * outDim > 10000)
   for (i = 0; i < nnz; i++) {
     for (h = 0; h < batchSize; h++) {
-      real val = scale * THNN_(get3d)(input, h, i, 1);
+      scalar_t val = scale * THNN_(get3d)(input, h, i, 1);
       if (val == 0) {
         continue;
       }
@@ -322,7 +322,7 @@ void THNN_(SparseLinear_updateParameters)(
          THTensor *lastInput,
           accreal learningRate_)
 {
-  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
+  scalar_t learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
   int64_t i;
   int64_t outDim = weight->size(0);
   int64_t inDim = weight->size(1);
@@ -341,7 +341,7 @@ void THNN_(SparseLinear_updateParameters)(
   THTensor* offsets = THTensor_(newWithSize1d)(nnz);
   int64_t cnt = 0;
   for (i = 0; i < nnz; i++) {
-    real val = THNN_(get2d)(lastInput, i, 2);
+    scalar_t val = THNN_(get2d)(lastInput, i, 2);
     if (val == 0) {
       continue;
     }
@@ -365,7 +365,7 @@ void THNN_(SparseLinear_updateParameters)(
   c10::raw::intrusive_ptr::decref(offsets);

   cnt = 1;
-  real* uniqueOffsets_p = uniqueOffsets->data<real>();
+  scalar_t* uniqueOffsets_p = uniqueOffsets->data<scalar_t>();
   for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
     if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
       uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
@@ -396,7 +396,7 @@ void THNN_(SparseLinear_legacyUpdateParameters)(
           THTensor *lastInput,
           accreal learningRate_)
 {
-  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
+  scalar_t learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
   int64_t h, i;
   int64_t outDim = weight->size(0);
   int64_t inDim = weight->size(1);
@@ -417,7 +417,7 @@ void THNN_(SparseLinear_legacyUpdateParameters)(
   int64_t cnt = 0;
   for (h = 0; h < batchSize; h++) {
     for (i = 0; i < nnz; i++) {
-      real val = THNN_(get3d)(lastInput, h, i, 1);
+      scalar_t val = THNN_(get3d)(lastInput, h, i, 1);
       if (val == 0 ) {
         continue;
       }
@@ -441,7 +441,7 @@ void THNN_(SparseLinear_legacyUpdateParameters)(
   c10::raw::intrusive_ptr::decref(offsets);

   cnt = 1;
-  real* uniqueOffsets_p = uniqueOffsets->data<real>();
+  scalar_t* uniqueOffsets_p = uniqueOffsets->data<scalar_t>();
   for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
     if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
       uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
@@ -491,7 +491,7 @@ void THNN_(SparseLinear_zeroGradParameters)(
       int64_t offset = (int64_t)(THNN_(get2d)(lastInput, i, 1)) - 1;
       if (offset >= 0 && offset < inDim) {
-        real* pGradWeight = COL_PTR2(gradWeight, offset);
+        scalar_t* pGradWeight = COL_PTR2(gradWeight, offset);
         if (gradWeight->stride(0) == 1) {
           THVector_(fill)(pGradWeight, 0, outDim);
         } else {
@@ -539,7 +539,7 @@ void THNN_(SparseLinear_legacyZeroGradParameters)(
       int64_t offset = (int64_t)(THNN_(get3d)(lastInput, h, i, 0)) - 1;
       if (offset >= 0 && offset < inDim) {
-        real* pGradWeight = COL_PTR2(gradWeight, offset);
+        scalar_t* pGradWeight = COL_PTR2(gradWeight, offset);
         if (gradWeight->stride(0) == 1) {
           THVector_(fill)(pGradWeight, 0, outDim);
         } else {
diff --git a/aten/src/THNN/generic/SpatialAdaptiveAveragePooling.c b/aten/src/THNN/generic/SpatialAdaptiveAveragePooling.c
index 0e22f94e442f7d..0ac3b72957013d 100644
--- a/aten/src/THNN/generic/SpatialAdaptiveAveragePooling.c
+++ b/aten/src/THNN/generic/SpatialAdaptiveAveragePooling.c
@@ -10,8 +10,8 @@
 // 4d tensor B x D x H x W

 static void THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(
-          real *input_p,
-          real *output_p,
+          scalar_t *input_p,
+          scalar_t *output_p,
           int64_t sizeD,
           int64_t isizeH,
           int64_t isizeW,
@@ -41,17 +41,17 @@ static void THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(
       int kW = iendW - istartW;

       /* local pointers */
-      real *ip = input_p + d*istrideD + istartH*istrideH + istartW*istrideW;
-      real *op = output_p + d*osizeH*osizeW + oh*osizeW + ow;
+      scalar_t *ip = input_p + d*istrideD + istartH*istrideH + istartW*istrideW;
+      scalar_t *op = output_p + d*osizeH*osizeW + oh*osizeW + ow;

       /* compute local average: */
-      real sum = 0;
+      scalar_t sum = 0;
       int ih, iw;
       for(ih = 0; ih < kH; ih++)
       {
         for(iw = 0; iw < kW; iw++)
         {
-          real val = *(ip + ih*istrideH + iw*istrideW);
+          scalar_t val = *(ip + ih*istrideH + iw*istrideW);
           sum += val;
         }
       }
@@ -83,8 +83,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateOutput)(
   int64_t istrideH = 0;
   int64_t istrideW = 0;

-  real *input_data = nullptr;
-  real *output_data = nullptr;
+  scalar_t *input_data = nullptr;
+  scalar_t *output_data = nullptr;

   THNN_ARGCHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input,
                 "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s");
@@ -113,8 +113,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateOutput)(
   {
     THTensor_(resize3d)(output, sizeD, osizeH, osizeW);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();

     THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data,
                                                       sizeD,
@@ -129,8 +129,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateOutput)(

     THTensor_(resize4d)(output, sizeB, sizeD, osizeH, osizeW);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();
 #pragma omp parallel for private(b)
     for (b = 0; b < sizeB; b++)
@@ -146,8 +146,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateOutput)(
 }

 static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(
-          real *gradInput_p,
-          real *gradOutput_p,
+          scalar_t *gradInput_p,
+          scalar_t *gradOutput_p,
           int64_t sizeD,
           int64_t isizeH,
           int64_t isizeW,
@@ -158,8 +158,8 @@ static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(
 #pragma omp parallel for private(d)
   for (d = 0; d < sizeD; d++)
   {
-    real *gradInput_p_d = gradInput_p + d*isizeW*isizeH;
-    real *gradOutput_p_d = gradOutput_p + d*osizeW*osizeH;
+    scalar_t *gradInput_p_d = gradInput_p + d*isizeW*isizeH;
+    scalar_t *gradOutput_p_d = gradOutput_p + d*osizeW*osizeH;

     /* calculate average */
     int64_t oh, ow;
@@ -176,7 +176,7 @@ static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(
         int iendW = END_IND(ow, osizeW, isizeW);
         int kW = iendW - istartW;

-        real grad_delta = gradOutput_p_d[oh*osizeW +ow] / kH / kW;
+        scalar_t grad_delta = gradOutput_p_d[oh*osizeW +ow] / kH / kW;

         int ih, iw;
         for(ih = istartH; ih < iendH; ih++)
@@ -207,8 +207,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)(
   int isizeW;
   int osizeH;
   int osizeW;
-  real *gradInput_data;
-  real *gradOutput_data;
+  scalar_t *gradInput_data;
+  scalar_t *gradOutput_data;

   /* get contiguous gradOutput */
   gradOutput = THTensor_(newContiguous)(gradOutput);
@@ -232,8 +232,8 @@ void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)(
   osizeW = gradOutput->size(dimW);

   /* get raw pointers */
-  gradInput_data = gradInput->data<real>();
-  gradOutput_data = gradOutput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();
+  gradOutput_data = gradOutput->data<scalar_t>();

   /* backprop */
   if (input->dim() == 3)
diff --git a/aten/src/THNN/generic/SpatialAdaptiveMaxPooling.c b/aten/src/THNN/generic/SpatialAdaptiveMaxPooling.c
index 4f096f6c24911b..fd8a648efaa6af 100644
--- a/aten/src/THNN/generic/SpatialAdaptiveMaxPooling.c
+++ b/aten/src/THNN/generic/SpatialAdaptiveMaxPooling.c
@@ -10,8 +10,8 @@
 // 4d tensor B x D x H x W

 static void THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(
-          real *input_p,
-          real *output_p,
+          scalar_t *input_p,
+          scalar_t *output_p,
           THIndex_t *ind_p,
           int64_t sizeD,
           int64_t isizeH,
@@ -41,19 +41,19 @@ static void THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(
       int kW = iendW - istartW;

       /* local pointers */
-      real *ip = input_p + d*istrideD + istartH*istrideH + istartW*istrideW;
-      real *op = output_p + d*osizeH*osizeW + oh*osizeW + ow;
+      scalar_t *ip = input_p + d*istrideD + istartH*istrideH + istartW*istrideW;
+      scalar_t *op = output_p + d*osizeH*osizeW + oh*osizeW + ow;
       THIndex_t *indp = ind_p + d*osizeH*osizeW + oh*osizeW + ow;

       /* compute local max: */
       int64_t maxindex = -1;
-      real maxval = -FLT_MAX;
+      scalar_t maxval = -FLT_MAX;
       int ih, iw;
       for(ih = 0; ih < kH; ih++)
       {
         for(iw = 0; iw < kW; iw++)
         {
-          real val = *(ip + ih*istrideH + iw*istrideW);
+          scalar_t val = *(ip + ih*istrideH + iw*istrideW);
           if ((val > maxval) || std::isnan(val))
           {
             maxval = val;
@@ -92,8 +92,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
   int64_t istrideW = 0;
   int64_t istrideB = 0;

-  real *input_data = nullptr;
-  real *output_data = nullptr;
+  scalar_t *input_data = nullptr;
+  scalar_t *output_data = nullptr;
   THIndex_t *indices_data = nullptr;

   THNN_ARGCHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input,
@@ -124,8 +124,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
     /* indices will contain i,j locations for each output point */
     THIndexTensor_(resize3d)(indices, sizeD, osizeH, osizeW);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();
     indices_data = THIndexTensor_(data)(indices);

     THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data,
@@ -144,8 +144,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
     /* indices will contain i,j locations for each output point */
     THIndexTensor_(resize4d)(indices, sizeB, sizeD, osizeH, osizeW);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();
     indices_data = THIndexTensor_(data)(indices);

 #pragma omp parallel for private(b)
@@ -163,8 +163,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
 }

 static void THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(
-          real *gradInput_p,
-          real *gradOutput_p,
+          scalar_t *gradInput_p,
+          scalar_t *gradOutput_p,
           THIndex_t *ind_p,
           int64_t sizeD,
           int64_t isizeH,
@@ -176,8 +176,8 @@ static void THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(
 #pragma omp parallel for private(d)
   for (d = 0; d < sizeD; d++)
   {
-    real *gradInput_p_d = gradInput_p + d*isizeH*isizeW;
-    real *gradOutput_p_d = gradOutput_p + d*osizeH*osizeW;
+    scalar_t *gradInput_p_d = gradInput_p + d*isizeH*isizeW;
+    scalar_t *gradOutput_p_d = gradOutput_p + d*osizeH*osizeW;
     THIndex_t *ind_p_d = ind_p + d*osizeH*osizeW;

     /* calculate max points */
@@ -211,8 +211,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)(
   int isizeW;
   int osizeH;
   int osizeW;
-  real *gradInput_data;
-  real *gradOutput_data;
+  scalar_t *gradInput_data;
+  scalar_t *gradOutput_data;
   THIndex_t *indices_data;

   /* get contiguous gradOutput */
@@ -236,8 +236,8 @@ void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)(
   osizeW = gradOutput->size(dimW);

   /* get raw pointers */
-  gradInput_data = gradInput->data<real>();
-  gradOutput_data = gradOutput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();
+  gradOutput_data = gradOutput->data<scalar_t>();
   indices_data = THIndexTensor_(data)(indices);

   /* backprop */
diff --git a/aten/src/THNN/generic/SpatialAveragePooling.c b/aten/src/THNN/generic/SpatialAveragePooling.c
index 310586d5918f1c..623335170c48e9 100644
--- a/aten/src/THNN/generic/SpatialAveragePooling.c
+++ b/aten/src/THNN/generic/SpatialAveragePooling.c
@@ -83,8 +83,8 @@ void THNN_(SpatialAveragePooling_updateOutput)(
           bool ceil_mode,
           bool count_include_pad)
 {
-  real *output_data;
-  real *input_data;
+  scalar_t *output_data;
+  scalar_t *input_data;

   int dimw = 2;
   int dimh = 1;
@@ -140,8 +140,8 @@ void THNN_(SpatialAveragePooling_updateOutput)(
   input = THTensor_(newContiguous)(input);
   THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
-  input_data = input->data<real>();
-  output_data = output->data<real>();
+  input_data = input->data<scalar_t>();
+  output_data = output->data<scalar_t>();

 #pragma omp parallel for private(k)
   for(k = 0; k < nInputPlane; k++)
@@ -151,8 +151,8 @@ void THNN_(SpatialAveragePooling_updateOutput)(
     {
       int64_t xx, yy;
      /* For all output pixels... */
-      real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
-      real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
+      scalar_t *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
+      scalar_t *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
       int64_t i;
       for(i = 0; i < outputWidth*outputHeight; i++)
         ptr_output[i] = 0;
@@ -172,7 +172,7 @@ void THNN_(SpatialAveragePooling_updateOutput)(
           hend = fminf(hend, inputHeight);
           wend = fminf(wend, inputWidth);

-          real sum = 0;
+          scalar_t sum = 0;

           int divide_factor;
           if(count_include_pad)
@@ -222,8 +222,8 @@ void THNN_(SpatialAveragePooling_updateGradInput)(
   int64_t outputHeight;
   int64_t nInputPlane; // number of channels (or colors)

-  real *gradOutput_data;
-  real *gradInput_data;
+  scalar_t *gradOutput_data;
+  scalar_t *gradInput_data;

   int64_t k;

@@ -271,8 +271,8 @@ void THNN_(SpatialAveragePooling_updateGradInput)(
   gradOutput = THTensor_(newContiguous)(gradOutput);
   THArgCheck(THTensor_(isContiguous)(gradInput), 4, "gradInput must be contiguous");
-  gradInput_data = gradInput->data<real>();
-  gradOutput_data = gradOutput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();
+  gradOutput_data = gradOutput->data<scalar_t>();

 #pragma omp parallel for private(k)
   for(k = 0; k < nInputPlane; k++)
   {
     int64_t p;
     for(p = 0; p < nbatch; p++)
     {
-      real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
+      scalar_t *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
       int64_t xx, yy;

-      real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
-      real *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
+      scalar_t* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
+      scalar_t *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
       int64_t i;
       for(i=0; i<inputWidth*inputHeight; i++)
         ptr_gi[i] = 0.0;
diff --git a/aten/src/THNN/generic/SpatialClassNLLCriterion.c b/aten/src/THNN/generic/SpatialClassNLLCriterion.c
--- a/aten/src/THNN/generic/SpatialClassNLLCriterion.c
+++ b/aten/src/THNN/generic/SpatialClassNLLCriterion.c
@@ ... @@ void THNN_(SpatialClassNLLCriterion_updateOutput)(
-  real *input_data = input->data<real>();
+  scalar_t *input_data = input->data<scalar_t>();
   THIndex_t *target_data = THIndexTensor_(data)(target);
-  real *weights_data = weights ? weights->data<real>() : NULL;
-  real *output_data = output->data<real>();
-  real *total_weight_data = total_weight->data<real>();
+  scalar_t *weights_data = weights ? weights->data<scalar_t>() : NULL;
+  scalar_t *output_data = output->data<scalar_t>();
+  scalar_t *total_weight_data = total_weight->data<scalar_t>();

   int64_t batch_size = THTensor_(size)(input, 0);
   int64_t n_classes = THTensor_(size)(input, 1);
   int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
   int64_t sample_size = map_size * n_classes;

-  real total_weight_acc = 0;
-  real output_acc = 0;
+  scalar_t total_weight_acc = 0;
+  scalar_t output_acc = 0;
   for (int b = 0; b < batch_size; b++) {
     for (int elem = 0; elem < map_size; elem++) {
       int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE;
       if (cur_target == ignore_index) continue;
       THAssert(cur_target >= 0 && cur_target < n_classes);

-      real cur_weight = weights ? weights_data[cur_target] : 1.0f;
+      scalar_t cur_weight = weights ? weights_data[cur_target] : 1.0f;
       total_weight_acc += cur_weight;
       output_acc -= input_data[b * sample_size + cur_target * map_size + elem] * cur_weight;
     }
@@ -161,8 +161,8 @@ void THNN_(SpatialClassNLLCriterion_updateGradInput)(
         if (cur_target == ignore_index) {
           continue;
         }
-        real value = -(weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f);
-        real gradOutput_value = THTensor_(fastGet3d)(gradOutput, b, h, w);
+        scalar_t value = -(weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f);
+        scalar_t gradOutput_value = THTensor_(fastGet3d)(gradOutput, b, h, w);
         THTensor_(fastSet4d)(gradInput, b, cur_target, h, w, value * gradOutput_value);
       }
     }
@@ -172,7 +172,7 @@ void THNN_(SpatialClassNLLCriterion_updateGradInput)(

   THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1);

-  real *total_weight_data = total_weight->data<real>();
+  scalar_t *total_weight_data = total_weight->data<scalar_t>();
   if (*total_weight_data <= 0)
     return;

@@ -180,15 +180,15 @@ void THNN_(SpatialClassNLLCriterion_updateGradInput)(
   weights = weights ? THTensor_(newContiguous)(weights) : NULL;

   THIndex_t *target_data = THIndexTensor_(data)(target);
-  real *weights_data = weights ? weights->data<real>() : NULL;
-  real *gradInput_data = gradInput->data<real>();
+  scalar_t *weights_data = weights ? weights->data<scalar_t>() : NULL;
+  scalar_t *gradInput_data = gradInput->data<scalar_t>();

   int64_t batch_size = THTensor_(size)(input, 0);
   int64_t n_classes = THTensor_(size)(input, 1);
   int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
   int64_t sample_size = map_size * n_classes;

-  real normalize = (reduction == Reduction::ElementwiseMean) ? *total_weight_data : 1.0f;
+  scalar_t normalize = (reduction == Reduction::ElementwiseMean) ? *total_weight_data : 1.0f;

   int b;
 #pragma omp parallel for
diff --git a/aten/src/THNN/generic/SpatialConvolutionLocal.c b/aten/src/THNN/generic/SpatialConvolutionLocal.c
index 44c55ce0e2ceb4..1fcb645db955c5 100644
--- a/aten/src/THNN/generic/SpatialConvolutionLocal.c
+++ b/aten/src/THNN/generic/SpatialConvolutionLocal.c
@@ -273,7 +273,7 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(

 static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
      (THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
-      THTensor *finput, real scale,
+      THTensor *finput, scalar_t scale,
       int kW, int kH, int dW, int dH, int padW, int padH,
       int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
       int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
@@ -316,7 +316,7 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
 {
   THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
   THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   gradWeight = THNN_(view_weight_local)(gradWeight);

   THNN_(SpatialConvolutionLocal_shapeCheck)
diff --git a/aten/src/THNN/generic/SpatialConvolutionMM.c b/aten/src/THNN/generic/SpatialConvolutionMM.c
index 6597a6ae3a4c5e..9f8b8e7c90c925 100644
--- a/aten/src/THNN/generic/SpatialConvolutionMM.c
+++ b/aten/src/THNN/generic/SpatialConvolutionMM.c
@@ -316,7 +316,7 @@ static void THNN_(SpatialConvolutionMM_accGradParameters_frame)(
           THTensor *gradWeight,
           THTensor *gradBias,
           THTensor *finput,
-          real scale)
+          scalar_t scale)
 {
   int64_t i;
   THTensor *gradOutput2d = THTensor_(newWithStorage2d)
@@ -335,8 +335,8 @@ static void THNN_(SpatialConvolutionMM_accGradParameters_frame)(
     for(i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++)
     {
       int64_t k;
-      real sum = 0;
-      real *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
+      scalar_t sum = 0;
+      scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
       for(k = 0; k < gradOutput2d->size(1); k++)
         sum += data[k];
       (THStorage_(data)(THTensor_getStoragePtr(gradBias)) + gradBias->storage_offset())[i] += scale*sum;
@@ -362,7 +362,7 @@ void THNN_(SpatialConvolutionMM_accGradParameters)(
           int padH,
           accreal scale_)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   if (gradWeight) {
     THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
     gradWeight = THNN_(newViewWeightMM2d)(gradWeight);
diff --git a/aten/src/THNN/generic/SpatialConvolutionMap.c b/aten/src/THNN/generic/SpatialConvolutionMap.c
index 52401a3d1d61ae..107d63090a2620 100644
--- a/aten/src/THNN/generic/SpatialConvolutionMap.c
+++ b/aten/src/THNN/generic/SpatialConvolutionMap.c
@@ -52,11 +52,11 @@ void THNN_(SpatialConvolutionMap_updateOutput)(
   connTable = THTensor_(newContiguous)(connTable);

   /* get raw pointers */
-  real *input_data = input->data<real>();
-  real *output_data = output->data<real>();
-  real *weight_data = weight->data<real>();
-  real *bias_data = bias->data<real>();
-  real *connTable_data = connTable->data<real>();
+  scalar_t *input_data = input->data<scalar_t>();
+  scalar_t *output_data = output->data<scalar_t>();
+  scalar_t *weight_data = weight->data<scalar_t>();
+  scalar_t *bias_data = bias->data<scalar_t>();
+  scalar_t *connTable_data = connTable->data<scalar_t>();

   int64_t p;
 #pragma omp parallel for private(p)
@@ -66,9 +66,9 @@ void THNN_(SpatialConvolutionMap_updateOutput)(
     for (m = 0; m < nbatch; m++)
     {
       /* add bias */
-      real *ptr_output = output_data + p*output_w*output_h + m*nOutputPlane*output_w*output_h;
+      scalar_t *ptr_output = output_data + p*output_w*output_h + m*nOutputPlane*output_w*output_h;
       int64_t j, k;
-      real z= bias_data[p];
+      scalar_t z= bias_data[p];
       for (j = 0; j < output_h*output_w; j++)
         ptr_output[j] = z;

@@ -143,10 +143,10 @@ void THNN_(SpatialConvolutionMap_updateGradInput)(
   THTensor_(zero)(gradInput);

   /* get raw pointers */
-  real *gradInput_data = gradInput->data<real>();
-  real *gradOutput_data = gradOutput->data<real>();
-  real *weight_data = weight->data<real>();
-  real *connTable_data = connTable->data<real>();
+  scalar_t *gradInput_data = gradInput->data<scalar_t>();
+  scalar_t *gradOutput_data = gradOutput->data<scalar_t>();
+  scalar_t *weight_data = weight->data<scalar_t>();
+  scalar_t *connTable_data = connTable->data<scalar_t>();

   int64_t p;
 #pragma omp parallel for private(p)
@@ -194,7 +194,7 @@ void THNN_(SpatialConvolutionMap_accGradParameters)(
           int dW, int dH,
           accreal scale_)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THArgCheck(
     gradWeight != NULL && !gradWeight->is_empty() && gradWeight->dim() == 3
     && connTable != NULL && connTable->size(0) == gradWeight->size(0), 5,
@@ -226,10 +226,10 @@ void THNN_(SpatialConvolutionMap_accGradParameters)(
     THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");

   /* get raw pointers */
-  real *input_data = input->data<real>();
-  real *gradOutput_data = gradOutput->data<real>();
-  real *gradWeight_data = gradWeight->data<real>();
-  real *gradBias_data = gradBias->data<real>();
+  scalar_t *input_data = input->data<scalar_t>();
+  scalar_t *gradOutput_data = gradOutput->data<scalar_t>();
+  scalar_t *gradWeight_data = gradWeight->data<scalar_t>();
+  scalar_t *gradBias_data = gradBias->data<scalar_t>();
*gradBias_data = gradBias->data<scalar_t>();

   int64_t k;

@@ -240,7 +240,7 @@ void THNN_(SpatialConvolutionMap_accGradParameters)(
     int64_t m;
     for (m = 0; m < nbatch; m++)
     {
-      real *ptr_gradOutput = gradOutput_data + k*output_w*output_h + m*nOutputPlane*output_w*output_h;
+      scalar_t *ptr_gradOutput = gradOutput_data + k*output_w*output_h + m*nOutputPlane*output_w*output_h;
       int64_t l;
       for (l = 0; l < output_h*output_w; l++)
         gradBias_data[k] += scale*ptr_gradOutput[l];
diff --git a/aten/src/THNN/generic/SpatialDilatedConvolution.c b/aten/src/THNN/generic/SpatialDilatedConvolution.c
index d445017894c469..6a9a81e21f3d4c 100644
--- a/aten/src/THNN/generic/SpatialDilatedConvolution.c
+++ b/aten/src/THNN/generic/SpatialDilatedConvolution.c
@@ -154,10 +154,10 @@ void THNN_(SpatialDilatedConvolution_updateOutput)(
         't', 'n',
         n_, m_, k_,
         1,
-        ones->data<real>(), k_,
-        bias->data<real>(), k_,
+        ones->data<scalar_t>(), k_,
+        bias->data<scalar_t>(), k_,
         0,
-        output_n->data<real>(), n_
+        output_n->data<scalar_t>(), n_
       );
     } else {
       THTensor_(zero)(output_n);
@@ -165,12 +165,12 @@

     // Extract columns:
     THNN_(im2col)(
-      input_n->data<real>(),
+      input_n->data<scalar_t>(),
       nInputPlane, inputHeight, inputWidth,
       outputHeight, outputWidth,
       kH, kW, padH, padW, dH, dW,
       dilationH, dilationW,
-      columns->data<real>()
+      columns->data<scalar_t>()
     );

     // M,N,K are dims of matrix A and B
@@ -183,10 +183,10 @@
       'n', 'n',
       n, m, k,
       1,
-      columns->data<real>(), n,
-      weight->data<real>(), k,
+      columns->data<scalar_t>(), n,
+      weight->data<scalar_t>(), k,
       1,
-      output_n->data<real>(), n
+      output_n->data<scalar_t>(), n
     );
   }

@@ -273,19 +273,19 @@ void THNN_(SpatialDilatedConvolution_updateGradInput)(
       'n', 't',
       n, m, k,
       1,
-      gradOutput_n->data<real>(), n,
-      weight->data<real>(), m,
+      gradOutput_n->data<scalar_t>(), n,
+      weight->data<scalar_t>(), m,
       0,
-      gradColumns->data<real>(), n
+      gradColumns->data<scalar_t>(), n
     );

     // Unpack columns back into input:
     THNN_(col2im)(
-      gradColumns->data<real>(),
+      gradColumns->data<scalar_t>(),
       nInputPlane, inputHeight, inputWidth,
       outputHeight, outputWidth,
       kH, kW, padH, padW, dH, dW,
       dilationH, dilationW,
-      gradInput_n->data<real>()
+      gradInput_n->data<scalar_t>()
     );
   }

@@ -320,7 +320,7 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)(
     int dilationW, int dilationH,
     accreal scale_)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_(SpatialDilatedConvolution_shapeCheck)
     (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
      dilationH, dilationW, 1);
@@ -374,12 +374,12 @@

     // Extract columns:
     THNN_(im2col)(
-      input_n->data<real>(),
+      input_n->data<scalar_t>(),
       nInputPlane, inputHeight, inputWidth,
       outputHeight, outputWidth,
       kH, kW, padH, padW, dH, dW,
       dilationH, dilationW,
-      columns->data<real>()
+      columns->data<scalar_t>()
     );

     // M,N,K are dims of matrix A and B
@@ -392,10 +392,10 @@
       't', 'n',
       n, m, k,
       scale,
-      columns->data<real>(), k,
-      gradOutput_n->data<real>(), k,
+      columns->data<scalar_t>(), k,
+      gradOutput_n->data<scalar_t>(), k,
       1,
-      gradWeight->data<real>(), n
+      gradWeight->data<scalar_t>(), n
     );
   }

@@ -416,10 +416,10 @@
       't',
       k_, m_,
       scale,
-      gradOutput_n->data<real>(), k_,
-      ones->data<real>(), 1,
+      gradOutput_n->data<scalar_t>(), k_,
+      ones->data<scalar_t>(), 1,
       1,
-      gradBias->data<real>(), 1
+      gradBias->data<scalar_t>(), 1
     );
   }
 }
diff --git a/aten/src/THNN/generic/SpatialDilatedMaxPooling.c b/aten/src/THNN/generic/SpatialDilatedMaxPooling.c
index 7b583888f468d5..98db87a6ce0c06 100644
--- a/aten/src/THNN/generic/SpatialDilatedMaxPooling.c
+++ b/aten/src/THNN/generic/SpatialDilatedMaxPooling.c
@@ -79,8 +79,8 @@ static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
 }

 static void THNN_(SpatialDilatedMaxPooling_updateOutput_frame)(
-          real *input_p,
-          real *output_p,
+          scalar_t *input_p,
+          scalar_t *output_p,
           THIndex_t *ind_p,
           int64_t nslices,
           int64_t iwidth,
@@ -103,7 +103,7 @@ static void THNN_(SpatialDilatedMaxPooling_updateOutput_frame)(
   {
     /* loop over output */
     int64_t i, j;
-    real *ip = input_p + k*iwidth*iheight;
+    scalar_t *ip = input_p + k*iwidth*iheight;
     for(i = 0; i < oheight; i++)
     {
       for(j = 0; j < owidth; j++)
@@ -118,12 +118,12 @@ static void THNN_(SpatialDilatedMaxPooling_updateOutput_frame)(
         wstart += dilationW;

        /* local pointers */
-        real *op = output_p + k*owidth*oheight + i*owidth + j;
+        scalar_t *op = output_p + k*owidth*oheight + i*owidth + j;
         THIndex_t *indp = ind_p + k*owidth*oheight + i*owidth + j;

         /* compute local max: */
         int64_t maxindex = -1;
-        real maxval = -THInf;
+        scalar_t maxval = -THInf;
         int64_t tcntr = 0;
         int64_t x,y;
         for(y = hstart; y < hend; y += dilationH)
@@ -131,7 +131,7 @@ static void THNN_(SpatialDilatedMaxPooling_updateOutput_frame)(
           for(x = wstart; x < wend; x += dilationW)
           {
             tcntr = y*iwidth + x;
-            real val = *(ip + tcntr);
+            scalar_t val = *(ip + tcntr);
             if ((val > maxval) || std::isnan(val))
             {
               maxval = val;
@@ -174,8 +174,8 @@ void THNN_(SpatialDilatedMaxPooling_updateOutput)(
   int64_t inputWidth;
   int64_t outputHeight;
   int64_t outputWidth;
-  real *input_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *output_data;
   THIndex_t *indices_data;

   THNN_(SpatialDilatedMaxPooling_shapeCheck)
@@ -224,8 +224,8 @@ void THNN_(SpatialDilatedMaxPooling_updateOutput)(
     /* indices will contain the locations for each output point */
     THIndexTensor_(resize3d)(indices, nInputPlane, outputHeight, outputWidth);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();
     indices_data = THIndexTensor_(data)(indices);

     THNN_(SpatialDilatedMaxPooling_updateOutput_frame)
@@ -247,8 +247,8 @@ void THNN_(SpatialDilatedMaxPooling_updateOutput)(
     /* indices will contain the locations for each output point */
     THIndexTensor_(resize4d)(indices, nbatch, nInputPlane, outputHeight, outputWidth);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();
     indices_data = THIndexTensor_(data)(indices);

 #pragma omp parallel for private(p)
@@ -273,8 +273,8 @@ void THNN_(SpatialDilatedMaxPooling_updateOutput)(
 }

 static void THNN_(SpatialDilatedMaxPooling_updateGradInput_frame)(
-          real *gradInput_p,
-          real *gradOutput_p,
+          scalar_t *gradInput_p,
+          scalar_t *gradOutput_p,
           THIndex_t *ind_p,
           int64_t nInputPlane,
           int64_t inputWidth,
@@ -288,8 +288,8 @@ static void THNN_(SpatialDilatedMaxPooling_updateGradInput_frame)(
 #pragma omp parallel for private(k)
   for (k = 0; k < nInputPlane; k++)
   {
-    real *gradInput_p_k = gradInput_p + k*inputWidth*inputHeight;
-    real *gradOutput_p_k = gradOutput_p + k*outputWidth*outputHeight;
+    scalar_t *gradInput_p_k = gradInput_p + k*inputWidth*inputHeight;
+    scalar_t *gradOutput_p_k = gradOutput_p + k*outputWidth*outputHeight;
     THIndex_t *ind_p_k = ind_p + k*outputWidth*outputHeight;

     /* calculate max points */
@@ -333,8 +333,8 @@ void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
   int inputWidth;
   int outputHeight;
   int outputWidth;
-  real *gradInput_data;
-  real *gradOutput_data;
+  scalar_t *gradInput_data;
+  scalar_t *gradOutput_data;
   THIndex_t *indices_data;
THNN_(SpatialDilatedMaxPooling_shapeCheck) @@ -362,8 +362,8 @@ void THNN_(SpatialDilatedMaxPooling_updateGradInput)( outputWidth = gradOutput->size(dimw); /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = gradOutput->data(); + gradInput_data = gradInput->data(); + gradOutput_data = gradOutput->data(); indices_data = THIndexTensor_(data)(indices); /* backprop */ diff --git a/aten/src/THNN/generic/SpatialFractionalMaxPooling.c b/aten/src/THNN/generic/SpatialFractionalMaxPooling.c index e7ba47bd065cc4..e2b94298f3364d 100644 --- a/aten/src/THNN/generic/SpatialFractionalMaxPooling.c +++ b/aten/src/THNN/generic/SpatialFractionalMaxPooling.c @@ -3,11 +3,11 @@ #else static int64_t* THNN_(SpatialFractionalMaxPooling_generateIntervals)( - real sample, + scalar_t sample, int64_t inputSize, int64_t outputSize, int poolSize) { - real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1); + scalar_t alpha = (scalar_t) (inputSize - poolSize) / (scalar_t) (outputSize - 1); int64_t* sequence = (int64_t*) THAlloc(sizeof(int64_t) * outputSize); int64_t i; @@ -21,10 +21,10 @@ static int64_t* THNN_(SpatialFractionalMaxPooling_generateIntervals)( } static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( - real* input, - real* output, + scalar_t* input, + scalar_t* output, THIndex_t* indices, - real* randomSamples, + scalar_t* randomSamples, int64_t numPlanes, int64_t inputW, int64_t inputH, int64_t outputW, int64_t outputH, @@ -33,7 +33,7 @@ static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( #pragma omp parallel for private(plane) for (plane = 0; plane < numPlanes; ++plane) { /* each plane contains 2 random samples, one for W and one for H */ - real* randomSamplesForPlane = randomSamples + plane * 2; + scalar_t* randomSamplesForPlane = randomSamples + plane * 2; /* Generate interval sequence */ int64_t* sequenceW = @@ -46,8 +46,8 @@ static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( /* loop over output */ int64_t h, w; - real* inputForPlane = input + plane * inputW * inputH; - real* outputForPlane = output + plane * outputW * outputH; + scalar_t* inputForPlane = input + plane * inputW * inputH; + scalar_t* outputForPlane = output + plane * outputW * outputH; THIndex_t* indicesForPlane = indices + plane * outputW * outputH; for (h = 0; h < outputH; ++h) { @@ -56,7 +56,7 @@ static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( for (w = 0; w < outputW; ++w) { int64_t inputWStart = sequenceW[w]; - real maxVal = -THInf; + scalar_t maxVal = -THInf; int64_t maxIndex = -1; int64_t h2, w2; @@ -66,7 +66,7 @@ static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( THAssert(w2 >= 0 && w2 < inputW); int64_t planeIndex = h2 * inputW + w2; - real val = inputForPlane[planeIndex]; + scalar_t val = inputForPlane[planeIndex]; if (val > maxVal) { maxVal = val; maxIndex = planeIndex; @@ -135,10 +135,10 @@ void THNN_(SpatialFractionalMaxPooling_updateOutput)( THIndexTensor_(resize3d)(indices, numPlanes, outputH, outputW); THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( - input->data(), - output->data(), + input->data(), + output->data(), THIndexTensor_(data)(indices), - randomSamples->data(), + randomSamples->data(), numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } else { THTensor_(resize4d)(output, numBatch, numPlanes, outputH, outputW); @@ -149,10 +149,10 @@ void THNN_(SpatialFractionalMaxPooling_updateOutput)( #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { 
THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( - input->data() + batch * numPlanes * inputH * inputW, - output->data() + batch * numPlanes * outputH * outputW, + input->data() + batch * numPlanes * inputH * inputW, + output->data() + batch * numPlanes * outputH * outputW, THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW, - randomSamples->data() + batch * numPlanes * 2, + randomSamples->data() + batch * numPlanes * 2, numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } } @@ -162,8 +162,8 @@ void THNN_(SpatialFractionalMaxPooling_updateOutput)( } static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( - real* gradInput, - real* gradOutput, + scalar_t* gradInput, + scalar_t* gradOutput, THIndex_t* indices, int64_t numPlanes, int64_t inputW, int64_t inputH, @@ -171,8 +171,8 @@ static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( int64_t plane; #pragma omp parallel for private(plane) for (plane = 0; plane < numPlanes; plane++) { - real* gradInputForPlane = gradInput + plane * inputW * inputH; - real* gradOutputForPlane = gradOutput + plane * outputW * outputH; + scalar_t* gradInputForPlane = gradInput + plane * inputW * inputH; + scalar_t* gradOutputForPlane = gradOutput + plane * outputW * outputH; THIndex_t* indicesForPlane = indices + plane * outputW * outputH; int64_t h, w; @@ -230,8 +230,8 @@ void THNN_(SpatialFractionalMaxPooling_updateGradInput)( /* backprop */ if (numInputDims == 3) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( - gradInput->data(), - gradOutput->data(), + gradInput->data(), + gradOutput->data(), THIndexTensor_(data)(indices), numPlanes, inputW, inputH, outputW, outputH); } else { @@ -239,8 +239,8 @@ void THNN_(SpatialFractionalMaxPooling_updateGradInput)( #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( - gradInput->data() + batch * numPlanes * inputH * inputW, - gradOutput->data() + batch * numPlanes * outputH * outputW, + gradInput->data() + batch * numPlanes * inputH * inputW, + gradOutput->data() + batch * numPlanes * outputH * outputW, THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW, numPlanes, inputW, inputH, outputW, outputH); } diff --git a/aten/src/THNN/generic/SpatialFullConvolutionMap.c b/aten/src/THNN/generic/SpatialFullConvolutionMap.c index d8fca897b4a3e3..60d93421b1e952 100644 --- a/aten/src/THNN/generic/SpatialFullConvolutionMap.c +++ b/aten/src/THNN/generic/SpatialFullConvolutionMap.c @@ -33,11 +33,11 @@ void THNN_(SpatialFullConvolutionMap_updateOutput)( THTensor* output = THTensor_(newContiguous)(output_); /* get raw pointers */ - real *input_data = input->data(); - real *output_data = output->data(); - real *weight_data = weight->data(); - real *bias_data = bias->data(); - real *connTable_data = connTable->data(); + scalar_t *input_data = input->data(); + scalar_t *output_data = output->data(); + scalar_t *weight_data = weight->data(); + scalar_t *bias_data = bias->data(); + scalar_t *connTable_data = connTable->data(); /* and dims */ const int64_t input_h = input->size(1); @@ -52,7 +52,7 @@ void THNN_(SpatialFullConvolutionMap_updateOutput)( for (p = 0; p < nOutputPlane; p++) { /* add bias */ - real *ptr_output = output_data + p*output_w*output_h; + scalar_t *ptr_output = output_data + p*output_w*output_h; int64_t j; int nweight; int64_t k; @@ -106,10 +106,10 @@ void THNN_(SpatialFullConvolutionMap_updateGradInput)( THTensor_(zero)(gradInput); 
/* get raw pointers */ - real *gradInput_data = gradInput->data(); - real *gradOutput_data = gradOutput->data(); - real *weight_data = weight->data(); - real *connTable_data = connTable->data(); + scalar_t *gradInput_data = gradInput->data(); + scalar_t *gradOutput_data = gradOutput->data(); + scalar_t *weight_data = weight->data(); + scalar_t *connTable_data = connTable->data(); /* and dims */ const int64_t input_h = input->size(1); @@ -161,7 +161,7 @@ void THNN_(SpatialFullConvolutionMap_accGradParameters)( int dW, int dH, accreal scale_) { - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THArgCheck( gradWeight != NULL && !gradWeight->is_empty() && gradWeight->dim() == 3 && connTable != NULL && connTable->size(0) == gradWeight->size(0), 5, @@ -173,10 +173,10 @@ void THNN_(SpatialFullConvolutionMap_accGradParameters)( gradOutput = THTensor_(newContiguous)(gradOutput); /* get raw pointers */ - real *input_data = input->data(); - real *gradOutput_data = gradOutput->data(); - real *gradWeight_data = gradWeight->data(); - real *gradBias_data = gradBias->data(); + scalar_t *input_data = input->data(); + scalar_t *gradOutput_data = gradOutput->data(); + scalar_t *gradWeight_data = gradWeight->data(); + scalar_t *gradBias_data = gradBias->data(); /* and dims */ const int64_t input_h = input->size(1); @@ -191,7 +191,7 @@ void THNN_(SpatialFullConvolutionMap_accGradParameters)( #pragma omp parallel for private(k) for (k = 0; k < nOutputPlane; k++) { - real *ptr_gradOutput = gradOutput_data + k*output_w*output_h; + scalar_t *ptr_gradOutput = gradOutput_data + k*output_w*output_h; int64_t l; for (l = 0; l < output_h*output_w; l++) gradBias_data[k] += scale*ptr_gradOutput[l]; diff --git a/aten/src/THNN/generic/SpatialFullDilatedConvolution.c b/aten/src/THNN/generic/SpatialFullDilatedConvolution.c index 7c8c924d711932..482a2e40a4ff8e 100644 --- a/aten/src/THNN/generic/SpatialFullDilatedConvolution.c +++ b/aten/src/THNN/generic/SpatialFullDilatedConvolution.c @@ -154,18 +154,18 @@ void THNN_(SpatialFullDilatedConvolution_updateOutput)( 'n', 't', n, m, k, 1, - input_n->data(), n, - weight->data(), m, + input_n->data(), n, + weight->data(), m, 0, - columns->data(), n + columns->data(), n ); // Unpack columns back into input: THNN_(col2im)( - columns->data(), + columns->data(), nOutputPlane, outputHeight, outputWidth, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, - output_n->data() + output_n->data() ); // Do Bias after: @@ -181,10 +181,10 @@ void THNN_(SpatialFullDilatedConvolution_updateOutput)( 't', 'n', n_, m_, k_, 1, - ones->data(), k_, - bias->data(), k_, + ones->data(), k_, + bias->data(), k_, 1, - output_n->data(), n_ + output_n->data(), n_ ); } } @@ -265,12 +265,12 @@ void THNN_(SpatialFullDilatedConvolution_updateGradInput)( // Extract columns: THNN_(im2col)( - gradOutput_n->data(), + gradOutput_n->data(), nOutputPlane, outputHeight, outputWidth, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, - gradColumns->data() + gradColumns->data() ); // M,N,K are dims of matrix A and B @@ -284,10 +284,10 @@ void THNN_(SpatialFullDilatedConvolution_updateGradInput)( 'n', 'n', n, m, k, 1, - gradColumns->data(), n, - weight->data(), k, + gradColumns->data(), n, + weight->data(), k, 0, - gradInput_n->data(), n + gradInput_n->data(), n ); } @@ -323,7 +323,7 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( int adjW, int adjH, accreal scale_) { - real scale = 
TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THNN_(SpatialFullDilatedConvolution_shapeCheck) (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, dilationH, dilationW, adjH, adjW, 1); @@ -391,12 +391,12 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( // Extract columns: THNN_(im2col)( - gradOutput_n->data(), + gradOutput_n->data(), nOutputPlane, outputHeight, outputWidth, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, - columns->data() + columns->data() ); // M,N,K are dims of matrix A and B @@ -410,10 +410,10 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( 't', 'n', n, m, k, scale, - columns->data(), k, - input_n->data(), k, + columns->data(), k, + input_n->data(), k, 1, - gradWeight->data(), n + gradWeight->data(), n ); } @@ -429,10 +429,10 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)( 't', k_, m_, scale, - gradOutput_n->data(), k_, - ones->data(), 1, + gradOutput_n->data(), k_, + ones->data(), 1, 1, - gradBias->data(), 1 + gradBias->data(), 1 ); } } diff --git a/aten/src/THNN/generic/SpatialMaxUnpooling.c b/aten/src/THNN/generic/SpatialMaxUnpooling.c index 666ea16dd02e00..60cf4903951e9c 100644 --- a/aten/src/THNN/generic/SpatialMaxUnpooling.c +++ b/aten/src/THNN/generic/SpatialMaxUnpooling.c @@ -2,7 +2,7 @@ #define TH_GENERIC_FILE "generic/SpatialMaxUnpooling.c" #else -static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(real *input_p, real *output_p, +static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(scalar_t *input_p, scalar_t *output_p, THIndex_t *ind_p, int nslices, int iwidth, int iheight, @@ -14,8 +14,8 @@ static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(real *input_p, real *o #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { - real *output_p_k = output_p + k*owidth*oheight; - real *input_p_k = input_p + k*iwidth*iheight; + scalar_t *output_p_k = output_p + k*owidth*oheight; + scalar_t *input_p_k = input_p + k*iwidth*iheight; THIndex_t *ind_p_k = ind_p + k*iwidth*iheight; int i, j; @@ -56,8 +56,8 @@ void THNN_(SpatialMaxUnpooling_updateOutput)( int nslices; int iheight; int iwidth; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THIndex_t *indices_data; @@ -87,8 +87,8 @@ void THNN_(SpatialMaxUnpooling_updateOutput)( THTensor_(resize3d)(output, nslices, oheight, owidth); THTensor_(zero)(output); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); THNN_(SpatialMaxUnpooling_updateOutput_frame)(input_data, output_data, @@ -104,8 +104,8 @@ void THNN_(SpatialMaxUnpooling_updateOutput)( THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); THTensor_(zero)(output); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); for (p = 0; p < nbatch; p++) @@ -125,7 +125,7 @@ void THNN_(SpatialMaxUnpooling_updateOutput)( THIndexTensor_(free)(indices); } -static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(real *gradInput_p, real *gradOutput_p, +static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(scalar_t *gradInput_p, scalar_t *gradOutput_p, THIndex_t *ind_p, int nslices, int iwidth, int iheight, @@ -135,8 +135,8 @@ static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(real *gradInput_p, #pragma omp parallel for 
private(k) for (k = 0; k < nslices; k++) { - real *gradInput_p_k = gradInput_p + k*iwidth*iheight; - real *gradOutput_p_k = gradOutput_p + k*owidth*oheight; + scalar_t *gradInput_p_k = gradInput_p + k*iwidth*iheight; + scalar_t *gradOutput_p_k = gradOutput_p + k*owidth*oheight; THIndex_t *ind_p_k = ind_p + k*iwidth*iheight; int i, j; @@ -169,8 +169,8 @@ void THNN_(SpatialMaxUnpooling_updateGradInput)( int nslices; int iheight; int iwidth; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; THIndex_t *indices_data; THNN_CHECK_SHAPE_INDICES(input, indices); @@ -200,8 +200,8 @@ void THNN_(SpatialMaxUnpooling_updateGradInput)( } /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = gradOutput->data(); + gradInput_data = gradInput->data(); + gradOutput_data = gradOutput->data(); indices_data = THIndexTensor_(data)(indices); /* backprop */ diff --git a/aten/src/THNN/generic/SpatialReflectionPadding.c b/aten/src/THNN/generic/SpatialReflectionPadding.c index 6add190b581e9d..b48820ad35e887 100644 --- a/aten/src/THNN/generic/SpatialReflectionPadding.c +++ b/aten/src/THNN/generic/SpatialReflectionPadding.c @@ -3,7 +3,7 @@ #else static void THNN_(SpatialReflectionPadding_updateOutput_frame)( - real *input_p, real *output_p, + scalar_t *input_p, scalar_t *output_p, int64_t nslices, int64_t iwidth, int64_t iheight, int64_t owidth, int64_t oheight, @@ -41,8 +41,8 @@ static void THNN_(SpatialReflectionPadding_updateOutput_frame)( } ip_y = ip_y - oStartY + iStartY; - real *dest_p = output_p + k*owidth*oheight + i * owidth + j; - real *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x; + scalar_t *dest_p = output_p + k*owidth*oheight + i * owidth + j; + scalar_t *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x; *dest_p = *src_p; } } @@ -64,8 +64,8 @@ void THNN_(SpatialReflectionPadding_updateOutput)(THNNState *state, int64_t iwidth; int64_t oheight; int64_t owidth; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THNN_ARGCHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); @@ -108,8 +108,8 @@ void THNN_(SpatialReflectionPadding_updateOutput)(THNNState *state, { THTensor_(resize3d)(output, nslices, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); THNN_(SpatialReflectionPadding_updateOutput_frame)(input_data, output_data, nslices, @@ -124,8 +124,8 @@ void THNN_(SpatialReflectionPadding_updateOutput)(THNNState *state, THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) @@ -146,7 +146,7 @@ void THNN_(SpatialReflectionPadding_updateOutput)(THNNState *state, } static void THNN_(SpatialReflectionPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, + scalar_t *ginput_p, scalar_t *goutput_p, int64_t nslices, int64_t iwidth, int64_t iheight, int64_t owidth, int64_t oheight, @@ -184,8 +184,8 @@ static void THNN_(SpatialReflectionPadding_updateGradInput_frame)( } ip_y = ip_y - oStartY + iStartY; - real *src_p = goutput_p + k*owidth*oheight + i * owidth + j; - real *dest_p = ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x; + scalar_t *src_p = goutput_p + k*owidth*oheight + i * 
owidth + j; + scalar_t *dest_p = ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x; *dest_p += *src_p; } } @@ -241,8 +241,8 @@ void THNN_(SpatialReflectionPadding_updateGradInput)(THNNState *state, /* backprop */ if (input->dim() == 3) { THNN_(SpatialReflectionPadding_updateGradInput_frame)( - gradInput->data(), - gradOutput->data(), + gradInput->data(), + gradOutput->data(), nslices, iwidth, iheight, owidth, oheight, @@ -253,8 +253,8 @@ void THNN_(SpatialReflectionPadding_updateGradInput)(THNNState *state, #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { THNN_(SpatialReflectionPadding_updateGradInput_frame)( - gradInput->data() + p * nslices * iheight * iwidth, - gradOutput->data() + p * nslices * oheight * owidth, + gradInput->data() + p * nslices * iheight * iwidth, + gradOutput->data() + p * nslices * oheight * owidth, nslices, iwidth, iheight, owidth, oheight, diff --git a/aten/src/THNN/generic/SpatialReplicationPadding.c b/aten/src/THNN/generic/SpatialReplicationPadding.c index f65a928e376e9d..4de02317d3767a 100644 --- a/aten/src/THNN/generic/SpatialReplicationPadding.c +++ b/aten/src/THNN/generic/SpatialReplicationPadding.c @@ -3,7 +3,7 @@ #else static void THNN_(SpatialReplicationPadding_updateOutput_frame)( - real *input_p, real *output_p, + scalar_t *input_p, scalar_t *output_p, int64_t nslices, int64_t iwidth, int64_t iheight, int64_t owidth, int64_t oheight, @@ -40,8 +40,8 @@ static void THNN_(SpatialReplicationPadding_updateOutput_frame)( } ip_y = ip_y - oStartY + iStartY; - real *dest_p = output_p + k*owidth*oheight + i * owidth + j; - real *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x; + scalar_t *dest_p = output_p + k*owidth*oheight + i * owidth + j; + scalar_t *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x; *dest_p = *src_p; } } @@ -63,8 +63,8 @@ void THNN_(SpatialReplicationPadding_updateOutput)(THNNState *state, int64_t iwidth; int64_t oheight; int64_t owidth; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THNN_ARGCHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "3D or 4D (batch mode) tensor expected for input, but got: %s"); @@ -98,8 +98,8 @@ void THNN_(SpatialReplicationPadding_updateOutput)(THNNState *state, { THTensor_(resize3d)(output, nslices, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); THNN_(SpatialReplicationPadding_updateOutput_frame)(input_data, output_data, nslices, @@ -114,8 +114,8 @@ void THNN_(SpatialReplicationPadding_updateOutput)(THNNState *state, THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) @@ -136,7 +136,7 @@ void THNN_(SpatialReplicationPadding_updateOutput)(THNNState *state, } static void THNN_(SpatialReplicationPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, + scalar_t *ginput_p, scalar_t *goutput_p, int64_t nslices, int64_t iwidth, int64_t iheight, int64_t owidth, int64_t oheight, @@ -173,8 +173,8 @@ static void THNN_(SpatialReplicationPadding_updateGradInput_frame)( } ip_y = ip_y - oStartY + iStartY; - real *src_p = goutput_p + k*owidth*oheight + i * owidth + j; - real *dest_p = ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x; + scalar_t *src_p = goutput_p + k*owidth*oheight + i * owidth + j; + scalar_t *dest_p 
= ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x; *dest_p += *src_p; } } @@ -230,8 +230,8 @@ void THNN_(SpatialReplicationPadding_updateGradInput)(THNNState *state, /* backprop */ if (input->dim() == 3) { THNN_(SpatialReplicationPadding_updateGradInput_frame)( - gradInput->data(), - gradOutput->data(), + gradInput->data(), + gradOutput->data(), nslices, iwidth, iheight, owidth, oheight, @@ -242,8 +242,8 @@ void THNN_(SpatialReplicationPadding_updateGradInput)(THNNState *state, #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { THNN_(SpatialReplicationPadding_updateGradInput_frame)( - gradInput->data() + p * nslices * iheight * iwidth, - gradOutput->data() + p * nslices * oheight * owidth, + gradInput->data() + p * nslices * iheight * iwidth, + gradOutput->data() + p * nslices * oheight * owidth, nslices, iwidth, iheight, owidth, oheight, diff --git a/aten/src/THNN/generic/SpatialSubSampling.c b/aten/src/THNN/generic/SpatialSubSampling.c index d9b82f4d314932..485fd9a936af62 100644 --- a/aten/src/THNN/generic/SpatialSubSampling.c +++ b/aten/src/THNN/generic/SpatialSubSampling.c @@ -42,10 +42,10 @@ void THNN_(SpatialSubSampling_updateOutput)( { THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous"); - real *weight_data = weight->data(); - real *bias_data = bias->data(); - real *output_data; - real *input_data; + scalar_t *weight_data = weight->data(); + scalar_t *bias_data = bias->data(); + scalar_t *output_data; + scalar_t *input_data; int dimw = 2; int dimh = 1; @@ -79,8 +79,8 @@ void THNN_(SpatialSubSampling_updateOutput)( THTensor_(resize4d)(output, input->size(0), nInputPlane, outputHeight, outputWidth); input = THTensor_(newContiguous)(input); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); #pragma omp parallel for private(k) for(k = 0; k < nInputPlane; k++) @@ -90,11 +90,11 @@ void THNN_(SpatialSubSampling_updateOutput)( { int64_t xx, yy; /* For all output pixels... */ - real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight; + scalar_t *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight; /* Get the good mask for (k,i) (k out, i in) */ - real the_weight = weight_data[k]; + scalar_t the_weight = weight_data[k]; /* Initialize to the bias */ - real z = bias_data[k]; + scalar_t z = bias_data[k]; int64_t i; for(i = 0; i < outputWidth*outputHeight; i++) ptr_output[i] = z; @@ -104,8 +104,8 @@ void THNN_(SpatialSubSampling_updateOutput)( for(xx = 0; xx < outputWidth; xx++) { /* Compute the mean of the input image... 
 */
-            real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
-            real sum = 0;
+            scalar_t *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
+            scalar_t sum = 0;
             int64_t kx, ky;

             for(ky = 0; ky < kH; ky++)
@@ -145,9 +145,9 @@ void THNN_(SpatialSubSampling_updateGradInput)(

   int nInputPlane = THTensor_(size)(weight,0);

-  real *weight_data;
-  real *gradOutput_data;
-  real *gradInput_data;
+  scalar_t *weight_data;
+  scalar_t *gradOutput_data;
+  scalar_t *gradInput_data;

   int64_t k;

@@ -162,12 +162,12 @@ void THNN_(SpatialSubSampling_updateGradInput)(
   outputWidth = (inputWidth - kW) / dW + 1;
   outputHeight = (inputHeight - kH) / dH + 1;

-  weight_data = weight->data<real>();
+  weight_data = weight->data<scalar_t>();
   gradOutput = THTensor_(newContiguous)(gradOutput);
-  gradOutput_data = gradOutput->data<real>();
+  gradOutput_data = gradOutput->data<scalar_t>();

   THTensor_(resizeAs)(gradInput, input);
-  gradInput_data = gradInput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();

 #pragma omp parallel for private(k)
   for(k = 0; k < nInputPlane; k++)
@@ -175,11 +175,11 @@ void THNN_(SpatialSubSampling_updateGradInput)(
     int64_t p;
     for(p = 0; p < nbatch; p++)
     {
-      real the_weight = weight_data[k];
-      real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
+      scalar_t the_weight = weight_data[k];
+      scalar_t *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
       int64_t xx, yy;

-      real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
+      scalar_t* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
       int64_t i;
       for(i=0; i<inputWidth*inputHeight; i++)
         ptr_gi[i] = 0;
-  gradWeight_data = gradWeight->data<real>();
-  gradBias_data = gradBias->data<real>();
+  gradWeight_data = gradWeight->data<scalar_t>();
+  gradBias_data = gradBias->data<scalar_t>();
   gradOutput = THTensor_(newContiguous)(gradOutput);
-  gradOutput_data = gradOutput->data<real>();
+  gradOutput_data = gradOutput->data<scalar_t>();
   input = THTensor_(newContiguous)(input);
-  input_data = input->data<real>();
+  input_data = input->data<scalar_t>();

 #pragma omp parallel for private(k)
   for(k = 0; k < nInputPlane; k++)
@@ -261,8 +261,8 @@ void THNN_(SpatialSubSampling_accGradParameters)(
     int64_t p;
     for(p = 0; p < nbatch; p++)
    {
-      real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
-      real sum;
+      scalar_t *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
+      scalar_t sum;
       int64_t xx, yy;
       int64_t i;

@@ -276,8 +276,8 @@
       {
         for(xx = 0; xx < outputWidth; xx++)
         {
-          real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
-          real z = *ptr_gradOutput++;
+          scalar_t *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
+          scalar_t z = *ptr_gradOutput++;
           int64_t kx, ky;

           for(ky = 0; ky < kH; ky++)
diff --git a/aten/src/THNN/generic/SpatialUpSamplingBilinear.c b/aten/src/THNN/generic/SpatialUpSamplingBilinear.c
index 10b61e0dcc7e1e..06544cf16e3503 100644
--- a/aten/src/THNN/generic/SpatialUpSamplingBilinear.c
+++ b/aten/src/THNN/generic/SpatialUpSamplingBilinear.c
@@ -55,8 +55,8 @@ void THNN_(SpatialUpSamplingBilinear_updateOutput)(
 	      THTensor_(size)(input, 1), outputHeight, outputWidth);
   THTensor_(zero)(output);
-  real *idata = input->data<real>();
-  real *odata = 
output->data(); + scalar_t *idata = input->data(); + scalar_t *odata = output->data(); channels = nbatch * channels; THAssert(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0); // special case: just copy @@ -65,8 +65,8 @@ void THNN_(SpatialUpSamplingBilinear_updateOutput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - const real* pos1 = &idata[h1 * inputWidth + w1]; - real* pos2 = &odata[h2 * outputWidth + w2]; + const scalar_t* pos1 = &idata[h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputWidth * inputHeight; @@ -83,16 +83,16 @@ void THNN_(SpatialUpSamplingBilinear_updateOutput)( const accreal h1r = linear_upsampling_compute_source_index(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; + const scalar_t h1lambda = h1r - h1; + const scalar_t h0lambda = (scalar_t)1. - h1lambda; for (int w2 = 0; w2 < outputWidth; ++w2) { const accreal w1r = linear_upsampling_compute_source_index(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. - w1lambda; - const real* pos1 = &idata[h1 * inputWidth + w1]; - real* pos2 = &odata[h2 * outputWidth + w2]; + const scalar_t w1lambda = w1r - w1; + const scalar_t w0lambda = (scalar_t)1. - w1lambda; + const scalar_t* pos1 = &idata[h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos2[0] = h0lambda * (w0lambda * pos1[0]+ w1lambda * pos1[w1p]) + h1lambda * (w0lambda * pos1[h1p * inputWidth] @@ -126,8 +126,8 @@ void THNN_(SpatialUpSamplingBilinear_updateGradInput)( THTensor_(resize4d)(gradInput, nbatch, channels, inputHeight, inputWidth); THTensor_(zero)(gradInput); gradOutput = THTensor_(newContiguous)(gradOutput); - real *data1 = gradInput->data(); - real *data2 = gradOutput->data(); + scalar_t *data1 = gradInput->data(); + scalar_t *data2 = gradOutput->data(); channels = nbatch * channels; // special case: same-size matching grids @@ -136,8 +136,8 @@ void THNN_(SpatialUpSamplingBilinear_updateGradInput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - real* pos1 = &data1[h1 * inputWidth + w1]; - const real* pos2 = &data2[h2 * outputWidth + w2]; + scalar_t* pos1 = &data1[h1 * inputWidth + w1]; + const scalar_t* pos2 = &data2[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputWidth * inputHeight; @@ -154,16 +154,16 @@ void THNN_(SpatialUpSamplingBilinear_updateGradInput)( const accreal h1r = linear_upsampling_compute_source_index(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; + const scalar_t h1lambda = h1r - h1; + const scalar_t h0lambda = (scalar_t)1. - h1lambda; for (int w2 = 0; w2 < outputWidth; ++w2) { const accreal w1r = linear_upsampling_compute_source_index(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. - w1lambda; - real* pos1 = &data1[h1 * inputWidth + w1]; - const real* pos2 = &data2[h2 * outputWidth + w2]; + const scalar_t w1lambda = w1r - w1; + const scalar_t w0lambda = (scalar_t)1. 
- w1lambda; + scalar_t* pos1 = &data1[h1 * inputWidth + w1]; + const scalar_t* pos2 = &data2[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] += h0lambda * w0lambda * pos2[0]; pos1[w1p] += h0lambda * w1lambda * pos2[0]; diff --git a/aten/src/THNN/generic/SpatialUpSamplingNearest.c b/aten/src/THNN/generic/SpatialUpSamplingNearest.c index d8b70082b680aa..409f70a21cf63f 100644 --- a/aten/src/THNN/generic/SpatialUpSamplingNearest.c +++ b/aten/src/THNN/generic/SpatialUpSamplingNearest.c @@ -56,8 +56,8 @@ void THNN_(SpatialUpSamplingNearest_updateOutput)( input = THTensor_(newContiguous)(input); THTensor_(zero)(output); - real *idata = input->data(); - real *odata = output->data(); + scalar_t *idata = input->data(); + scalar_t *odata = output->data(); // special case: just copy if (inputHeight == outputHeight && inputWidth == outputWidth) { @@ -65,8 +65,8 @@ void THNN_(SpatialUpSamplingNearest_updateOutput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - const real* pos1 = &idata[h1 * inputWidth + w1]; - real* pos2 = &odata[h2 * outputWidth + w2]; + const scalar_t* pos1 = &idata[h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputHeight * inputWidth; @@ -82,8 +82,8 @@ void THNN_(SpatialUpSamplingNearest_updateOutput)( const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, inputHeight); for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, inputWidth); - const real* pos1 = &idata[h1 * inputWidth + w1]; - real* pos2 = &odata[h2 * outputWidth + w2]; + const scalar_t* pos1 = &idata[h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputHeight * inputWidth; @@ -110,8 +110,8 @@ void THNN_(SpatialUpSamplingNearest_updateGradInput)( THTensor_(resize4d)(gradInput, nbatch, channels, inputHeight, inputWidth); THTensor_(zero)(gradInput); gradOutput = THTensor_(newContiguous)(gradOutput); - real *idata = gradInput->data(); - real *odata = gradOutput->data(); + scalar_t *idata = gradInput->data(); + scalar_t *odata = gradOutput->data(); channels = nbatch * channels; const float height_scale = (float) inputHeight / (float)outputHeight; const float width_scale = (float) inputWidth / (float)outputWidth; @@ -121,8 +121,8 @@ void THNN_(SpatialUpSamplingNearest_updateGradInput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - real* pos1 = &idata[h1 * inputWidth + w1]; - const real* pos2 = &odata[h2 * outputWidth + w2]; + scalar_t* pos1 = &idata[h1 * inputWidth + w1]; + const scalar_t* pos2 = &odata[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] = pos2[0]; pos1 += inputHeight * inputWidth; @@ -138,8 +138,8 @@ void THNN_(SpatialUpSamplingNearest_updateGradInput)( const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, inputHeight); for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, inputWidth); - real* pos1 = &idata[h1 * inputWidth + w1]; - const real* pos2 = &odata[h2 * outputWidth + w2]; + scalar_t* pos1 = &idata[h1 * inputWidth + w1]; + const scalar_t* pos2 = &odata[h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputHeight * inputWidth; diff --git a/aten/src/THNN/generic/Sqrt.c b/aten/src/THNN/generic/Sqrt.c index 
f276bcd721e512..080513020b376b 100644 --- a/aten/src/THNN/generic/Sqrt.c +++ b/aten/src/THNN/generic/Sqrt.c @@ -27,15 +27,15 @@ void THNN_(Sqrt_updateGradInput)( !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput)) { - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, output, *gradInput_data = (*output_data == 0.0) ? 0.0 : (0.5 * (*gradOutput_data / *output_data)); ); } else { - real *gradOutput_data = gradOutput->data(); - real *gradInput_data = gradInput->data(); - real *output_data = output->data(); + scalar_t *gradOutput_data = gradOutput->data(); + scalar_t *gradInput_data = gradInput->data(); + scalar_t *output_data = output->data(); int64_t i; #pragma omp parallel for private(i) for(i = 0; i < THTensor_(nElement)(output); i++) diff --git a/aten/src/THNN/generic/Square.c b/aten/src/THNN/generic/Square.c index af792f95a19e07..0d956f73f70555 100644 --- a/aten/src/THNN/generic/Square.c +++ b/aten/src/THNN/generic/Square.c @@ -11,14 +11,14 @@ void THNN_(Square_updateOutput)( if (THTensor_nDimensionLegacyAll(input) == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output)) { - TH_TENSOR_APPLY2(real, output, real, input, + TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input, *output_data = (*input_data) * (*input_data); ); } else { - real *output_data = output->data(); - real *input_data = input->data(); + scalar_t *output_data = output->data(); + scalar_t *input_data = input->data(); int64_t i; #pragma omp parallel for private(i) for (i = 0; i < THTensor_(nElement)(input); i++) @@ -40,15 +40,15 @@ void THNN_(Square_updateGradInput)( !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput)) { - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input, *gradInput_data = 2.0 * (*gradOutput_data) * (*input_data); ); } else { - real *gradOutput_data = gradOutput->data(); - real *gradInput_data = gradInput->data(); - real *input_data = input->data(); + scalar_t *gradOutput_data = gradOutput->data(); + scalar_t *gradInput_data = gradInput->data(); + scalar_t *input_data = input->data(); int64_t i; #pragma omp parallel for private(i) for (i = 0; i < THTensor_(nElement)(gradInput); i++) diff --git a/aten/src/THNN/generic/Tanh.c b/aten/src/THNN/generic/Tanh.c index 9fbb8a2c7e88a3..15607f49316e65 100644 --- a/aten/src/THNN/generic/Tanh.c +++ b/aten/src/THNN/generic/Tanh.c @@ -24,22 +24,22 @@ void THNN_(Tanh_updateGradInput)( !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput)) { - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, - real z = *output_data; \ + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, output, + scalar_t z = *output_data; \ *gradInput_data = *gradOutput_data * (1. - z*z); ); } else { - real* ptr_gradOutput = gradOutput->data(); - real* ptr_gradInput = gradInput->data(); - real* ptr_output = output->data(); + scalar_t* ptr_gradOutput = gradOutput->data(); + scalar_t* ptr_gradInput = gradInput->data(); + scalar_t* ptr_output = output->data(); int64_t i; #pragma omp parallel for private(i) for (i = 0; i < THTensor_(nElement)(gradInput); i++) { - real z = ptr_output[i]; + scalar_t z = ptr_output[i]; ptr_gradInput[i] = ptr_gradOutput[i] * (1. 
- z*z); } } diff --git a/aten/src/THNN/generic/TemporalConvolution.c b/aten/src/THNN/generic/TemporalConvolution.c index 17b3194d8e9d84..4e29f574acee43 100644 --- a/aten/src/THNN/generic/TemporalConvolution.c +++ b/aten/src/THNN/generic/TemporalConvolution.c @@ -277,7 +277,7 @@ void THNN_(TemporalConvolution_accGradParameters)( int dW, accreal scale_) { - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); int64_t nInputFrame; int64_t nOutputFrame; diff --git a/aten/src/THNN/generic/TemporalMaxPooling.c b/aten/src/THNN/generic/TemporalMaxPooling.c index 03b7d800344115..236d88bf3e415f 100644 --- a/aten/src/THNN/generic/TemporalMaxPooling.c +++ b/aten/src/THNN/generic/TemporalMaxPooling.c @@ -60,8 +60,8 @@ void THNN_(TemporalMaxPooling_updateOutput)( int64_t framesize; int64_t noframe; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THIndex_t *indices_data; int64_t t, y; @@ -94,25 +94,25 @@ void THNN_(TemporalMaxPooling_updateOutput)( THIndexTensor_(resize2d)(indices, noframe, framesize); /* get raw pointers */ - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); for(t = 0; t < noframe; t++) { - real *ip = input_data + t*framesize*dW; - real *op = output_data + t*framesize; + scalar_t *ip = input_data + t*framesize*dW; + scalar_t *op = output_data + t*framesize; THIndex_t *xp = indices_data + t*framesize; #pragma omp parallel for private(y) for(y = 0; y < framesize; y++) { /* compute local max: */ int64_t maxindex = -1; - real maxval = -THInf; + scalar_t maxval = -THInf; int64_t x; for(x = 0; x < kW; x++) { - real val = ip[x*framesize+y]; + scalar_t val = ip[x*framesize+y]; if (val > maxval) { maxval = val; @@ -122,7 +122,7 @@ void THNN_(TemporalMaxPooling_updateOutput)( /* set output to local max */ op[y] = maxval; - xp[y] = (real)maxindex; + xp[y] = (scalar_t)maxindex; } } } @@ -139,20 +139,20 @@ void THNN_(TemporalMaxPooling_updateOutput)( THIndexTensor_(resize3d)(indices, nbframe, noframe, framesize); /* get raw pointers */ - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); for(i = 0; i < nbframe; i++) { - real *inputSample_data = input_data + i*niframe*framesize; - real *outputSample_data = output_data + i*noframe*framesize; + scalar_t *inputSample_data = input_data + i*niframe*framesize; + scalar_t *outputSample_data = output_data + i*noframe*framesize; THIndex_t *indicesSample_data = indices_data + i*noframe*framesize; for(t = 0; t < noframe; t++) { - real *ip = inputSample_data + t*framesize*dW; - real *op = outputSample_data + t*framesize; + scalar_t *ip = inputSample_data + t*framesize*dW; + scalar_t *op = outputSample_data + t*framesize; THIndex_t *xp = indicesSample_data + t*framesize; #pragma omp parallel for private(y) @@ -160,11 +160,11 @@ void THNN_(TemporalMaxPooling_updateOutput)( { /* compute local max: */ int64_t maxindex = -1; - real maxval = -THInf; + scalar_t maxval = -THInf; int64_t x; for(x = 0; x < kW; x++) { - real val = ip[x*framesize+y]; + scalar_t val = ip[x*framesize+y]; if (val > maxval) { maxval = val; @@ -174,7 +174,7 @@ void THNN_(TemporalMaxPooling_updateOutput)( /* set output to local max */ op[y] = maxval; - xp[y] = (real)maxindex; + xp[y] = (scalar_t)maxindex; } } } @@ -198,8 +198,8 @@ void 
THNN_(TemporalMaxPooling_updateGradInput)( int noframe; int64_t framesize; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; THIndex_t *indices_data; int64_t t, y; @@ -226,16 +226,16 @@ void THNN_(TemporalMaxPooling_updateGradInput)( framesize = gradOutput->size(dimF); /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = gradOutput->data(); + gradInput_data = gradInput->data(); + gradOutput_data = gradOutput->data(); indices_data = THIndexTensor_(data)(indices); if (input->dim() == 2) { for(t = 0; t < noframe; t++) { - real *gip = gradInput_data + t*framesize*dW; - real *gop = gradOutput_data + t*framesize; + scalar_t *gip = gradInput_data + t*framesize*dW; + scalar_t *gop = gradOutput_data + t*framesize; THIndex_t *xp = indices_data + t*framesize; #pragma omp parallel for private(y) for(y = 0; y < framesize; y++) @@ -255,14 +255,14 @@ void THNN_(TemporalMaxPooling_updateGradInput)( for(i = 0; i < nbframe; i++) { - real *gradInputSample_data = gradInput_data + i*niframe*framesize; - real *gradOutputSample_data = gradOutput_data + i*noframe*framesize; + scalar_t *gradInputSample_data = gradInput_data + i*niframe*framesize; + scalar_t *gradOutputSample_data = gradOutput_data + i*noframe*framesize; THIndex_t *indicesSample_data = indices_data + i*noframe*framesize; for(t = 0; t < noframe; t++) { - real *gip = gradInputSample_data + t*framesize*dW; - real *gop = gradOutputSample_data + t*framesize; + scalar_t *gip = gradInputSample_data + t*framesize*dW; + scalar_t *gop = gradOutputSample_data + t*framesize; THIndex_t *xp = indicesSample_data + t*framesize; #pragma omp parallel for private(y) for(y = 0; y < framesize; y++) diff --git a/aten/src/THNN/generic/TemporalReflectionPadding.c b/aten/src/THNN/generic/TemporalReflectionPadding.c index e7b5d4547e29d0..097cbc6343393d 100644 --- a/aten/src/THNN/generic/TemporalReflectionPadding.c +++ b/aten/src/THNN/generic/TemporalReflectionPadding.c @@ -3,7 +3,7 @@ #else static void THNN_(TemporalReflectionPadding_updateOutput_frame)( - real *input_p, real *output_p, + scalar_t *input_p, scalar_t *output_p, long nslices, long iwidth, long owidth, @@ -28,9 +28,9 @@ static void THNN_(TemporalReflectionPadding_updateOutput_frame)( } ip_x = ip_x - oStartX + iStartX; - /* real *dest_p = output_p + k*owidth*oheight + i * owidth + j; */ - real *dest_p = output_p + k*owidth + j; - real *src_p = input_p + k*iwidth + ip_x; + /* scalar_t *dest_p = output_p + k*owidth*oheight + i * owidth + j; */ + scalar_t *dest_p = output_p + k*owidth + j; + scalar_t *src_p = input_p + k*iwidth + ip_x; *dest_p = *src_p; } } @@ -47,8 +47,8 @@ void THNN_(TemporalReflectionPadding_updateOutput)(THNNState *state, long nslices; long iwidth; long owidth; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THNN_ARGCHECK(!input->is_empty() && (input->dim() == 2 || input->dim() == 3), 2, input, "non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s"); @@ -84,8 +84,8 @@ void THNN_(TemporalReflectionPadding_updateOutput)(THNNState *state, { THTensor_(resize2d)(output, nslices, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); THNN_(TemporalReflectionPadding_updateOutput_frame)(input_data, output_data, nslices, @@ -99,8 +99,8 @@ void THNN_(TemporalReflectionPadding_updateOutput)(THNNState *state, THTensor_(resize3d)(output, nbatch, nslices, owidth); - input_data = 
input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) @@ -120,7 +120,7 @@ void THNN_(TemporalReflectionPadding_updateOutput)(THNNState *state, } static void THNN_(TemporalReflectionPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, + scalar_t *ginput_p, scalar_t *goutput_p, long nslices, long iwidth, long owidth, @@ -145,8 +145,8 @@ static void THNN_(TemporalReflectionPadding_updateGradInput_frame)( } ip_x = ip_x - oStartX + iStartX; - real *src_p = goutput_p + k*owidth + j; - real *dest_p = ginput_p + k*iwidth + ip_x; + scalar_t *src_p = goutput_p + k*owidth + j; + scalar_t *dest_p = ginput_p + k*iwidth + ip_x; *dest_p += *src_p; } } @@ -191,8 +191,8 @@ void THNN_(TemporalReflectionPadding_updateGradInput)(THNNState *state, /* backprop */ if (input->dim() == 2) { THNN_(TemporalReflectionPadding_updateGradInput_frame)( - gradInput->data(), - gradOutput->data(), + gradInput->data(), + gradOutput->data(), nslices, iwidth, owidth, @@ -202,8 +202,8 @@ void THNN_(TemporalReflectionPadding_updateGradInput)(THNNState *state, #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { THNN_(TemporalReflectionPadding_updateGradInput_frame)( - gradInput->data() + p * nslices * iwidth, - gradOutput->data() + p * nslices * owidth, + gradInput->data() + p * nslices * iwidth, + gradOutput->data() + p * nslices * owidth, nslices, iwidth, owidth, diff --git a/aten/src/THNN/generic/TemporalReplicationPadding.c b/aten/src/THNN/generic/TemporalReplicationPadding.c index 2808fec8c34275..40e6bf71ab68b1 100644 --- a/aten/src/THNN/generic/TemporalReplicationPadding.c +++ b/aten/src/THNN/generic/TemporalReplicationPadding.c @@ -3,7 +3,7 @@ #else static void THNN_(TemporalReplicationPadding_updateOutput_frame)( - real *input_p, real *output_p, + scalar_t *input_p, scalar_t *output_p, long nslices, long iwidth, long owidth, @@ -27,8 +27,8 @@ static void THNN_(TemporalReplicationPadding_updateOutput_frame)( } ip_x = ip_x - oStartX + iStartX; - real *dest_p = output_p + k*owidth + j; - real *src_p = input_p + k*iwidth + ip_x; + scalar_t *dest_p = output_p + k*owidth + j; + scalar_t *src_p = input_p + k*iwidth + ip_x; *dest_p = *src_p; } } @@ -45,8 +45,8 @@ void THNN_(TemporalReplicationPadding_updateOutput)(THNNState *state, long nslices; long iwidth; long owidth; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THNN_ARGCHECK(!input->is_empty() && (input->dim() == 2 || input->dim() == 3), 2, input, "non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s"); @@ -77,8 +77,8 @@ void THNN_(TemporalReplicationPadding_updateOutput)(THNNState *state, { THTensor_(resize2d)(output, nslices, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); THNN_(TemporalReplicationPadding_updateOutput_frame)(input_data, output_data, nslices, @@ -92,8 +92,8 @@ void THNN_(TemporalReplicationPadding_updateOutput)(THNNState *state, THTensor_(resize3d)(output, nbatch, nslices, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) @@ -113,7 +113,7 @@ void THNN_(TemporalReplicationPadding_updateOutput)(THNNState *state, } static void THNN_(TemporalReplicationPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, + 
scalar_t *ginput_p, scalar_t *goutput_p, long nslices, long iwidth, long owidth, @@ -137,8 +137,8 @@ static void THNN_(TemporalReplicationPadding_updateGradInput_frame)( } ip_x = ip_x - oStartX + iStartX; - real *src_p = goutput_p + k*owidth + j; - real *dest_p = ginput_p + k*iwidth + ip_x; + scalar_t *src_p = goutput_p + k*owidth + j; + scalar_t *dest_p = ginput_p + k*iwidth + ip_x; *dest_p += *src_p; } } @@ -183,8 +183,8 @@ void THNN_(TemporalReplicationPadding_updateGradInput)(THNNState *state, /* backprop */ if (input->dim() == 2) { THNN_(TemporalReplicationPadding_updateGradInput_frame)( - gradInput->data(), - gradOutput->data(), + gradInput->data(), + gradOutput->data(), nslices, iwidth, owidth, @@ -194,8 +194,8 @@ void THNN_(TemporalReplicationPadding_updateGradInput)(THNNState *state, #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { THNN_(TemporalReplicationPadding_updateGradInput_frame)( - gradInput->data() + p * nslices * iwidth, - gradOutput->data() + p * nslices * owidth, + gradInput->data() + p * nslices * iwidth, + gradOutput->data() + p * nslices * owidth, nslices, iwidth, owidth, diff --git a/aten/src/THNN/generic/TemporalRowConvolution.c b/aten/src/THNN/generic/TemporalRowConvolution.c index 0434a3fe7b13fc..665e20430ec435 100644 --- a/aten/src/THNN/generic/TemporalRowConvolution.c +++ b/aten/src/THNN/generic/TemporalRowConvolution.c @@ -67,8 +67,8 @@ static void THNN_(unfolded_acc_row)( int64_t nOutputFrame) { int64_t c; - real *input_data = input->data(); - real *finput_data = finput->data(); + scalar_t *input_data = input->data(); + scalar_t *finput_data = finput->data(); // #pragma omp parallel for private(c) for (c = 0; c < inputFrameSize; c++) { @@ -76,18 +76,18 @@ static void THNN_(unfolded_acc_row)( int64_t ix = 0; for (kw = 0; kw < kW; kw++) { - real *src = finput_data + scalar_t *src = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame); - real *dst = input_data + c * (nInputFrame); + scalar_t *dst = input_data + c * (nInputFrame); ix = (size_t)(kw); if (dW == 1) { - real *dst_slice = dst + (size_t)(ix); + scalar_t *dst_slice = dst + (size_t)(ix); THVector_(cadd)(dst_slice, dst_slice, src, 1, nOutputFrame); } else { for (x = 0; x < nOutputFrame; x++) { - real *dst_slice = dst + (size_t)(ix + x * dW); + scalar_t *dst_slice = dst + (size_t)(ix + x * dW); THVector_(cadd)(dst_slice, dst_slice, src + (size_t)(x), 1, 1); } @@ -107,8 +107,8 @@ static void THNN_(unfolded_copy_row)( int64_t nOutputFrame) { int64_t k; - real *input_data = input->data(); - real *finput_data = finput->data(); + scalar_t *input_data = input->data(); + scalar_t *finput_data = finput->data(); // #pragma omp parallel for private(k) for (k = 0; k < inputFrameSize * kW; k++) { @@ -117,16 +117,16 @@ static void THNN_(unfolded_copy_row)( int64_t kw = rest % kW; int64_t x; int64_t ix; - real *dst = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame); - real *src = input_data + c * (nInputFrame); + scalar_t *dst = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame); + scalar_t *src = input_data + c * (nInputFrame); ix = (size_t)(kw); if (dW == 1) { - memcpy(dst, src+(size_t)(ix), sizeof(real) * (nOutputFrame)); + memcpy(dst, src+(size_t)(ix), sizeof(scalar_t) * (nOutputFrame)); } else { for (x = 0; x < nOutputFrame; x++) { memcpy(dst + (size_t)(x), src + (size_t)(ix + x * dW), - sizeof(real) * 1); + sizeof(scalar_t) * 1); } } } @@ -368,7 +368,7 @@ void THNN_(TemporalRowConvolution_updateGradInput)( static void 
THNN_(TemporalRowConvolution_accGradParameters_frame)( THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, - THTensor *finput, real scale) { + THTensor *finput, scalar_t scale) { int64_t i; THTensor *gradOutput3d = THTensor_(newWithStorage3d)( @@ -388,8 +388,8 @@ static void THNN_(TemporalRowConvolution_accGradParameters_frame)( if (gradBias != NULL) { for (i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++) { int64_t k; - real sum = 0; - real *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput3d)) + scalar_t sum = 0; + scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput3d)) + gradOutput3d->storage_offset() + i * gradOutput3d->stride(0); for (k = 0; k < gradOutput3d->size(2); k++) { @@ -418,7 +418,7 @@ void THNN_(TemporalRowConvolution_accGradParameters)( bool featFirst, accreal scale_) { - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); int ndim = input->dim(); THTensor *tinput = NULL; diff --git a/aten/src/THNN/generic/TemporalSubSampling.c b/aten/src/THNN/generic/TemporalSubSampling.c index ab658ac6ec6a74..992bbb3f6c0044 100644 --- a/aten/src/THNN/generic/TemporalSubSampling.c +++ b/aten/src/THNN/generic/TemporalSubSampling.c @@ -129,7 +129,7 @@ void THNN_(TemporalSubSampling_accGradParameters)( int dW, accreal scale_) { - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THTensor *gradOutputFrame; THTensor *inputWindow, *buffer; int64_t k; diff --git a/aten/src/THNN/generic/TemporalUpSamplingLinear.c b/aten/src/THNN/generic/TemporalUpSamplingLinear.c index 59c18b0d13bcb1..37c49484799b9d 100644 --- a/aten/src/THNN/generic/TemporalUpSamplingLinear.c +++ b/aten/src/THNN/generic/TemporalUpSamplingLinear.c @@ -49,16 +49,16 @@ void THNN_(TemporalUpSamplingLinear_updateOutput)( THTensor_(size)(input, 1), outputWidth); THTensor_(zero)(output); - real *idata = input->data(); - real *odata = output->data(); + scalar_t *idata = input->data(); + scalar_t *odata = output->data(); channels = nbatch * channels; THAssert(inputWidth > 0 && outputWidth > 0); // special case: just copy if (inputWidth == outputWidth) { for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - const real* pos1 = &idata[w1]; - real* pos2 = &odata[w2]; + const scalar_t* pos1 = &idata[w1]; + scalar_t* pos2 = &odata[w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputWidth; @@ -73,11 +73,11 @@ void THNN_(TemporalUpSamplingLinear_updateOutput)( const accreal w1r = linear_upsampling_compute_source_index(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. - w1lambda; - const real* pos1 = &idata[w1]; + const scalar_t w1lambda = w1r - w1; + const scalar_t w0lambda = (scalar_t)1. 
- w1lambda; + const scalar_t* pos1 = &idata[w1]; // index w2 is interpolated by idata[w1] and (itself or idata[w1 + 1]) - real* pos2 = &odata[w2]; + scalar_t* pos2 = &odata[w2]; for (int c = 0; c < channels; ++c) { pos2[0] = w0lambda * pos1[0] + w1lambda * pos1[w1p]; pos1 += inputWidth; @@ -106,16 +106,16 @@ void THNN_(TemporalUpSamplingLinear_updateGradInput)( THTensor_(resize3d)(gradInput, nbatch, channels, inputWidth); THTensor_(zero)(gradInput); gradOutput = THTensor_(newContiguous)(gradOutput); - real *data1 = gradInput->data(); - real *data2 = gradOutput->data(); + scalar_t *data1 = gradInput->data(); + scalar_t *data2 = gradOutput->data(); channels = nbatch * channels; // special case: same-size matching grids if (inputWidth == outputWidth) { for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - real* pos1 = &data1[w1]; - const real* pos2 = &data2[w2]; + scalar_t* pos1 = &data1[w1]; + const scalar_t* pos2 = &data2[w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputWidth; @@ -130,10 +130,10 @@ void THNN_(TemporalUpSamplingLinear_updateGradInput)( const accreal w1r = linear_upsampling_compute_source_index(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. - w1lambda; - real* pos1 = &data1[w1]; - const real* pos2 = &data2[w2]; + const scalar_t w1lambda = w1r - w1; + const scalar_t w0lambda = (scalar_t)1. - w1lambda; + scalar_t* pos1 = &data1[w1]; + const scalar_t* pos2 = &data2[w2]; for (int c = 0; c < channels; ++c) { pos1[0] += w0lambda * pos2[0]; pos1[w1p] += w1lambda * pos2[0]; diff --git a/aten/src/THNN/generic/TemporalUpSamplingNearest.c b/aten/src/THNN/generic/TemporalUpSamplingNearest.c index 8a63b6cbcc03a2..b7f6a0c048fa28 100644 --- a/aten/src/THNN/generic/TemporalUpSamplingNearest.c +++ b/aten/src/THNN/generic/TemporalUpSamplingNearest.c @@ -47,15 +47,15 @@ void THNN_(TemporalUpSamplingNearest_updateOutput)( input = THTensor_(newContiguous)(input); THTensor_(zero)(output); - real *idata = input->data(); - real *odata = output->data(); + scalar_t *idata = input->data(); + scalar_t *odata = output->data(); // special case: just copy if (inputWidth == outputWidth) { for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - const real* pos1 = &idata[w1]; - real* pos2 = &odata[w2]; + const scalar_t* pos1 = &idata[w1]; + scalar_t* pos2 = &odata[w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputWidth; @@ -69,8 +69,8 @@ void THNN_(TemporalUpSamplingNearest_updateOutput)( for (int w2 = 0; w2 < outputWidth; ++w2) { const accreal src_x = nearest_neighbor_compute_source_index(scale, w2, inputWidth); const int w1 = src_x; - const real* pos1 = &idata[w1]; - real* pos2 = &odata[w2]; + const scalar_t* pos1 = &idata[w1]; + scalar_t* pos2 = &odata[w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputWidth; @@ -93,8 +93,8 @@ void THNN_(TemporalUpSamplingNearest_updateGradInput)( THTensor_(resize3d)(gradInput, nbatch, channels, inputWidth); THTensor_(zero)(gradInput); gradOutput = THTensor_(newContiguous)(gradOutput); - real *data1 = gradInput->data(); - real *data2 = gradOutput->data(); + scalar_t *data1 = gradInput->data(); + scalar_t *data2 = gradOutput->data(); channels = nbatch * channels; const float scale = (float) inputWidth / (float)outputWidth; @@ -102,8 +102,8 @@ void THNN_(TemporalUpSamplingNearest_updateGradInput)( if (inputWidth == outputWidth) { for (int w2 = 0; w2 < outputWidth; ++w2) { 
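/* For reference, a sketch of the index helper used by these nearest-neighbor
   kernels (an assumed implementation; the real one lives in THNN's upsampling
   helpers, and the exact rounding/clamping details may differ):

     static inline int nearest_neighbor_compute_source_index(
         const float scale, int dst_index, int input_size) {
       // map an output coordinate back to its nearest input coordinate,
       // clamping so we never read past the end of the input row
       const int src_index = (int)floorf(dst_index * scale);
       return src_index < input_size - 1 ? src_index : input_size - 1;
     }

   With scale = inputWidth / outputWidth, equal sizes make this the identity
   map, which is why the special-case branch here simply accumulates
   gradOutput into gradInput at the same coordinate. */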
const int w1 = w2; - real* pos1 = &data1[w1]; - const real* pos2 = &data2[w2]; + scalar_t* pos1 = &data1[w1]; + const scalar_t* pos2 = &data2[w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputWidth; @@ -116,8 +116,8 @@ void THNN_(TemporalUpSamplingNearest_updateGradInput)( for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = nearest_neighbor_compute_source_index(scale, w2, inputWidth); - real* pos1 = &data1[w1]; - const real* pos2 = &data2[w2]; + scalar_t* pos1 = &data1[w1]; + const scalar_t* pos2 = &data2[w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputWidth; diff --git a/aten/src/THNN/generic/Threshold.c b/aten/src/THNN/generic/Threshold.c index 211dfe7dbc3cfa..f47662f6ff548b 100644 --- a/aten/src/THNN/generic/Threshold.c +++ b/aten/src/THNN/generic/Threshold.c @@ -10,11 +10,11 @@ void THNN_(Threshold_updateOutput)( accreal val_, bool inplace) { - real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); - real val = TH_CONVERT_ACCREAL_TO_REAL(val_); + scalar_t threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); + scalar_t val = TH_CONVERT_ACCREAL_TO_REAL(val_); if (inplace) { - TH_TENSOR_APPLY(real, input, + TH_TENSOR_APPLY(scalar_t, input, if (*input_data <= threshold) *input_data = val; ); @@ -23,7 +23,7 @@ void THNN_(Threshold_updateOutput)( else { THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY2(real, output, real, input, + TH_TENSOR_APPLY2(scalar_t, output, scalar_t, input, *output_data = (*input_data <= threshold) ? val : *input_data; // this order propagates NaN ); } @@ -38,11 +38,11 @@ void THNN_(Threshold_updateGradInput)( accreal val_, bool inplace) { - real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); + scalar_t threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); THNN_CHECK_NELEMENT(input, gradOutput); if (inplace) { - TH_TENSOR_APPLY2(real, gradOutput, real, input, + TH_TENSOR_APPLY2(scalar_t, gradOutput, scalar_t, input, if ((*input_data) <= threshold) *gradOutput_data = 0; ); @@ -51,7 +51,7 @@ void THNN_(Threshold_updateGradInput)( else { THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, + TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, input, if ((*input_data) <= threshold) *gradInput_data = 0; else diff --git a/aten/src/THNN/generic/VolumetricAdaptiveAveragePooling.c b/aten/src/THNN/generic/VolumetricAdaptiveAveragePooling.c index 46a18de4af084e..4c1d3152245f8d 100644 --- a/aten/src/THNN/generic/VolumetricAdaptiveAveragePooling.c +++ b/aten/src/THNN/generic/VolumetricAdaptiveAveragePooling.c @@ -10,8 +10,8 @@ // 5d tensor B x D x T x H x W static void THNN_(VolumetricAdaptiveAveragePooling_updateOutput_frame)( - real *input_p, - real *output_p, + scalar_t *input_p, + scalar_t *output_p, int64_t sizeD, int64_t isizeT, int64_t isizeH, @@ -50,11 +50,11 @@ static void THNN_(VolumetricAdaptiveAveragePooling_updateOutput_frame)( int kW = iendW - istartW; /* local pointers */ - real *ip = input_p + d*istrideD + istartT*istrideT + istartH*istrideH + istartW*istrideW; - real *op = output_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow; + scalar_t *ip = input_p + d*istrideD + istartT*istrideT + istartH*istrideH + istartW*istrideW; + scalar_t *op = output_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow; /* compute local average: */ - real sum = 0; + scalar_t sum = 0; int it, ih, iw; for(it = 0; it < kT; it++) { @@ -62,7 +62,7 @@ static void THNN_(VolumetricAdaptiveAveragePooling_updateOutput_frame)( { for(iw 
= 0; iw < kW; iw++) { - real val = *(ip + it*istrideT + ih*istrideH + iw*istrideW); + scalar_t val = *(ip + it*istrideT + ih*istrideH + iw*istrideW); sum += val; } } @@ -100,8 +100,8 @@ void THNN_(VolumetricAdaptiveAveragePooling_updateOutput)( int64_t istrideH = 0; int64_t istrideW = 0; - real *input_data = nullptr; - real *output_data = nullptr; + scalar_t *input_data = nullptr; + scalar_t *output_data = nullptr; THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input, @@ -133,8 +133,8 @@ void THNN_(VolumetricAdaptiveAveragePooling_updateOutput)( { THTensor_(resize4d)(output, sizeD, osizeT, osizeH, osizeW); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); THNN_(VolumetricAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data, sizeD, @@ -149,8 +149,8 @@ void THNN_(VolumetricAdaptiveAveragePooling_updateOutput)( THTensor_(resize5d)(output, sizeB, sizeD, osizeT, osizeH, osizeW); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); #pragma omp parallel for private(b) for (b = 0; b < sizeB; b++) @@ -166,8 +166,8 @@ void THNN_(VolumetricAdaptiveAveragePooling_updateOutput)( } static void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, + scalar_t *gradInput_p, + scalar_t *gradOutput_p, int64_t sizeD, int64_t isizeT, int64_t isizeH, @@ -180,8 +180,8 @@ static void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput_frame)( #pragma omp parallel for private(d) for (d = 0; d < sizeD; d++) { - real *gradInput_p_d = gradInput_p + d*isizeT*isizeW*isizeH; - real *gradOutput_p_d = gradOutput_p + d*osizeT*osizeW*osizeH; + scalar_t *gradInput_p_d = gradInput_p + d*isizeT*isizeW*isizeH; + scalar_t *gradOutput_p_d = gradOutput_p + d*osizeT*osizeW*osizeH; /* calculate average */ int64_t ot, oh, ow; @@ -204,7 +204,7 @@ static void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput_frame)( int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; - real grad_delta = gradOutput_p_d[ot*osizeH*osizeW + oh*osizeW + ow] / kT / kH / kW; + scalar_t grad_delta = gradOutput_p_d[ot*osizeH*osizeW + oh*osizeW + ow] / kT / kH / kW; int it, ih, iw; for(it = istartT; it < iendT; it++) @@ -242,8 +242,8 @@ void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput)( int64_t osizeT; int64_t osizeH; int64_t osizeW; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous)(gradOutput); @@ -270,8 +270,8 @@ void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput)( osizeW = gradOutput->size(dimW); /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = gradOutput->data(); + gradInput_data = gradInput->data(); + gradOutput_data = gradOutput->data(); /* backprop */ if (input->dim() == 4) diff --git a/aten/src/THNN/generic/VolumetricAdaptiveMaxPooling.c b/aten/src/THNN/generic/VolumetricAdaptiveMaxPooling.c index a1b2303c5a7944..18b7fea525e347 100644 --- a/aten/src/THNN/generic/VolumetricAdaptiveMaxPooling.c +++ b/aten/src/THNN/generic/VolumetricAdaptiveMaxPooling.c @@ -10,8 +10,8 @@ // 5d tensor B x D x T x H x W static void THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)( - real *input_p, - real *output_p, + scalar_t *input_p, + scalar_t *output_p, THIndex_t *ind_p, int64_t sizeD, int64_t isizeT, @@ -51,13 +51,13 @@ static 
void THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)( int64_t kW = iendW - istartW; /* local pointers */ - real *ip = input_p + d*istrideD + istartT *istrideT + istartH*istrideH + istartW*istrideW; - real *op = output_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow; + scalar_t *ip = input_p + d*istrideD + istartT *istrideT + istartH*istrideH + istartW*istrideW; + scalar_t *op = output_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow; THIndex_t *indp = ind_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow; /* compute local max: */ int64_t maxindex = -1; - real maxval = -FLT_MAX; + scalar_t maxval = -FLT_MAX; int64_t it, ih, iw; for(it = 0; it < kT; it++) { @@ -65,7 +65,7 @@ static void THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)( { for(iw = 0; iw < kW; iw++) { - real val = *(ip + it*istrideT + ih*istrideH + iw*istrideW); + scalar_t val = *(ip + it*istrideT + ih*istrideH + iw*istrideW); if ((val > maxval) || std::isnan(val)) { maxval = val; @@ -111,8 +111,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)( int64_t istrideH = 0; int64_t istrideW = 0; - real *input_data = nullptr; - real *output_data = nullptr; + scalar_t *input_data = nullptr; + scalar_t *output_data = nullptr; THIndex_t *indices_data = nullptr; THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input, @@ -146,8 +146,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)( /* indices will contain max input locations for each output point */ THIndexTensor_(resize4d)(indices, sizeD, osizeT, osizeH, osizeW); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data, @@ -166,8 +166,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)( /* indices will contain max input locations for each output point */ THIndexTensor_(resize5d)(indices, sizeB, sizeD, osizeT, osizeH, osizeW); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); #pragma omp parallel for private(b) @@ -185,8 +185,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)( } static void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, + scalar_t *gradInput_p, + scalar_t *gradOutput_p, THIndex_t *ind_p, int64_t sizeD, int64_t isizeT, @@ -200,8 +200,8 @@ static void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)( #pragma omp parallel for private(d) for (d = 0; d < sizeD; d++) { - real *gradInput_p_d = gradInput_p + d*isizeT*isizeH*isizeW; - real *gradOutput_p_d = gradOutput_p + d*osizeT*osizeH*osizeW; + scalar_t *gradInput_p_d = gradInput_p + d*isizeT*isizeH*isizeW; + scalar_t *gradOutput_p_d = gradOutput_p + d*osizeT*osizeH*osizeW; THIndex_t *ind_p_d = ind_p + d*osizeT*osizeH*osizeW; /* calculate max points */ @@ -242,8 +242,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput)( int64_t osizeT; int64_t osizeH; int64_t osizeW; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; THIndex_t *indices_data; /* get contiguous gradOutput */ @@ -271,8 +271,8 @@ void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput)( osizeW = gradOutput->size(dimW); /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = 
gradOutput->data<real>();
+  gradInput_data = gradInput->data<scalar_t>();
+  gradOutput_data = gradOutput->data<scalar_t>();
   indices_data = THIndexTensor_(data)(indices);

   /* backprop */
diff --git a/aten/src/THNN/generic/VolumetricAveragePooling.c b/aten/src/THNN/generic/VolumetricAveragePooling.c
index 14a919759334e3..6593271f26cbcf 100644
--- a/aten/src/THNN/generic/VolumetricAveragePooling.c
+++ b/aten/src/THNN/generic/VolumetricAveragePooling.c
@@ -104,8 +104,8 @@ static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
 }

 static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
-  real *input_p,
-  real *output_p,
+  scalar_t *input_p,
+  scalar_t *output_p,
   int64_t nslices,
   int64_t itime,
   int64_t iwidth,
@@ -131,8 +131,8 @@ static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
     int64_t i, j, ti;

     /* local pointers. */
-    real *ip = input_p + k * itime * iwidth * iheight;
-    real *op = output_p + k * otime * owidth * oheight;
+    scalar_t *ip = input_p + k * itime * iwidth * iheight;
+    scalar_t *op = output_p + k * otime * owidth * oheight;
     for (i = 0; i < otime * oheight * owidth; ++i)
       *(op + i) = 0;

@@ -165,7 +165,7 @@ static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
             divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);

           /* compute local sum: */
-          real sum = 0.0;
+          scalar_t sum = 0.0;
           int64_t x, y, z;

           for (z = tstart; z < tend; z++)
@@ -210,8 +210,8 @@ void THNN_(VolumetricAveragePooling_updateOutput)(
   int64_t otime;
   int64_t oheight;
   int64_t owidth;
-  real *input_data;
-  real *output_data;
+  scalar_t *input_data;
+  scalar_t *output_data;

   THNN_(VolumetricAveragePooling_shapeCheck)(
         state, input, NULL, kT, kW, kH,
@@ -267,8 +267,8 @@ void THNN_(VolumetricAveragePooling_updateOutput)(
     /* resize output */
     THTensor_(resize4d)(output, nslices, otime, oheight, owidth);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();

     THNN_(VolumetricAveragePooling_updateOutput_frame)(
       input_data, output_data, nslices,
@@ -291,8 +291,8 @@ void THNN_(VolumetricAveragePooling_updateOutput)(
     /* resize output */
     THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);

-    input_data = input->data<real>();
-    output_data = output->data<real>();
+    input_data = input->data<scalar_t>();
+    output_data = output->data<scalar_t>();

 #pragma omp parallel for private(p)
     for (p=0; p < nBatch; p++)
@@ -314,8 +314,8 @@ void THNN_(VolumetricAveragePooling_updateOutput)(
 }

 static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
-  real *gradInput_p,
-  real *gradOutput_p,
+  scalar_t *gradInput_p,
+  scalar_t *gradOutput_p,
   int64_t nslices,
   int64_t itime,
   int64_t iwidth,
@@ -341,8 +341,8 @@ static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
     int64_t i, j, ti;

     /* local pointers */
-    real *ip = gradInput_p + k * itime * iwidth * iheight;
-    real *op = gradOutput_p + k * otime * owidth * oheight;
+    scalar_t *ip = gradInput_p + k * itime * iwidth * iheight;
+    scalar_t *op = gradOutput_p + k * otime * owidth * oheight;
     for (i = 0; i < itime*iwidth*iheight; i++)
       *(ip + i) = 0;

@@ -374,7 +374,7 @@ static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
             divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);

           /* scatter gradients out to footprint: */
-          real val = *op++;
+          scalar_t val = *op++;
           int64_t x,y,z;

           for (z = tstart; z < tend; z++)
@@ -417,8 +417,8 @@ void THNN_(VolumetricAveragePooling_updateGradInput)(
   int64_t otime;
   int64_t oheight;
   int64_t owidth;
-  real *gradInput_data;
-  real *gradOutput_data;
+  scalar_t
*gradInput_data; + scalar_t *gradOutput_data; int dimN = 0; int dimt = 1; @@ -454,8 +454,8 @@ void THNN_(VolumetricAveragePooling_updateGradInput)( owidth = gradOutput->size(dimw); /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = gradOutput->data(); + gradInput_data = gradInput->data(); + gradOutput_data = gradOutput->data(); /* backprop */ if (input->dim() == 4) /* non-batch mode*/ diff --git a/aten/src/THNN/generic/VolumetricConvolution.c b/aten/src/THNN/generic/VolumetricConvolution.c index d74b9f15702093..fbe7bbf46a93bb 100644 --- a/aten/src/THNN/generic/VolumetricConvolution.c +++ b/aten/src/THNN/generic/VolumetricConvolution.c @@ -180,7 +180,7 @@ void THNN_(VolumetricConvolution_accGradParameters)( int pH, accreal scale_) { - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THArgCheck(pT != 0 || pW != 0 || pH != 0, 9, "padding not supported by CPU backend"); // sharing signature with CUDA version THNN_ARGCHECK(!gradWeight->is_empty() && gradWeight->dim() == 5, 4, gradWeight, @@ -195,7 +195,7 @@ void THNN_(VolumetricConvolution_accGradParameters)( } int64_t k; - real *gradBias_data; + scalar_t *gradBias_data; THTensor *gradOutSlice; int dimPlane = 0; if (gradOutput->dim() == 5) @@ -211,7 +211,7 @@ void THNN_(VolumetricConvolution_accGradParameters)( { /* gradient to bias */ if (gradBias) { - gradBias_data = gradBias->data(); + gradBias_data = gradBias->data(); gradOutSlice = THTensor_(new)(); for (k = 0; k < nOutputPlane; k++) { @@ -239,7 +239,7 @@ void THNN_(VolumetricConvolution_accGradParameters)( /* gradient to bias */ if (gradBias) { - gradBias_data = gradBias->data(); + gradBias_data = gradBias->data(); gradOutSlice = THTensor_(new)(); for (k = 0; k < nOutputPlane; k++) { diff --git a/aten/src/THNN/generic/VolumetricConvolutionMM.c b/aten/src/THNN/generic/VolumetricConvolutionMM.c index 2ca020dacbcf80..51b8de16b273ad 100644 --- a/aten/src/THNN/generic/VolumetricConvolutionMM.c +++ b/aten/src/THNN/generic/VolumetricConvolutionMM.c @@ -150,8 +150,8 @@ static void THNN_(unfolded_acc_vol)( int64_t outputWidth, int64_t outputHeight) { - real *input_data = input->data(); - real *finput_data = finput->data(); + scalar_t *input_data = input->data(); + scalar_t *finput_data = finput->data(); #ifdef _OPENMP int inOmp = omp_in_parallel(); #pragma omp parallel if (!inOmp) firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, nInputPlane, inputHeight, inputWidth, inputDepth) @@ -201,7 +201,7 @@ static void THNN_(unfolded_acc_vol)( int64_t d_col_tmp = d / dT + 1; int64_t d_col_end = d_col_tmp < outputDepth? d_col_tmp : outputDepth; - real val = 0; + scalar_t val = 0; int64_t offset = (c * kTkHkW + d * kHkW + h * kW + w) * outputDHW; int64_t offset_w_col_start = w_col_start * coeff_w_col; @@ -253,8 +253,8 @@ static void THNN_(unfolded_acc_vol)( The larger loop could lower the proportion of openmp overhead. And the inner part in loop is simpler. 
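(Concretely, the collapsed form walks one flattened index n over the
(c, kt, kh, kw, output-position) space and recovers each coordinate by
division and modulus, roughly w = n % kW; h = (n / kW) % kH;
d = (n / (kW*kH)) % kT; c = n / (kW*kH*kT); a single large parallel loop
amortizes the OpenMP fork/join and scheduling cost that many short nested
loops would pay repeatedly.)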
The naive code is below:
-  real *input_data = input->data<real>();
-  real *finput_data = finput->data<real>();
+  scalar_t *input_data = input->data<scalar_t>();
+  scalar_t *finput_data = finput->data<scalar_t>();

   int64_t n = nInputPlane*kT*kH*kW*outputDepth*outputWidth*outputHeight;
 #pragma omp parallel for firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, inputHeight, inputWidth, inputDepth)
@@ -303,8 +303,8 @@ static void THNN_(unfolded_copy_vol)(
   int64_t outputWidth,
   int64_t outputHeight)
 {
-  real *input_data = input->data<real>();
-  real *finput_data = finput->data<real>();
+  scalar_t *input_data = input->data<scalar_t>();
+  scalar_t *finput_data = finput->data<scalar_t>();

 #ifdef _OPENMP
   int inOmp = omp_in_parallel();
@@ -342,7 +342,7 @@ static void THNN_(unfolded_copy_vol)(
 #endif
       int64_t count = 0;
-      real* dst = finput_data + line_index_offset;
+      scalar_t* dst = finput_data + line_index_offset;

       int64_t inputHW = inputHeight*inputWidth;
       int64_t inputDHW = inputHW*inputDepth;
@@ -674,7 +674,7 @@ static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
   THTensor *gradWeight,
   THTensor *gradBias,
   THTensor *finput,  // can be NULL if gradWeight = NULL
-  real scale)
+  scalar_t scale)
 {
   int64_t i;
   THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
@@ -694,8 +694,8 @@ static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
     for (i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++)
     {
       int64_t k;
-      real sum = 0;
-      real *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
+      scalar_t sum = 0;
+      scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
       for (k = 0; k < gradOutput2d->size(1); k++)
         sum += data[k];
@@ -719,7 +719,7 @@ void THNN_(VolumetricConvolutionMM_accGradParameters)(
   int pT, int pW, int pH,
   accreal scale_)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_(VolumetricConvolutionMM_shapeCheck)(
     state, input, gradOutput, gradWeight, gradBias,
diff --git a/aten/src/THNN/generic/VolumetricDilatedConvolution.c b/aten/src/THNN/generic/VolumetricDilatedConvolution.c
index 879f357f89f798..14f6aad41e1f7b 100644
--- a/aten/src/THNN/generic/VolumetricDilatedConvolution.c
+++ b/aten/src/THNN/generic/VolumetricDilatedConvolution.c
@@ -163,10 +163,10 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(
         't', 'n',
         n_, m_, k_,
         1,
-        ones->data<real>(), k_,
-        bias->data<real>(), k_,
+        ones->data<scalar_t>(), k_,
+        bias->data<scalar_t>(), k_,
         0,
-        output_n->data<real>(), n_
+        output_n->data<scalar_t>(), n_
       );
     } else {
       THTensor_(zero)(output_n);
@@ -174,12 +174,12 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(

     // Extract columns:
     THNN_(vol2col)(
-      input_n->data<real>(),
+      input_n->data<scalar_t>(),
       nInputPlane, inputDepth, inputHeight, inputWidth,
       outputDepth, outputHeight, outputWidth,
       kT, kH, kW, padT, padH, padW, dT, dH, dW,
       dilationT, dilationH, dilationW,
-      columns->data<real>()
+      columns->data<scalar_t>()
     );

     // M,N,K are dims of matrix A and B
@@ -192,10 +192,10 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(
       'n', 'n',
       n, m, k,
       1,
-      columns->data<real>(), n,
-      weight->data<real>(), k,
+      columns->data<scalar_t>(), n,
+      weight->data<scalar_t>(), k,
       1,
-      output_n->data<real>(), n
+      output_n->data<scalar_t>(), n
     );
   }

@@ -285,20 +285,20 @@ void THNN_(VolumetricDilatedConvolution_updateGradInput)(
       'n', 't',
       n, m, k,
       1,
-      gradOutput_n->data<real>(), n,
-      weight->data<real>(), m,
+      gradOutput_n->data<scalar_t>(), n,
+      weight->data<scalar_t>(), m,
       0,
-      gradColumns->data<real>(), n
+      gradColumns->data<scalar_t>(), n
); // Unpack columns back into input: THNN_(col2vol)( - gradColumns->data(), + gradColumns->data(), nInputPlane, inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth, kT, kH, kW, padT, padH, padW, dT, dH, dW, dilationT, dilationH, dilationW, - gradInput_n->data() + gradInput_n->data() ); } @@ -332,7 +332,7 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)( int dilationT, int dilationW, int dilationH, accreal scale_) { - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THNN_(VolumetricDilatedConvolution_shapeCheck)( input, gradOutput, gradWeight, gradBias, kT, kH, kW, dT, dH, dW, padT, padH, padW, @@ -396,12 +396,12 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)( // Extract columns: THNN_(vol2col)( - input_n->data(), + input_n->data(), nInputPlane, inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth, kT, kH, kW, padT, padH, padW, dT, dH, dW, dilationT, dilationH, dilationW, - columns->data() + columns->data() ); // M,N,K are dims of matrix A and B @@ -414,10 +414,10 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)( 't', 'n', n, m, k, scale, - columns->data(), k, - gradOutput_n->data(), k, + columns->data(), k, + gradOutput_n->data(), k, 1, - gradWeight->data(), n + gradWeight->data(), n ); } @@ -432,10 +432,10 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)( 't', k_, m_, scale, - gradOutput_n->data(), k_, - ones->data(), 1, + gradOutput_n->data(), k_, + ones->data(), 1, 1, - gradBias->data(), 1 + gradBias->data(), 1 ); } } diff --git a/aten/src/THNN/generic/VolumetricDilatedMaxPooling.c b/aten/src/THNN/generic/VolumetricDilatedMaxPooling.c index 667fcb2b4a9997..fc9da1cf4a35a0 100644 --- a/aten/src/THNN/generic/VolumetricDilatedMaxPooling.c +++ b/aten/src/THNN/generic/VolumetricDilatedMaxPooling.c @@ -98,8 +98,8 @@ static inline void THNN_(VolumetricDilatedMaxPooling_shapeCheck)( } static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( - real *input_p, - real *output_p, + scalar_t *input_p, + scalar_t *output_p, THIndex_t *indz_p, int64_t nslices, int64_t itime, @@ -127,7 +127,7 @@ static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( { /* loop over output */ int64_t i, j, ti; - real *ip = input_p + k * itime * iwidth * iheight; + scalar_t *ip = input_p + k * itime * iwidth * iheight; for (ti = 0; ti < otime; ti++) { for (i = 0; i < oheight; i++) @@ -151,14 +151,14 @@ static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( while(start_w < 0) start_w += dilationW; - real *op = output_p + k * otime * owidth * oheight + scalar_t *op = output_p + k * otime * owidth * oheight + ti * owidth * oheight + i * owidth + j; THIndex_t *indzp = indz_p + k * otime * owidth * oheight + ti * owidth * oheight + i * owidth + j; /* compute local max: */ int64_t maxindex = -1; - real maxval = -THInf; + scalar_t maxval = -THInf; int64_t x,y,z; int64_t index = 0; @@ -169,7 +169,7 @@ static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( for (x = start_w; x < end_w; x += dilationW) { index = z * iwidth * iheight + y * iwidth + x; - real val = ip[index]; + scalar_t val = ip[index]; if ((val > maxval) || std::isnan(val)) { maxval = val; @@ -216,8 +216,8 @@ void THNN_(VolumetricDilatedMaxPooling_updateOutput)( int64_t otime; int64_t oheight; int64_t owidth; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THIndex_t *indices_data; @@ -279,8 +279,8 @@ void 
THNN_(VolumetricDilatedMaxPooling_updateOutput)( /* indices will contain ti,i,j uchar locations packed into float/double */ THIndexTensor_(resize4d)(indices, nslices, otime, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( @@ -308,8 +308,8 @@ void THNN_(VolumetricDilatedMaxPooling_updateOutput)( /* indices will contain ti,i,j locations for each output point */ THIndexTensor_(resize5d)(indices, nBatch, nslices, otime, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); #pragma omp parallel for private(p) @@ -335,8 +335,8 @@ void THNN_(VolumetricDilatedMaxPooling_updateOutput)( } static void THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, + scalar_t *gradInput_p, + scalar_t *gradOutput_p, THIndex_t *indz_p, int64_t nslices, int64_t itime, @@ -359,8 +359,8 @@ static void THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)( #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { - real *gradInput_p_k = gradInput_p + k * itime * iwidth * iheight; - real *gradOutput_p_k = gradOutput_p + k * otime * owidth * oheight; + scalar_t *gradInput_p_k = gradInput_p + k * itime * iwidth * iheight; + scalar_t *gradOutput_p_k = gradOutput_p + k * otime * owidth * oheight; THIndex_t *indz_p_k = indz_p + k * otime * owidth * oheight; /* calculate max points */ @@ -412,8 +412,8 @@ void THNN_(VolumetricDilatedMaxPooling_updateGradInput)( int otime; int oheight; int owidth; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; THIndex_t *indices_data; int dimN = 0; @@ -453,8 +453,8 @@ void THNN_(VolumetricDilatedMaxPooling_updateGradInput)( owidth = gradOutput->size(dimw); /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = gradOutput->data(); + gradInput_data = gradInput->data(); + gradOutput_data = gradOutput->data(); indices_data = THIndexTensor_(data)(indices); /* backprop */ diff --git a/aten/src/THNN/generic/VolumetricFractionalMaxPooling.c b/aten/src/THNN/generic/VolumetricFractionalMaxPooling.c index e7379b134c71d4..13bbc917c6b0ca 100644 --- a/aten/src/THNN/generic/VolumetricFractionalMaxPooling.c +++ b/aten/src/THNN/generic/VolumetricFractionalMaxPooling.c @@ -3,11 +3,11 @@ #else static int64_t* THNN_(VolumetricFractionalMaxPooling_generateIntervals)( - real sample, + scalar_t sample, int64_t inputSize, int64_t outputSize, int poolSize) { - real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1); + scalar_t alpha = (scalar_t) (inputSize - poolSize) / (scalar_t) (outputSize - 1); int64_t* sequence = (int64_t*) THAlloc(sizeof(int64_t) * outputSize); int64_t i; @@ -21,10 +21,10 @@ static int64_t* THNN_(VolumetricFractionalMaxPooling_generateIntervals)( } static void THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( - real* input, - real* output, + scalar_t* input, + scalar_t* output, THIndex_t* indices, - real* randomSamples, + scalar_t* randomSamples, int64_t numPlanes, int64_t inputT, int64_t inputW, int64_t inputH, int64_t outputT, int64_t outputW, int64_t outputH, @@ -33,7 +33,7 @@ static void THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( #pragma omp parallel for private(plane) for (plane = 0; plane 
< numPlanes; ++plane) { /* each plane contains 3 random samples, one for T, one for W, and one for H */ - real* randomSamplesForPlane = randomSamples + plane * 3; + scalar_t* randomSamplesForPlane = randomSamples + plane * 3; /* Generate interval sequence */ int64_t* sequenceT = @@ -49,8 +49,8 @@ static void THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( /* loop over output */ int64_t h, w, t; - real* inputForPlane = input + plane * inputT * inputW * inputH; - real* outputForPlane = output + plane * outputT * outputW * outputH; + scalar_t* inputForPlane = input + plane * inputT * inputW * inputH; + scalar_t* outputForPlane = output + plane * outputT * outputW * outputH; THIndex_t* indicesForPlane = indices + plane * outputT * outputW * outputH; for (h = 0; h < outputH; ++h) { @@ -62,7 +62,7 @@ static void THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( for (t = 0; t < outputT; ++t) { int64_t inputTStart = sequenceT[t]; - real maxVal = -THInf; + scalar_t maxVal = -THInf; int64_t maxIndex = -1; int64_t h2, w2, t2; @@ -74,7 +74,7 @@ static void THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( THAssert(t2 >= 0 && t2 < inputT); int64_t planeIndex = h2 * inputW * inputT + w2 * inputT + t2; - real val = inputForPlane[planeIndex]; + scalar_t val = inputForPlane[planeIndex]; if (val > maxVal) { maxVal = val; maxIndex = planeIndex; @@ -152,10 +152,10 @@ void THNN_(VolumetricFractionalMaxPooling_updateOutput)( THIndexTensor_(resize4d)(indices, numPlanes, outputH, outputW, outputT); THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( - input->data(), - output->data(), + input->data(), + output->data(), THIndexTensor_(data)(indices), - randomSamples->data(), + randomSamples->data(), numPlanes, inputT, inputW, inputH, outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH); } else { @@ -167,10 +167,10 @@ void THNN_(VolumetricFractionalMaxPooling_updateOutput)( #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( - input->data() + batch * numPlanes * inputH * inputW * inputT, - output->data() + batch * numPlanes * outputH * outputW * outputT, + input->data() + batch * numPlanes * inputH * inputW * inputT, + output->data() + batch * numPlanes * outputH * outputW * outputT, THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW * outputT, - randomSamples->data() + batch * numPlanes * 3, + randomSamples->data() + batch * numPlanes * 3, numPlanes, inputT, inputW, inputH, outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH); } @@ -181,8 +181,8 @@ void THNN_(VolumetricFractionalMaxPooling_updateOutput)( } static void THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)( - real* gradInput, - real* gradOutput, + scalar_t* gradInput, + scalar_t* gradOutput, THIndex_t* indices, int64_t numPlanes, int64_t inputT, int64_t inputW, int64_t inputH, @@ -190,8 +190,8 @@ static void THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)( int64_t plane; #pragma omp parallel for private(plane) for (plane = 0; plane < numPlanes; plane++) { - real* gradInputForPlane = gradInput + plane * inputT * inputW * inputH; - real* gradOutputForPlane = gradOutput + plane * outputT * outputW * outputH; + scalar_t* gradInputForPlane = gradInput + plane * inputT * inputW * inputH; + scalar_t* gradOutputForPlane = gradOutput + plane * outputT * outputW * outputH; THIndex_t* indicesForPlane = indices + plane * outputT * outputW * outputH; int64_t h, w, t; @@ -256,8 +256,8 @@ void 
THNN_(VolumetricFractionalMaxPooling_updateGradInput)( /* backprop */ if (numInputDims == 4) { THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)( - gradInput->data(), - gradOutput->data(), + gradInput->data(), + gradOutput->data(), THIndexTensor_(data)(indices), numPlanes, inputT, inputW, inputH, outputT, outputW, outputH); } else { @@ -265,8 +265,8 @@ void THNN_(VolumetricFractionalMaxPooling_updateGradInput)( #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)( - gradInput->data() + batch * numPlanes * inputH * inputW * inputT, - gradOutput->data() + batch * numPlanes * outputH * outputW * outputT, + gradInput->data() + batch * numPlanes * inputH * inputW * inputT, + gradOutput->data() + batch * numPlanes * outputH * outputW * outputT, THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW * outputT, numPlanes, inputT, inputW, inputH, outputT, outputW, outputH); } diff --git a/aten/src/THNN/generic/VolumetricFullDilatedConvolution.c b/aten/src/THNN/generic/VolumetricFullDilatedConvolution.c index 5ad23b5af27ed7..e8ea9c9d4f2b73 100644 --- a/aten/src/THNN/generic/VolumetricFullDilatedConvolution.c +++ b/aten/src/THNN/generic/VolumetricFullDilatedConvolution.c @@ -3,14 +3,14 @@ #else static void THNN_(vol2col)( - const real *data_vol, const int64_t channels, + const scalar_t *data_vol, const int64_t channels, const int64_t depth, const int64_t height, const int64_t width, const int64_t depth_col, const int64_t height_col, const int64_t width_col, const int64_t kT, const int64_t kH, const int64_t kW, const int64_t pT, const int64_t pH, const int64_t pW, const int64_t dT, const int64_t dH, const int64_t dW, const int64_t dilationT, const int64_t dilationH, const int64_t dilationW, - real *data_col) + scalar_t *data_col) { int64_t c, t, h, w; int64_t channels_col = channels * kT * kH * kW; @@ -43,17 +43,17 @@ static void THNN_(vol2col)( } static void THNN_(col2vol)( - const real* data_col, const int64_t channels, + const scalar_t* data_col, const int64_t channels, const int64_t depth, const int64_t height, const int64_t width, const int64_t out_depth, const int64_t out_height, const int64_t out_width, const int64_t kT, const int64_t kH, const int64_t kW, const int64_t pT, const int64_t pH, const int64_t pW, const int64_t dT, const int64_t dH, const int64_t dW, const int64_t dilationT, const int64_t dilationH, const int64_t dilationW, - real* data_vol) + scalar_t* data_vol) { int64_t c, t, h, w; - memset(data_vol, 0, sizeof(real) * depth * height * width * channels); + memset(data_vol, 0, sizeof(scalar_t) * depth * height * width * channels); int64_t depth_col = out_depth; int64_t height_col = out_height; int64_t width_col = out_width; @@ -248,22 +248,22 @@ void THNN_(VolumetricFullDilatedConvolution_updateOutput)( 'n', 't', n, m, k, 1, - input_n->data(), n, - weight->data(), m, + input_n->data(), n, + weight->data(), m, 0, - columns->data(), n + columns->data(), n ); // Unpack columns back into input: THNN_(col2vol)( - columns->data(), + columns->data(), nOutputPlane, outputDepth, outputHeight, outputWidth, inputDepth, inputHeight, inputWidth, kT, kH, kW, pT, pH, pW, dT, dH, dW, dilationT, dilationH, dilationW, - output_n->data() + output_n->data() ); // Do Bias after: @@ -279,10 +279,10 @@ void THNN_(VolumetricFullDilatedConvolution_updateOutput)( 't', 'n', n_, m_, k_, 1, - ones->data(), k_, - bias->data(), k_, + ones->data(), k_, + bias->data(), k_, 1, - output_n->data(), n_ 
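/* Context for the gemm calls in this file: vol2col and col2vol (defined
   earlier above) implement the 3-D version of the classic im2col trick.
   vol2col copies every (channel, kT x kH x kW kernel offset) of every output
   location into a [channels * kT*kH*kW] x [outputDepth * outputHeight *
   outputWidth] matrix so the whole convolution reduces to a single
   THBlas_(gemm) against the weight matrix; col2vol is the adjoint, summing
   overlapping windows back into a volume. A rough sketch, with index names
   following the surrounding code (exact loop order may differ):

     columns[c*kT*kH*kW + kt*kH*kW + kh*kW + kw][t*oH*oW + h*oW + w] =
         vol[c][t*dT - pT + kt*dilationT]
               [h*dH - pH + kh*dilationH]
               [w*dW - pW + kw*dilationW];  // zero when the index lands in padding
*/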
+ output_n->data(), n_ ); } } @@ -371,14 +371,14 @@ void THNN_(VolumetricFullDilatedConvolution_updateGradInput)( // Extract columns: THNN_(vol2col)( - gradOutput_n->data(), + gradOutput_n->data(), nOutputPlane, outputDepth, outputHeight, outputWidth, inputDepth, inputHeight, inputWidth, kT, kH, kW, pT, pH, pW, dT, dH, dW, dilationT, dilationH, dilationW, - gradColumns->data() + gradColumns->data() ); // M,N,K are dims of matrix A and B @@ -392,10 +392,10 @@ void THNN_(VolumetricFullDilatedConvolution_updateGradInput)( 'n', 'n', n, m, k, 1, - gradColumns->data(), n, - weight->data(), k, + gradColumns->data(), n, + weight->data(), k, 0, - gradInput_n->data(), n + gradInput_n->data(), n ); } @@ -431,7 +431,7 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( int aT, int aW, int aH, // extra output adjustment accreal scale_) { - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); + scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); // number of input & output planes and kernel size is indirectly defined by the gradWeight tensor THNN_(VolumetricFullDilatedConvolution_shapeCheck)( input, gradOutput, gradWeight, gradBias, kT, kW, kH, @@ -507,14 +507,14 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( // Extract columns: THNN_(vol2col)( - gradOutput_n->data(), nOutputPlane, + gradOutput_n->data(), nOutputPlane, outputDepth, outputHeight, outputWidth, inputDepth, inputHeight, inputWidth, kT, kH, kW, pT, pH, pW, dT, dH, dW, dilationT, dilationH, dilationW, - columns->data() + columns->data() ); // M,N,K are dims of matrix A and B @@ -528,10 +528,10 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( 't', 'n', n, m, k, scale, - columns->data(), k, - input_n->data(), k, + columns->data(), k, + input_n->data(), k, 1, - gradWeight->data(), n + gradWeight->data(), n ); } @@ -547,10 +547,10 @@ void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( 't', k_, m_, scale, - gradOutput_n->data(), k_, - ones->data(), 1, + gradOutput_n->data(), k_, + ones->data(), 1, 1, - gradBias->data(), 1 + gradBias->data(), 1 ); } } diff --git a/aten/src/THNN/generic/VolumetricMaxUnpooling.c b/aten/src/THNN/generic/VolumetricMaxUnpooling.c index 9aa97fb69feb80..0406f3ccbf6ad4 100644 --- a/aten/src/THNN/generic/VolumetricMaxUnpooling.c +++ b/aten/src/THNN/generic/VolumetricMaxUnpooling.c @@ -54,8 +54,8 @@ static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)( } static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)( - real *input_p, - real *output_p, + scalar_t *input_p, + scalar_t *output_p, THIndex_t *ind_p, int nslices, int iT, @@ -71,8 +71,8 @@ static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)( #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { - real *output_p_k = output_p + k * oT * oH * oW; - real *input_p_k = input_p + k * iT * iH * iW; + scalar_t *output_p_k = output_p + k * oT * oH * oW; + scalar_t *input_p_k = input_p + k * iT * iH * iW; THIndex_t *ind_p_k = ind_p + k * iT * iH * iW; int t, i, j, index; @@ -130,8 +130,8 @@ void THNN_(VolumetricMaxUnpooling_updateOutput)( int iT; int iH; int iW; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THIndex_t *indices_data; THNN_(VolumetricMaxUnpooling_shapeCheck)( @@ -162,8 +162,8 @@ void THNN_(VolumetricMaxUnpooling_updateOutput)( THTensor_(resize4d)(output, nslices, oT, oH, oW); THTensor_(zero)(output); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); 
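/* What the unpooling kernels here do, in one line: the forward frame kernel
   scatters each pooled value to the location the max-pooling pass recorded,
   conceptually output_k[ind_k[i]] = input_k[i] for every slice k and input
   position i (with a check that the stored index is in range), and the
   backward frame kernel gathers gradients along the same indices. This is a
   sketch of the idea; the literal loops are in
   THNN_(VolumetricMaxUnpooling_updateOutput_frame) above. */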
indices_data = THIndexTensor_(data)(indices); THNN_(VolumetricMaxUnpooling_updateOutput_frame)( @@ -181,8 +181,8 @@ void THNN_(VolumetricMaxUnpooling_updateOutput)( THTensor_(resize5d)(output, nbatch, nslices, oT, oH, oW); THTensor_(zero)(output); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); indices_data = THIndexTensor_(data)(indices); for (p = 0; p < nbatch; p++) @@ -204,8 +204,8 @@ void THNN_(VolumetricMaxUnpooling_updateOutput)( } static void THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, + scalar_t *gradInput_p, + scalar_t *gradOutput_p, THIndex_t *ind_p, int nslices, int iT, @@ -219,8 +219,8 @@ static void THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { - real *gradInput_p_k = gradInput_p + k * iT * iH * iW; - real *gradOutput_p_k = gradOutput_p + k * oT * oH * oW; + scalar_t *gradInput_p_k = gradInput_p + k * iT * iH * iW; + scalar_t *gradOutput_p_k = gradOutput_p + k * oT * oH * oW; THIndex_t *ind_p_k = ind_p + k * iT * iH * iW; int t, i, j, index; @@ -268,8 +268,8 @@ void THNN_(VolumetricMaxUnpooling_updateGradInput)( int iT; int iH; int iW; - real *gradInput_data; - real *gradOutput_data; + scalar_t *gradInput_data; + scalar_t *gradOutput_data; THIndex_t *indices_data; THNN_(VolumetricMaxUnpooling_shapeCheck)( @@ -300,8 +300,8 @@ void THNN_(VolumetricMaxUnpooling_updateGradInput)( iW = input->size(dimw); /* get raw pointers */ - gradInput_data = gradInput->data(); - gradOutput_data = gradOutput->data(); + gradInput_data = gradInput->data(); + gradOutput_data = gradOutput->data(); indices_data = THIndexTensor_(data)(indices); /* backprop */ diff --git a/aten/src/THNN/generic/VolumetricReplicationPadding.c b/aten/src/THNN/generic/VolumetricReplicationPadding.c index 515ca742660645..59e3c6b0e498bc 100644 --- a/aten/src/THNN/generic/VolumetricReplicationPadding.c +++ b/aten/src/THNN/generic/VolumetricReplicationPadding.c @@ -63,7 +63,7 @@ static inline void THNN_(VolumetricReplicationPadding_shapeCheck)( } static void THNN_(VolumetricReplicationPadding_updateOutput_frame)( - real *input_p, real *output_p, + scalar_t *input_p, scalar_t *output_p, int64_t nslices, int64_t iwidth, int64_t iheight, int64_t idepth, int64_t owidth, int64_t oheight, int64_t odepth, @@ -112,9 +112,9 @@ static void THNN_(VolumetricReplicationPadding_updateOutput_frame)( } ip_z = ip_z - oStartZ + iStartZ; - real *dest_p = output_p + k * owidth * oheight * odepth + + scalar_t *dest_p = output_p + k * owidth * oheight * odepth + z * owidth * oheight + i * owidth + j; - real *src_p = input_p + k * iwidth * iheight * idepth + + scalar_t *src_p = input_p + k * iwidth * iheight * idepth + ip_z * iwidth * iheight + ip_y * iwidth + ip_x; *dest_p = *src_p; } @@ -142,8 +142,8 @@ void THNN_(VolumetricReplicationPadding_updateOutput)(THNNState *state, int64_t odepth; int64_t oheight; int64_t owidth; - real *input_data; - real *output_data; + scalar_t *input_data; + scalar_t *output_data; THNN_(VolumetricReplicationPadding_shapeCheck)( state, input, NULL, pleft, pright, @@ -175,8 +175,8 @@ THNN_(VolumetricReplicationPadding_shapeCheck)( { THTensor_(resize4d)(output, nslices, odepth, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); THNN_(VolumetricReplicationPadding_updateOutput_frame)( input_data, output_data, nslices, iwidth, 
iheight, idepth, @@ -189,8 +189,8 @@ THNN_(VolumetricReplicationPadding_shapeCheck)( THTensor_(resize5d)(output, nbatch, nslices, odepth, oheight, owidth); - input_data = input->data(); - output_data = output->data(); + input_data = input->data(); + output_data = output->data(); #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) @@ -212,7 +212,7 @@ THNN_(VolumetricReplicationPadding_shapeCheck)( } static void THNN_(VolumetricReplicationPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, + scalar_t *ginput_p, scalar_t *goutput_p, int64_t nslices, int64_t iwidth, int64_t iheight, int64_t idepth, int64_t owidth, int64_t oheight, int64_t odepth, @@ -261,9 +261,9 @@ static void THNN_(VolumetricReplicationPadding_updateGradInput_frame)( } ip_z = ip_z - oStartZ + iStartZ; - real *src_p = goutput_p + k * owidth * oheight * odepth + + scalar_t *src_p = goutput_p + k * owidth * oheight * odepth + z * owidth * oheight + i * owidth + j; - real *dest_p = ginput_p + k * iwidth * iheight * idepth + + scalar_t *dest_p = ginput_p + k * iwidth * iheight * idepth + ip_z * iwidth * iheight + ip_y * iwidth + ip_x; *dest_p += *src_p; } @@ -326,8 +326,8 @@ THNN_(VolumetricReplicationPadding_shapeCheck)( /* backprop */ if (input->dim() == 4) { THNN_(VolumetricReplicationPadding_updateGradInput_frame)( - gradInput->data(), - gradOutput->data(), + gradInput->data(), + gradOutput->data(), nslices, iwidth, iheight, idepth, owidth, oheight, odepth, @@ -339,8 +339,8 @@ THNN_(VolumetricReplicationPadding_shapeCheck)( #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { THNN_(VolumetricReplicationPadding_updateGradInput_frame)( - gradInput->data() + p * nslices * idepth * iheight * iwidth, - gradOutput->data() + p * nslices * odepth * oheight * owidth, + gradInput->data() + p * nslices * idepth * iheight * iwidth, + gradOutput->data() + p * nslices * odepth * oheight * owidth, nslices, iwidth, iheight, idepth, owidth, oheight, odepth, diff --git a/aten/src/THNN/generic/VolumetricUpSamplingNearest.c b/aten/src/THNN/generic/VolumetricUpSamplingNearest.c index 15022ad21c42b4..185ff22df91f25 100644 --- a/aten/src/THNN/generic/VolumetricUpSamplingNearest.c +++ b/aten/src/THNN/generic/VolumetricUpSamplingNearest.c @@ -60,8 +60,8 @@ void THNN_(VolumetricUpSamplingNearest_updateOutput)( input = THTensor_(newContiguous)(input); THTensor_(zero)(output); - real *idata = input->data(); - real *odata = output->data(); + scalar_t *idata = input->data(); + scalar_t *odata = output->data(); // special case: just copy if (inputDepth == outputDepth && inputHeight == outputHeight && inputWidth == outputWidth) { @@ -71,8 +71,8 @@ void THNN_(VolumetricUpSamplingNearest_updateOutput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - const real* pos1 = &idata[d1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - real* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + const scalar_t* pos1 = &idata[d1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputDepth * inputHeight * inputWidth; @@ -91,8 +91,8 @@ void THNN_(VolumetricUpSamplingNearest_updateOutput)( const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, inputHeight); for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, inputWidth); - const real* pos1 = &idata[d1 
* inputHeight * inputWidth + h1 * inputWidth + w1]; - real* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + const scalar_t* pos1 = &idata[d1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos2[0] = pos1[0]; pos1 += inputDepth * inputHeight * inputWidth; @@ -121,8 +121,8 @@ void THNN_(VolumetricUpSamplingNearest_updateGradInput)( THTensor_(resize5d)(gradInput, nbatch, channels, inputDepth, inputHeight, inputWidth); THTensor_(zero)(gradInput); gradOutput = THTensor_(newContiguous)(gradOutput); - real *idata = gradInput->data(); - real *odata = gradOutput->data(); + scalar_t *idata = gradInput->data(); + scalar_t *odata = gradOutput->data(); channels = nbatch * channels; const float depth_scale = (float) inputDepth / (float) outputDepth; const float height_scale = (float) inputHeight / (float)outputHeight; @@ -136,8 +136,8 @@ void THNN_(VolumetricUpSamplingNearest_updateGradInput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - real* pos1 = &idata[d1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - const real* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + scalar_t* pos1 = &idata[d1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + const scalar_t* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputDepth * inputHeight * inputWidth; @@ -156,8 +156,8 @@ void THNN_(VolumetricUpSamplingNearest_updateGradInput)( const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, inputHeight); for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, inputWidth); - real* pos1 = &idata[d1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - const real* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + scalar_t* pos1 = &idata[d1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + const scalar_t* pos2 = &odata[d2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputDepth * inputHeight * inputWidth; diff --git a/aten/src/THNN/generic/VolumetricUpSamplingTrilinear.c b/aten/src/THNN/generic/VolumetricUpSamplingTrilinear.c index 78c9cca3ec3169..11613e646d5619 100644 --- a/aten/src/THNN/generic/VolumetricUpSamplingTrilinear.c +++ b/aten/src/THNN/generic/VolumetricUpSamplingTrilinear.c @@ -58,8 +58,8 @@ void THNN_(VolumetricUpSamplingTrilinear_updateOutput)( THTensor_(size)(input, 1), outputDepth, outputHeight, outputWidth); THTensor_(zero)(output); - real *idata = input->data(); - real *odata = output->data(); + scalar_t *idata = input->data(); + scalar_t *odata = output->data(); channels = nbatch * channels; THAssert(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 && outputDepth > 0 && outputHeight > 0 && outputWidth > 0); @@ -71,8 +71,8 @@ void THNN_(VolumetricUpSamplingTrilinear_updateOutput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - const real* pos1 = &idata[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - real* pos2 = &odata[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + const scalar_t* pos1 = &idata[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { 
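/* A note on the traversal pattern shared by these upsampling kernels: batch
   and channel are folded into a single count (channels = nbatch * channels
   earlier in the function), and the pointers advance by one full spatial
   volume per channel step, roughly

     pos1 += inputDepth * inputHeight * inputWidth;     // next input plane
     pos2 += outputDepth * outputHeight * outputWidth;  // next output plane

   so the innermost c-loop visits the same spatial coordinate in every
   (batch, channel) plane while the outer loops walk the output volume once. */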
pos2[0] = pos1[0]; pos1 += inputWidth * inputHeight * inputDepth; @@ -91,22 +91,22 @@ void THNN_(VolumetricUpSamplingTrilinear_updateOutput)( const accreal t1r = linear_upsampling_compute_source_index(rdepth, t2, align_corners); const int t1 = t1r; const int t1p = (t1 < inputDepth - 1) ? 1 : 0; - const real t1lambda = t1r - t1; - const real t0lambda = (real)1. - t1lambda; + const scalar_t t1lambda = t1r - t1; + const scalar_t t0lambda = (scalar_t)1. - t1lambda; for (int h2 = 0; h2 < outputHeight; ++h2) { const accreal h1r = linear_upsampling_compute_source_index(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; + const scalar_t h1lambda = h1r - h1; + const scalar_t h0lambda = (scalar_t)1. - h1lambda; for (int w2 = 0; w2 < outputWidth; ++w2) { const accreal w1r = linear_upsampling_compute_source_index(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. - w1lambda; - const real* pos1 = &idata[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - real* pos2 = &odata[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + const scalar_t w1lambda = w1r - w1; + const scalar_t w0lambda = (scalar_t)1. - w1lambda; + const scalar_t* pos1 = &idata[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + scalar_t* pos2 = &odata[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos2[0] = t0lambda * (h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) + h1lambda * (w0lambda * pos1[h1p * inputWidth] @@ -150,8 +150,8 @@ void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)( THTensor_(resize5d)(gradInput, nbatch, channels, inputDepth, inputHeight, inputWidth); THTensor_(zero)(gradInput); gradOutput = THTensor_(newContiguous)(gradOutput); - real *data1 = gradInput->data(); - real *data2 = gradOutput->data(); + scalar_t *data1 = gradInput->data(); + scalar_t *data2 = gradOutput->data(); channels = nbatch * channels; // special case: same-size matching grids @@ -162,8 +162,8 @@ void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)( const int h1 = h2; for (int w2 = 0; w2 < outputWidth; ++w2) { const int w1 = w2; - real* pos1 = &data1[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - const real* pos2 = &data2[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + scalar_t* pos1 = &data1[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + const scalar_t* pos2 = &data2[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] += pos2[0]; pos1 += inputWidth * inputHeight * inputDepth; @@ -182,22 +182,22 @@ void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)( const accreal t1r = linear_upsampling_compute_source_index(rdepth, t2, align_corners); const int t1 = t1r; const int t1p = (t1 < inputDepth - 1) ? 1 : 0; - const real t1lambda = t1r - t1; - const real t0lambda = (real)1. - t1lambda; + const scalar_t t1lambda = t1r - t1; + const scalar_t t0lambda = (scalar_t)1. - t1lambda; for (int h2 = 0; h2 < outputHeight; ++h2) { const accreal h1r = linear_upsampling_compute_source_index(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; + const scalar_t h1lambda = h1r - h1; + const scalar_t h0lambda = (scalar_t)1. 
- h1lambda; for (int w2 = 0; w2 < outputWidth; ++w2) { const accreal w1r = linear_upsampling_compute_source_index(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. - w1lambda; - real* pos1 = &data1[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - const real* pos2 = &data2[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; + const scalar_t w1lambda = w1r - w1; + const scalar_t w0lambda = (scalar_t)1. - w1lambda; + scalar_t* pos1 = &data1[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; + const scalar_t* pos2 = &data2[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; for (int c = 0; c < channels; ++c) { pos1[0] += t0lambda * h0lambda * w0lambda * pos2[0]; pos1[w1p] += t0lambda * h0lambda * w1lambda * pos2[0]; diff --git a/aten/src/THNN/generic/unfold.c b/aten/src/THNN/generic/unfold.c index bac73e21813d2c..6158d6e9499b29 100644 --- a/aten/src/THNN/generic/unfold.c +++ b/aten/src/THNN/generic/unfold.c @@ -24,8 +24,8 @@ void THNN_(unfolded_acc)( int nip; - real *input_data = input->data(); - real *finput_data = finput->data(); + scalar_t *input_data = input->data(); + scalar_t *finput_data = finput->data(); #pragma omp parallel for private(nip) for(nip = 0; nip < nInputPlane; nip++) @@ -36,8 +36,8 @@ void THNN_(unfolded_acc)( { for(kw = 0; kw < kW; kw++) { - real *src = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth); - real *dst = input_data + nip*((size_t)inputHeight*inputWidth); + scalar_t *src = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth); + scalar_t *dst = input_data + nip*((size_t)inputHeight*inputWidth); if (padW > 0 || padH > 0) { int lpad,rpad; for(y = 0; y < outputHeight; y++) { @@ -48,7 +48,7 @@ void THNN_(unfolded_acc)( ix = 0 - padW + kw; lpad = fmaxf(0,padW-kw); rpad = fmaxf(0,padW-(kW-kw-1)); - real *dst_slice = dst+(size_t)iy*inputWidth+ix+lpad; + scalar_t *dst_slice = dst+(size_t)iy*inputWidth+ix+lpad; THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+lpad, 1, outputWidth - lpad - rpad); /* note: THVector_add could handle 1 value better */ } else{ @@ -56,7 +56,7 @@ void THNN_(unfolded_acc)( ix = (int64_t)x*dW - padW + kw; if (ix < 0 || ix >= inputWidth){ }else{ - real *dst_slice = dst+(size_t)iy*inputWidth+ix; + scalar_t *dst_slice = dst+(size_t)iy*inputWidth+ix; THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1); } } @@ -68,11 +68,11 @@ void THNN_(unfolded_acc)( iy = (int64_t)y*dH + kh; ix = 0 + kw; if (dW == 1 ) { - real *dst_slice = dst+(size_t)iy*inputWidth+ix; + scalar_t *dst_slice = dst+(size_t)iy*inputWidth+ix; THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */ }else{ for(x = 0; x < outputWidth; x++) { - real *dst_slice = dst+(size_t)iy*inputWidth+ix+x*dW; + scalar_t *dst_slice = dst+(size_t)iy*inputWidth+ix+x*dW; THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1); } } @@ -105,8 +105,8 @@ void THNN_(unfolded_copy)( // outputWidth*dW does not overflow a int64_t int64_t k; - real *input_data = input->data(); - real *finput_data = finput->data(); + scalar_t *input_data = input->data(); + scalar_t *finput_data = finput->data(); #pragma omp parallel for private(k) for(k = 0; k < (int64_t)nInputPlane*kH*kW; 
k++) { @@ -116,34 +116,34 @@ void THNN_(unfolded_copy)( int64_t kw = rest % kW; int x, y; int64_t ix, iy; - real *dst = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth); - real *src = input_data + nip*((size_t)inputHeight*inputWidth); + scalar_t *dst = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth); + scalar_t *src = input_data + nip*((size_t)inputHeight*inputWidth); if (padW > 0 || padH > 0) { int64_t lpad,rpad; for(y = 0; y < outputHeight; y++) { iy = (int64_t)y*dH - padH + kh; if (iy < 0 || iy >= inputHeight) { - memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth); + memset(dst+(size_t)y*outputWidth, 0, sizeof(scalar_t)*outputWidth); } else { if (dW==1){ ix = 0 - padW + kw; lpad = fmaxf(0,padW-kw); rpad = fmaxf(0,padW-(kW-kw-1)); if (outputWidth-rpad-lpad <= 0) { - memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth); + memset(dst+(size_t)y*outputWidth, 0, sizeof(scalar_t)*outputWidth); } else { - if (lpad > 0) memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*lpad); - memcpy(dst+(size_t)y*outputWidth+lpad, src+(size_t)iy*inputWidth+ix+lpad, sizeof(real)*(outputWidth-rpad-lpad)); - if (rpad > 0) memset(dst+(size_t)y*outputWidth + outputWidth - rpad, 0, sizeof(real)*rpad); + if (lpad > 0) memset(dst+(size_t)y*outputWidth, 0, sizeof(scalar_t)*lpad); + memcpy(dst+(size_t)y*outputWidth+lpad, src+(size_t)iy*inputWidth+ix+lpad, sizeof(scalar_t)*(outputWidth-rpad-lpad)); + if (rpad > 0) memset(dst+(size_t)y*outputWidth + outputWidth - rpad, 0, sizeof(scalar_t)*rpad); } } else{ for (x=0; x= inputWidth) - memset(dst+(size_t)y*outputWidth+x, 0, sizeof(real)*1); + memset(dst+(size_t)y*outputWidth+x, 0, sizeof(scalar_t)*1); else - memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix, sizeof(real)*(1)); + memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix, sizeof(scalar_t)*(1)); } } } @@ -153,10 +153,10 @@ void THNN_(unfolded_copy)( iy = (int64_t)y*dH + kh; ix = 0 + kw; if (dW == 1) - memcpy(dst+(size_t)y*outputWidth, src+(size_t)iy*inputWidth+ix, sizeof(real)*outputWidth); + memcpy(dst+(size_t)y*outputWidth, src+(size_t)iy*inputWidth+ix, sizeof(scalar_t)*outputWidth); else{ for (x=0; xstride[0], num_batches' new_kernel_launch: ' createBatchGemmBuffer, grid, block, 0, THCState_getCurrentStream(state), - (const real**)d_result, THCTensor_(data)(state, ra__), + (const scalar_t**)d_result, THCTensor_(data)(state, ra__), static_cast(ra__->stride[0]), static_cast(num_batches)' """ @@ -1083,9 +1083,9 @@ def replace_arg(match): old_kernel_launch_parameters, new_kernel_launch_parameters) # PyTorch Specific: Add template type - # Here the template value will be resolved from to . + # Here the template value will be resolved from to . 
if "THCUNN" in filepath.split("/") and "generic" not in filepath.split("/"): - kernel_name_with_template = kernel_name_with_template.replace("", "") + kernel_name_with_template = kernel_name_with_template.replace("", "") full_new_kernel_launch = re.sub(r'\b{0}\b'.format(re.escape(original_kernel_name_with_template)), lambda x: kernel_name_with_template, full_new_kernel_launch) diff --git a/torch/csrc/generic/Storage.cpp b/torch/csrc/generic/Storage.cpp index 8fa743676f53f5..94c0b85bdb5eca 100644 --- a/torch/csrc/generic/Storage.cpp +++ b/torch/csrc/generic/Storage.cpp @@ -105,9 +105,9 @@ static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObjec try { for (Py_ssize_t i = 0; i < length; i++) { item = PySequence_GetItem(first_arg, i); - real value = THPUtils_(unpackReal)(item.get()); + scalar_t value = THPUtils_(unpackReal)(item.get()); #if !defined(THC_GENERIC_FILE) - self->cdata->unsafe_data()[i] = value; + self->cdata->unsafe_data()[i] = value; #else // TODO: this might be slow - consider batched updates? THCStorage_(set)(LIBRARY_STATE self->cdata, i, value); @@ -118,7 +118,7 @@ static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObjec "but one of the items was of type %s instead of %s", THPUtils_typename(first_arg), THPUtils_typename(item.get()), - THPUtils_typeTraits::python_type_str); + THPUtils_typeTraits::python_type_str); return nullptr; } return (PyObject*)self.release(); @@ -156,7 +156,7 @@ static PyObject * THPStorage_(get)(THPStorage *self, PyObject *index) "size %" PRId64, (int64_t) nindex, (int64_t) self->cdata->numel()); return nullptr; } - real value = THWStorage_(get)(LIBRARY_STATE self->cdata, nindex); + scalar_t value = THWStorage_(get)(LIBRARY_STATE self->cdata, nindex); return THPUtils_(newReal)(value); /* Slice index */ } else if (PySlice_Check(index)) { @@ -174,12 +174,12 @@ static int THPStorage_(set)(THPStorage *self, PyObject *index, PyObject *value) HANDLE_TH_ERRORS if (!THPUtils_(checkReal)(value)) { THPUtils_setError("can only set storage content with a %s, but got " - "%s instead", THPUtils_typeTraits::python_type_str, + "%s instead", THPUtils_typeTraits::python_type_str, THPUtils_typename(value)); return -1; } - real rvalue = THPUtils_(unpackReal)(value); + scalar_t rvalue = THPUtils_(unpackReal)(value); if (THPUtils_checkLong(index)) { int64_t nindex = THPUtils_unpackLong(index); THWStorage_(set)(LIBRARY_STATE self->cdata, nindex, rvalue); diff --git a/torch/csrc/generic/StorageMethods.cpp b/torch/csrc/generic/StorageMethods.cpp index ede2e11c92c886..42471b5e79a472 100644 --- a/torch/csrc/generic/StorageMethods.cpp +++ b/torch/csrc/generic/StorageMethods.cpp @@ -77,7 +77,7 @@ static PyObject * THPStorage_(fill_)(THPStorage *self, PyObject *number_arg) { HANDLE_TH_ERRORS THPUtils_assert(THPUtils_(checkReal)(number_arg), "fill_ expects %s, " - "but got %s", THPUtils_typeTraits::python_type_str, + "but got %s", THPUtils_typeTraits::python_type_str, THPUtils_typename(number_arg)); THWStorage_(fill)(LIBRARY_STATE self->cdata, THPUtils_(unpackReal)(number_arg)); Py_INCREF(self); @@ -134,16 +134,16 @@ static PyObject * THPStorage_(fromBuffer)(PyObject *_unused, PyObject *args, PyO } if (count < 0) { - if ((buffer.len - offset) % sizeof(real) != 0) { + if ((buffer.len - offset) % sizeof(scalar_t) != 0) { PyErr_Format(PyExc_ValueError, "buffer size (%" PRId64 ") must be a multiple " - "of element size (%" PRId64 ")", (int64_t)buffer.len, (int64_t)sizeof(real)); + "of element size (%" PRId64 ")", (int64_t)buffer.len, 
(int64_t)sizeof(scalar_t)); PyBuffer_Release(&buffer); return nullptr; } - count = (buffer.len - offset) / sizeof(real); + count = (buffer.len - offset) / sizeof(scalar_t); } - if (offset + (count * (Py_ssize_t)sizeof(real)) > buffer.len) { + if (offset + (count * (Py_ssize_t)sizeof(scalar_t)) > buffer.len) { PyErr_Format(PyExc_ValueError, "buffer has only %" PRId64 " elements after offset " "%" PRId64 ", but specified a size of %" PRId64, (int64_t)(buffer.len - offset), (int64_t)offset, (int64_t)count); diff --git a/torch/csrc/generic/StorageSharing.cpp b/torch/csrc/generic/StorageSharing.cpp index 109962ad227b51..6d0057e65593f6 100644 --- a/torch/csrc/generic/StorageSharing.cpp +++ b/torch/csrc/generic/StorageSharing.cpp @@ -54,7 +54,7 @@ static THWStorage* THPStorage_(newFilenameStorage)(ptrdiff_t size) int flags = TH_ALLOCATOR_MAPPED_SHAREDMEM | TH_ALLOCATOR_MAPPED_EXCLUSIVE; std::string handle = THPStorage_(__newHandle)(); return THWStorage_(newWithDataAndAllocator)( - THManagedMapAllocator::makeDataPtr("", handle.c_str(), flags, size * sizeof(real)), size, /* allocator */ nullptr); + THManagedMapAllocator::makeDataPtr("", handle.c_str(), flags, size * sizeof(scalar_t)), size, /* allocator */ nullptr); } static PyObject * THPStorage_(pyNewFilenameStorage)(PyObject *_unused, PyObject *args) @@ -121,7 +121,7 @@ static PyObject * THPStorage_(newSharedFilename)(PyObject *_unused, PyObject *ar TH_ALLOCATOR_MAPPED_NOCREATE; return THPStorage_(New)( THWStorage_(newWithDataAndAllocator)( - THManagedMapAllocator::makeDataPtr(manager_handle, object_handle, flags, size * sizeof(real)), + THManagedMapAllocator::makeDataPtr(manager_handle, object_handle, flags, size * sizeof(scalar_t)), size, /* allocator */ nullptr)); END_HANDLE_TH_ERRORS @@ -134,7 +134,7 @@ static THWStorage* THPStorage_(newFdStorage)(ptrdiff_t size) TH_ALLOCATOR_MAPPED_KEEPFD | TH_ALLOCATOR_MAPPED_UNLINK; std::string handle = THPStorage_(__newHandle)(); - auto sptr = THMapAllocator::makeDataPtr(handle.c_str(), flags, size * sizeof(real), nullptr); + auto sptr = THMapAllocator::makeDataPtr(handle.c_str(), flags, size * sizeof(scalar_t), nullptr); return THWStorage_(newWithDataAndAllocator)(std::move(sptr), size, /* allocator */ nullptr); } @@ -203,8 +203,8 @@ static PyObject * THPStorage_(newSharedFd)(PyObject *_unused, PyObject *args) TH_ALLOCATOR_MAPPED_FROMFD; return THPStorage_(New)( THWStorage_(newWithDataAndAllocator)( - // TODO: Maybe we should read out the real size and use it for size - THMapAllocator::makeDataPtr(WITH_FD, nullptr, fd, flags, size * sizeof(real), nullptr), + // TODO: Maybe we should read out the scalar_t size and use it for size + THMapAllocator::makeDataPtr(WITH_FD, nullptr, fd, flags, size * sizeof(scalar_t), nullptr), size, /* allocator */ nullptr)); END_HANDLE_TH_ERRORS } @@ -225,14 +225,14 @@ static PyObject * THPStorage_(shareCuda)(THPStorage *self) if (THWStorage_(data)(LIBRARY_STATE storage)) { size_t base_size; void *base_ptr = THCCachingAllocator_getBaseAllocation(THWStorage_(data)(LIBRARY_STATE storage), &base_size); - ptrdiff_t offset = (char*)storage->data() - (char*)base_ptr; + ptrdiff_t offset = (char*)storage->data() - (char*)base_ptr; cudaIpcMemHandle_t handle; THCudaCheck(cudaIpcGetMemHandle(&handle, base_ptr)); _handle = PyBytes_FromStringAndSize((char *)&handle, CUDA_IPC_HANDLE_SIZE); - _offset = PyLong_FromSsize_t((Py_ssize_t)offset / sizeof(real)); - size = PyLong_FromSize_t(base_size / sizeof(real)); + _offset = PyLong_FromSsize_t((Py_ssize_t)offset / sizeof(scalar_t)); + size = 
PyLong_FromSize_t(base_size / sizeof(scalar_t)); } if (!tuple || !device || !_handle || !size || !_offset) { return nullptr; diff --git a/torch/csrc/generic/serialization.cpp b/torch/csrc/generic/serialization.cpp index 42dff61c2feb9f..2299cce245a16b 100644 --- a/torch/csrc/generic/serialization.cpp +++ b/torch/csrc/generic/serialization.cpp @@ -7,22 +7,22 @@ template void THPStorage_(writeFileRaw)(THWStorage *self, io fd) { - real *data; + scalar_t *data; int64_t size = THWStorage_(size)(LIBRARY_STATE self); #ifndef THC_GENERIC_FILE data = THWStorage_(data)(LIBRARY_STATE self); #else - std::unique_ptr cpu_data(new char[size * sizeof(real)]); - data = (real*)cpu_data.get(); - THCudaCheck(cudaMemcpy(data, THWStorage_(data)(LIBRARY_STATE self), size * sizeof(real), cudaMemcpyDeviceToHost)); + std::unique_ptr cpu_data(new char[size * sizeof(scalar_t)]); + data = (scalar_t*)cpu_data.get(); + THCudaCheck(cudaMemcpy(data, THWStorage_(data)(LIBRARY_STATE self), size * sizeof(scalar_t), cudaMemcpyDeviceToHost)); #endif ssize_t result = doWrite(fd, &size, sizeof(int64_t)); if (result != sizeof(int64_t)) throw std::system_error(result, std::system_category()); // fast track for bytes and little endian - if (sizeof(real) == 1 || THP_nativeByteOrder() == THPByteOrder::THP_LITTLE_ENDIAN) { + if (sizeof(scalar_t) == 1 || THP_nativeByteOrder() == THPByteOrder::THP_LITTLE_ENDIAN) { char *bytes = (char *) data; - int64_t remaining = sizeof(real) * size; + int64_t remaining = sizeof(scalar_t) * size; while (remaining > 0) { // we write and read in 1GB blocks to avoid bugs on some OSes ssize_t result = doWrite(fd, bytes, THMin(remaining, 1073741824)); @@ -35,26 +35,26 @@ void THPStorage_(writeFileRaw)(THWStorage *self, io fd) throw std::system_error(result, std::system_category()); } else { int64_t buffer_size = std::min(size, (int64_t)5000); - std::unique_ptr le_buffer(new uint8_t[buffer_size * sizeof(real)]); + std::unique_ptr le_buffer(new uint8_t[buffer_size * sizeof(scalar_t)]); for (int64_t i = 0; i < size; i += buffer_size) { size_t to_convert = std::min(size - i, buffer_size); - if (sizeof(real) == 2) { + if (sizeof(scalar_t) == 2) { THP_encodeInt16Buffer((uint8_t*)le_buffer.get(), (const int16_t*)data + i, THPByteOrder::THP_LITTLE_ENDIAN, to_convert); - } else if (sizeof(real) == 4) { + } else if (sizeof(scalar_t) == 4) { THP_encodeInt32Buffer((uint8_t*)le_buffer.get(), (const int32_t*)data + i, THPByteOrder::THP_LITTLE_ENDIAN, to_convert); - } else if (sizeof(real) == 8) { + } else if (sizeof(scalar_t) == 8) { THP_encodeInt64Buffer((uint8_t*)le_buffer.get(), (const int64_t*)data + i, THPByteOrder::THP_LITTLE_ENDIAN, to_convert); } - SYSCHECK(doWrite(fd, le_buffer.get(), to_convert * sizeof(real))); + SYSCHECK(doWrite(fd, le_buffer.get(), to_convert * sizeof(scalar_t))); } } } @@ -65,7 +65,7 @@ template void THPStorage_(writeFileRaw)(THWStorage *self, PyObject* f template THWStorage * THPStorage_(readFileRaw)(io file, THWStorage *_storage) { - real *data; + scalar_t *data; int64_t size; ssize_t result = doRead(file, &size, sizeof(int64_t)); if (result == 0) @@ -85,14 +85,14 @@ THWStorage * THPStorage_(readFileRaw)(io file, THWStorage *_storage) #ifndef THC_GENERIC_FILE data = THWStorage_(data)(LIBRARY_STATE storage); #else - std::unique_ptr cpu_data(new char[size * sizeof(real)]); - data = (real*)cpu_data.get(); + std::unique_ptr cpu_data(new char[size * sizeof(scalar_t)]); + data = (scalar_t*)cpu_data.get(); #endif // fast track for bytes and little endian - if (sizeof(real) == 1 || 
THP_nativeByteOrder() == THPByteOrder::THP_LITTLE_ENDIAN) { + if (sizeof(scalar_t) == 1 || THP_nativeByteOrder() == THPByteOrder::THP_LITTLE_ENDIAN) { char *bytes = (char *) data; - int64_t remaining = sizeof(real) * THWStorage_(size)(LIBRARY_STATE storage); + int64_t remaining = sizeof(scalar_t) * THWStorage_(size)(LIBRARY_STATE storage); while (remaining > 0) { // we write and read in 1GB blocks to avoid bugs on some OSes ssize_t result = doRead(file, bytes, THMin(remaining, 1073741824)); @@ -107,24 +107,24 @@ THWStorage * THPStorage_(readFileRaw)(io file, THWStorage *_storage) throw std::system_error(result, std::system_category()); } else { int64_t buffer_size = std::min(size, (int64_t)5000); - std::unique_ptr le_buffer(new uint8_t[buffer_size * sizeof(real)]); + std::unique_ptr le_buffer(new uint8_t[buffer_size * sizeof(scalar_t)]); for (int64_t i = 0; i < size; i += buffer_size) { size_t to_convert = std::min(size - i, buffer_size); - SYSCHECK(doRead(file, le_buffer.get(), sizeof(real) * to_convert)); + SYSCHECK(doRead(file, le_buffer.get(), sizeof(scalar_t) * to_convert)); - if (sizeof(real) == 2) { + if (sizeof(scalar_t) == 2) { THP_decodeInt16Buffer((int16_t*)data + i, le_buffer.get(), THPByteOrder::THP_LITTLE_ENDIAN, to_convert); - } else if (sizeof(real) == 4) { + } else if (sizeof(scalar_t) == 4) { THP_decodeInt32Buffer((int32_t*)data + i, le_buffer.get(), THPByteOrder::THP_LITTLE_ENDIAN, to_convert); - } else if (sizeof(real) == 8) { + } else if (sizeof(scalar_t) == 8) { THP_decodeInt64Buffer((int64_t*)data + i, le_buffer.get(), THPByteOrder::THP_LITTLE_ENDIAN, @@ -134,7 +134,7 @@ THWStorage * THPStorage_(readFileRaw)(io file, THWStorage *_storage) } #ifdef THC_GENERIC_FILE - THCudaCheck(cudaMemcpy(THWStorage_(data)(LIBRARY_STATE storage), data, size * sizeof(real), cudaMemcpyHostToDevice)); + THCudaCheck(cudaMemcpy(THWStorage_(data)(LIBRARY_STATE storage), data, size * sizeof(scalar_t), cudaMemcpyHostToDevice)); #endif return storage.release(); } diff --git a/torch/csrc/generic/utils.h b/torch/csrc/generic/utils.h index bcccffa051b770..ade810564fd2ae 100644 --- a/torch/csrc/generic/utils.h +++ b/torch/csrc/generic/utils.h @@ -18,7 +18,7 @@ typedef class THPPointer THPStoragePtr; #if (!defined(THC_GENERIC_FILE) || defined(THC_REAL_IS_HALF)) && \ (!defined(THD_GENERIC_FILE)) template<> -struct THPUtils_typeTraits { +struct THPUtils_typeTraits { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || \ defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || \ defined(THC_REAL_IS_HALF) From 24eb5ad0c5388bd98f3f0ee3296ab4ad2c13bdd4 Mon Sep 17 00:00:00 2001 From: iotamudelta Date: Sun, 2 Sep 2018 21:48:03 -0700 Subject: [PATCH 3/3] Fix unit tests on CI (#11191) Summary: Disables two of the unit tests in test_cuda that got introduced after test_cuda was enabled that fail on ROCm. 
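For context, skipIfRocm is an ordinary unittest-style decorator. A minimal sketch of how such a decorator can be implemented (the real helper lives in the test suite's common utilities; the PYTORCH_TEST_WITH_ROCM flag name here is an assumption):

    import os
    import unittest
    from functools import wraps

    # Hypothetical flag; the actual test suite derives this from its own config.
    TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

    def skipIfRocm(fn):
        # Skip the wrapped test when running on the ROCm stack.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if TEST_WITH_ROCM:
                raise unittest.SkipTest("test doesn't currently work on ROCm")
            return fn(*args, **kwargs)
        return wrapper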
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11191

Differential Revision: D9628702

Pulled By: ezyang

fbshipit-source-id: 4c298c728f42bb43d39b57967aa3e44385980265
---
 test/test_cuda.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/test_cuda.py b/test/test_cuda.py
index 27e21b8345ed27..b174a63201eda0 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -1696,9 +1696,11 @@ def test_min_max_inits(self):
         _, v = y.min(dim=0)
         self.assertEqual(v, expected)

+    @skipIfRocm
     def test_max_with_inf(self):
         TestTorch._test_max_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')

+    @skipIfRocm
     def test_min_with_inf(self):
         TestTorch._test_min_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')
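As a footnote to the rename patch above: the bulk of that change is a mechanical rename of the TH-era real type alias to scalar_t. A minimal sketch of that kind of token-boundary-aware rewrite, mirroring the re.sub(r'\b...\b') pattern the hipify tooling above uses (a simplified stand-in, not the actual tool; the sample line is made up):

    import re

    def rename_real(line: str) -> str:
        # Rewrite the identifier 'real' only at word boundaries, so that
        # names such as 'unpackReal' or 'THC_REAL_IS_HALF' stay untouched.
        return re.sub(r'\breal\b', 'scalar_t', line)

    print(rename_real('const real* pos1 = &idata[w1];'))
    # -> const scalar_t* pos1 = &idata[w1];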