scalar_to_tensor avoid scalar.to<float>() #1448

Merged
1 commit merged on Nov 10, 2022
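The change replaces the per-converter pattern tensor_to_const(ctx, torch::tensor({scalar.to<float>()})) with a single scalar_to_tensor(ctx, scalar) call, so the TensorRT constant built for a Scalar argument no longer has to be forced to float32. The helper's body is not part of this diff; the following is a minimal sketch of what such a helper could look like, assuming it dispatches on the at::Scalar's type and reuses the existing tensor_to_const utility. The name scalar_to_tensor_sketch and the exact dtype choices are illustrative assumptions, not the repository's implementation.

// Sketch only; repo headers providing ConversionCtx and tensor_to_const are assumed
// to be included, as they are in element_wise.cpp.
nvinfer1::ITensor* scalar_to_tensor_sketch(ConversionCtx* ctx, at::Scalar s) {
  if (s.isIntegral(/*includeBool=*/false)) {
    // Integral scalars stay integral (int32 chosen here as the integer constant type).
    return tensor_to_const(ctx, torch::tensor({s.to<int>()}, torch::kInt32));
  } else if (s.isBoolean()) {
    // Bool scalars become a bool constant.
    return tensor_to_const(ctx, torch::tensor({s.to<bool>()}, torch::kBool));
  }
  // Floating-point scalars keep the original float path.
  return tensor_to_const(ctx, torch::tensor({s.to<float>()}));
}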
35 changes: 13 additions & 22 deletions core/conversion/converters/impl/element_wise.cpp
@@ -166,11 +166,11 @@ auto element_wise_registrations TORCHTRT_UNUSED =
  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
    // Should implement self - alpha * other
    auto self = args[0].ITensorOrFreeze(ctx);
-   auto scalar = args[2].unwrapToScalar().to<float>();
    auto other = args[1].ITensorOrFreeze(ctx);
+   auto scalar = args[2].unwrapToScalar();

-   if (1 != scalar) {
-     auto alphaTensor = tensor_to_const(ctx, torch::tensor({scalar}));
+   if (1 != scalar.to<float>()) {
+     auto alphaTensor = scalar_to_tensor(ctx, scalar);
      auto scaleLayer = add_elementwise(
          ctx,
          nvinfer1::ElementWiseOperation::kPROD,
@@ -214,11 +214,11 @@ auto element_wise_registrations TORCHTRT_UNUSED =
  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
    // Should implement self - alpha * other
    auto self = args[0].ITensorOrFreeze(ctx);
-   auto scalar = args[2].unwrapToScalar().to<float>();
    auto other = args[1].ITensorOrFreeze(ctx);
+   auto scalar = args[2].unwrapToScalar();

-   if (1 != scalar) {
-     auto alphaTensor = tensor_to_const(ctx, torch::tensor({scalar}));
+   if (1 != scalar.to<float>()) {
+     auto alphaTensor = scalar_to_tensor(ctx, scalar);
      auto scaleLayer = add_elementwise(
          ctx,
          nvinfer1::ElementWiseOperation::kPROD,
@@ -351,8 +351,7 @@ auto element_wise_registrations TORCHTRT_UNUSED =
 {"aten::div.Scalar(Tensor self, Scalar other) -> (Tensor)",
  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
    auto self = args[0].ITensorOrFreeze(ctx);
-   auto otherScalar = args[1].unwrapToScalar().to<float>();
-   auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
+   auto other = scalar_to_tensor(ctx, args[1].unwrapToScalar());
    auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other, util::node_info(n));
    TORCHTRT_CHECK(div, "Unable to create div layer from node: " << *n);

@@ -381,8 +380,7 @@ auto element_wise_registrations TORCHTRT_UNUSED =
 {"aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
    auto self = args[0].ITensorOrFreeze(ctx);
-   auto otherScalar = args[1].unwrapToScalar().to<float>();
-   auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
+   auto other = scalar_to_tensor(ctx, args[1].unwrapToScalar());
    auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other, util::node_info(n));
    TORCHTRT_CHECK(div, "Unable to create div layer from node: " << *n);

@@ -481,18 +479,12 @@ auto element_wise_registrations TORCHTRT_UNUSED =
 {"aten::ne.Scalar(Tensor self, Scalar other) -> (Tensor)",
  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
    auto self = args[0].ITensorOrFreeze(ctx);
-   auto scalar = args[1].unwrapToScalar();
-   nvinfer1::ITensor* scalar_tensor;
-   if (self->getType() == nvinfer1::DataType::kFLOAT || self->getType() == nvinfer1::DataType::kHALF) {
-     scalar_tensor = tensor_to_const(ctx, torch::tensor({scalar.to<float>()}));
-   } else {
-     scalar_tensor = tensor_to_const(ctx, torch::tensor({scalar.to<int>()}));
-   }
+   auto other = scalar_to_tensor(ctx, args[1].unwrapToScalar());
    auto equal = add_elementwise(
        ctx,
        nvinfer1::ElementWiseOperation::kEQUAL,
        self,
-       scalar_tensor,
+       other,
        util::node_info(n) + std::string("is_equal"));
    TORCHTRT_CHECK(equal, "Unable to create elementwise equal layer from node: " << *n);
    // XOR with ones negates and produces not_equal result
@@ -534,8 +526,7 @@ auto element_wise_registrations TORCHTRT_UNUSED =
 {"aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> (Tensor)",
  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
    auto self = args[0].ITensorOrFreeze(ctx);
-   auto exponentScalar = args[1].unwrapToScalar().to<float>();
-   auto exponent = tensor_to_const(ctx, torch::tensor({exponentScalar}));
+   auto exponent = scalar_to_tensor(ctx, args[1].unwrapToScalar());
    auto pow =
        add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPOW, self, exponent, util::node_info(n));
    TORCHTRT_CHECK(pow, "Unable to create Power layer from node: " << *n);
@@ -681,9 +672,9 @@ auto element_wise_registrations TORCHTRT_UNUSED =
 {"aten::eq.Scalar(Tensor self, Scalar other) -> (Tensor)",
  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
    auto self = args[0].ITensorOrFreeze(ctx);
-   auto otherScalar = args[1].unwrapToScalar().to<float>();
-   auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
+   auto other = scalar_to_tensor(ctx, args[1].unwrapToScalar());
    if (self->getType() == nvinfer1::DataType::kBOOL) {
+     auto otherScalar = args[1].unwrapToScalar().to<float>();
      if (otherScalar == 0 || otherScalar == 1) {
        LOG_DEBUG("Since input tensor is type bool, casting input tensor and scalar to int32");
        other = castITensor(ctx, other, nvinfer1::DataType::kINT32);
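For converters such as aten::ne.Scalar and aten::eq.Scalar, the practical effect is that the constant's dtype can follow the scalar instead of always coming out as float32 (the old code only special-cased this inside ne.Scalar). A standalone libtorch snippet, purely illustrative of that dtype difference and not part of the PR:

#include <iostream>
#include <torch/torch.h>

int main() {
  at::Scalar other = 7;  // e.g. the Scalar argument of aten::eq.Scalar
  // Old pattern: the scalar is always promoted to float32 before building the constant.
  auto as_float = torch::tensor({other.to<float>()});
  // dtype-preserving pattern (what scalar_to_tensor is assumed to enable): keep it integral.
  auto as_int = torch::tensor({other.to<int>()}, torch::kInt32);
  std::cout << as_float.dtype() << " vs " << as_int.dtype() << std::endl;  // float vs int
  return 0;
}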