Skip to content

Torch 1.6.0 update #166

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 13 commits into from
Aug 26, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .bazelversion
Original file line number Diff line number Diff line change
@@ -1 +1 @@
3.3.1
3.4.1
8 changes: 4 additions & 4 deletions WORKSPACE
Original file line number Diff line number Diff line change
Expand Up @@ -53,16 +53,16 @@ http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.5.1.zip"],
sha256 = "cf0691493d05062fe3239cf76773bae4c5124f4b039050dbdd291c652af3ab2a"
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.6.0.zip"],
sha256 = "fded948bd2dbee625cee33ebbd4843a69496729389e0200a90fbb667cdaeeb69"
)

http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
sha256 = "818977576572eadaf62c80434a25afe44dbaa32ebda3a0919e389dcbe74f8656",
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-shared-with-deps-1.5.1.zip"],
sha256 = "141bb229f4bbf905541096cf8705785e7b0c79e37ca1e5db9d372730b1b9abd7",
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-shared-with-deps-1.6.0.zip"],
)

# Download these tarballs manually from the NVIDIA website
Expand Down
8 changes: 3 additions & 5 deletions core/conversion/InterfaceTypes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ namespace core {
namespace conversion {

GraphParams get_named_params(c10::ArrayRef<torch::jit::Value*> inputs,
std::vector<at::Tensor> params) {
std::vector<torch::jit::IValue> params) {
GraphParams named_params;
auto param_it = params.begin();
for (auto in : inputs) {
Expand All @@ -18,10 +18,8 @@ GraphParams get_named_params(c10::ArrayRef<torch::jit::Value*> inputs,
++param_it;
}
}
//ASSERT(named_params.size() == params.size);
if (named_params.size() != params.size()) {
LOG_ERROR("Graph parameter parsing failed");
}

TRTORCH_CHECK(named_params.size() == params.size(), "Graph parameter parsing failed, mismatched number of static parameters and IValues")
return std::move(named_params);
}

Expand Down
11 changes: 6 additions & 5 deletions core/conversion/conversion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -165,10 +165,11 @@ void AddInputs(ConversionCtx* ctx,
TRTORCH_CHECK(profile->isValid(), "Optimization profile is invalid, please check the input range provided (conversion.AddInputs)");

ctx->cfg->addOptimizationProfile(profile);
// TODO: Enable in TRT 7.1
// if (ctx->op_precision == nvinfer1::DataType::kINT8) {
// ctx->cfg->setCalibrationProfile(profile);
// }
#if NV_TENSORRT_MAJOR > 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR >= 1)
if (ctx->op_precision == nvinfer1::DataType::kINT8) {
ctx->cfg->setCalibrationProfile(profile);
}
#endif
}

void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
Expand All @@ -186,7 +187,7 @@ void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outp

void AddParamsToCtxValueMap(ConversionCtx* ctx, GraphParams& params) {
for (auto p : params) {
ctx->evaluated_value_map[p.first] = torch::jit::IValue(p.second.clone());
ctx->evaluated_value_map[p.first] = std::move(p.second);
}
}

Expand Down
6 changes: 4 additions & 2 deletions core/conversion/conversion.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,11 @@ struct ConversionInfo {
: input_ranges(std::move(input_ranges)), engine_settings(BuilderSettings()) {}
};

using GraphParams = std::map<torch::jit::Value*, at::Tensor>;
//TODO: REMOVE GRAPH AND PARAMS AND MOVE FULLY TO INLINED CONSTANTS

GraphParams get_named_params(c10::ArrayRef<torch::jit::Value*> inputs, std::vector<at::Tensor> params);
using GraphParams = std::map<torch::jit::Value*, torch::jit::IValue>;

GraphParams get_named_params(c10::ArrayRef<torch::jit::Value*> inputs, std::vector<torch::jit::IValue> params);

// Converts a already lowered block (blocks with no sub blocks) to
// a serialized TensorRT engine that can be deserialized and run
Expand Down
4 changes: 3 additions & 1 deletion core/conversion/evaluators/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,9 @@ cc_library(
"NodeEvaluatorRegistry.cpp",
"prim.cpp",
"aten.cpp",
"eval_macros.h"
"eval_macros.h",
"eval_util.h",
"eval_util.cpp"
],
deps = [
"//core/util:prelude",
Expand Down
105 changes: 105 additions & 0 deletions core/conversion/evaluators/eval_util.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
#include "ATen/core/ivalue.h"
#include "ATen/core/List.h"
#include "core/util/prelude.h"
#include "ATen/core/functional.h"

namespace trtorch {
namespace core {
namespace conversion {
namespace evaluators {

// TODO: Switch back to PyTorch canonical implementation
// Local copy of torch::jit::toIValue: extracts the constant payload of a
// prim::Constant value as an IValue, dispatching on the value's static type.
// Returns c10::nullopt when `v` is not a prim::Constant (or is a function
// constant); throws std::runtime_error for unsupported literal types.
c10::optional<torch::jit::IValue> toIValue(const torch::jit::Value* v) {
  // Only prim::Constant nodes carry an extractable literal; function
  // constants are deliberately excluded.
  if (v->node()->kind() != torch::jit::prim::Constant || v->type()->cast<c10::FunctionType>()) {
    return c10::nullopt;
  }
  const torch::jit::Node* node = v->node();
  const c10::TypePtr& type = v->type();
  if (type->isSubtypeOf(c10::TensorType::get())) {
    return node->t(c10::attr::value);
  } else if (type->isSubtypeOf(c10::BoolType::get())) {
    // Bools are stored in the int attribute slot; narrow explicitly.
    return (bool)node->i(c10::attr::value);
  } else if (
      type->isSubtypeOf(c10::NumberType::get()) &&
      node->kindOf(c10::attr::value) == torch::jit::AttributeKind::i) {
    return node->i(c10::attr::value);
  } else if (
      type->isSubtypeOf(c10::NumberType::get()) &&
      node->kindOf(c10::attr::value) == torch::jit::AttributeKind::f) {
    return node->f(c10::attr::value);
  } else if (type->isSubtypeOf(c10::ListType::ofInts())) {
    // Lists may be stored either as a typed attribute (is/fs/ts) or as a
    // generic IValue attribute; fall back to the latter when the typed
    // accessor throws.
    try {
      const auto& is = node->is(c10::attr::value);
      return is;
    } catch (const std::exception& ex) {
      const auto& ival = node->ival(c10::attr::value);
      return ival;
    }
  } else if (type->isSubtypeOf(c10::ListType::ofFloats())) {
    try {
      const auto& fs = node->fs(c10::attr::value);
      return fs;
    } catch (const std::exception& ex) {
      const auto& ival = node->ival(c10::attr::value);
      return ival;
    }
  } else if (type->isSubtypeOf(c10::ListType::ofBools())) {
    // Bool lists are stored as an int list; convert element-wise.
    const auto bs = c10::fmap<bool>(node->is(c10::attr::value));
    return bs;
  } else if (type->isSubtypeOf(c10::ListType::ofTensors())) {
    try {
      const auto& ts = node->ts(c10::attr::value);
      return ts;
    } catch (const std::exception& ex) {
      const auto& ival = node->ival(c10::attr::value);
      return ival;
    }
  } else if (type->isSubtypeOf(c10::ListType::ofStrings())) {
    try {
      const auto& ss = node->ss(c10::attr::value);
      // Repackage the string vector as a GenericList of StringType so it
      // round-trips as a proper IValue list.
      auto vals = c10::impl::GenericList(c10::StringType::get());
      for (const auto& str : ss) {
        vals.push_back(str);
      }
      return vals;
    } catch (const std::exception& ex) {
      const auto& ival = node->ival(c10::attr::value);
      return ival;
    }
  } else if (
      type->cast<c10::ListType>() &&
      node->kindOf(c10::attr::value) == torch::jit::AttributeKind::ival) {
    // Generic (heterogeneously-typed) list stored directly as an IValue.
    const auto& list = node->ival(c10::attr::value);
    TRTORCH_ASSERT(list.isList(), "Is not a list");
    return list;
  } else if (
      type->cast<c10::DictType>() &&
      node->kindOf(c10::attr::value) == torch::jit::AttributeKind::ival) {
    const auto& dict = node->ival(c10::attr::value);
    TRTORCH_ASSERT(dict.isGenericDict(), "Is not a dict");
    return dict;
  } else if (
      type->cast<c10::TupleType>() &&
      node->kindOf(c10::attr::value) == torch::jit::AttributeKind::ival) {
    const auto& tup = node->ival(c10::attr::value);
    TRTORCH_ASSERT(tup.isTuple(), "Is not a tuple");
    return tup;
  } else if (type == c10::StringType::get()) {
    const auto& s = node->s(c10::attr::value);
    return s;
  } else if (type == c10::DeviceObjType::get()) {
    // Device constants are serialized as their string form (e.g. "cuda:0").
    auto d = c10::Device(node->s(c10::attr::value));
    return d;
  } else if (node->mustBeNone()) {
    // prim::Constant with no value -> None IValue.
    return torch::jit::IValue();
  } else {
    std::stringstream ss;
    ss << "constant literal not supported for: " << type->str();
    throw std::runtime_error(ss.str());
  }
}

} // namespace evaluators
} // namespace conversion
} // namespace core
} // namespace trtorch
15 changes: 15 additions & 0 deletions core/conversion/evaluators/eval_util.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#pragma once

#include "torch/csrc/jit/ir/ir.h"

namespace trtorch {
namespace core {
namespace conversion {
namespace evaluators {

// Extract the constant payload of a prim::Constant value as an IValue.
// Returns c10::nullopt when `v` is not a constant (or is a function
// constant); throws std::runtime_error for unsupported literal types.
c10::optional<torch::jit::IValue> toIValue(const torch::jit::Value* v);

} // namespace evaluators
} // namespace conversion
} // namespace core
} // namespace trtorch
5 changes: 3 additions & 2 deletions core/conversion/evaluators/prim.cpp
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#include <limits>

#include "torch/csrc/jit/ir/ir.h"
#include "torch/csrc/jit/ir/constants.h"
//#include "torch/csrc/jit/ir/constants.h"
#include "ATen/core/functional.h"
#include "ATen/core/ivalue.h"
#include "ATen/core/List.h"
Expand All @@ -11,6 +11,7 @@

#include "core/conversion/evaluators/evaluators.h"
#include "core/conversion/evaluators/eval_macros.h"
#include "core/conversion/evaluators/eval_util.h"

namespace trtorch {
namespace core {
Expand All @@ -25,7 +26,7 @@ auto prim_registrations = RegisterNodeEvaluators()
if (n->output()->type()->kind() == at::FunctionType::Kind) {
return {};
}
return torch::jit::toIValue(n->output());
return evaluators::toIValue(n->output());
}
}).evaluator({
torch::jit::prim::NumToTensor,
Expand Down
9 changes: 4 additions & 5 deletions core/lowering/lowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
#include "torch/csrc/jit/passes/lower_graph.h"
#include "torch/csrc/jit/passes/lower_tuples.h"
#include "torch/csrc/jit/passes/peephole.h"
#include "torch/csrc/jit/passes/quantization.h"

#include "core/util/prelude.h"
#include "core/lowering/lowering.h"
Expand Down Expand Up @@ -50,8 +49,7 @@ torch::jit::Module LowerModule(const torch::jit::script::Module& mod) {
return mod_;
}

std::pair<std::shared_ptr<torch::jit::Graph>, std::vector<at::Tensor>> Lower(const torch::jit::script::Module& mod,
std::string method_name) {
std::pair<std::shared_ptr<torch::jit::Graph>, std::vector<torch::jit::IValue>> Lower(const torch::jit::script::Module& mod, std::string method_name) {
auto lowered_mod = LowerModule(mod);
auto g = lowered_mod.get_method(method_name).graph();
LOG_GRAPH(*g);
Expand All @@ -62,10 +60,11 @@ std::pair<std::shared_ptr<torch::jit::Graph>, std::vector<at::Tensor>> Lower(con
lowering::LowerGraph(g);
//=[torch::jit::FoldConvBatchNorm2d(lowered_mod);
LOG_GRAPH("LibTorch Lowering");
auto graph_and_parameters = torch::jit::LowerGraph(*g, lowered_mod._ivalue());
auto graph_and_ivalues = torch::jit::LowerGraph(*g, lowered_mod._ivalue());
// Is this necessary?
lowering::LowerBlock(g->block());
return graph_and_parameters;

return graph_and_ivalues;
}


Expand Down
4 changes: 2 additions & 2 deletions core/lowering/lowering.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ namespace lowering {
void LowerBlock(torch::jit::Block* b);
void LowerGraph(std::shared_ptr<torch::jit::Graph>& g);
torch::jit::Module LowerModule(const torch::jit::script::Module& mod);
std::pair<std::shared_ptr<torch::jit::Graph>, std::vector<at::Tensor>> Lower(const torch::jit::script::Module& mod,
std::string method_name);
std::pair<std::shared_ptr<torch::jit::Graph>, std::vector<torch::jit::IValue>> Lower(const torch::jit::script::Module& mod,
std::string method_name);

} // namespace lowering
} // namespace core
Expand Down
4 changes: 2 additions & 2 deletions cpp/api/include/trtorch/macros.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@
#define STR(x) XSTR(x)

#define TRTORCH_MAJOR_VERSION 0
#define TRTORCH_MINOR_VERSION 0
#define TRTORCH_PATCH_VERSION 3
#define TRTORCH_MINOR_VERSION 1
#define TRTORCH_PATCH_VERSION 0
#define TRTORCH_VERSION STR(TRTORCH_MAJOR_VERSION) \
"." STR(TRTORCH_MINOR_VERSION) \
"." STR(TRTORCH_PATCH_VERSION)
36 changes: 36 additions & 0 deletions docker/Dockerfile.20.07
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
FROM nvcr.io/nvidia/pytorch:20.07-py3

# -y keeps apt-get non-interactive so the image build does not hang on a prompt
RUN apt-get update && apt-get install -y curl gnupg && rm -rf /var/lib/apt/lists/*

# Register the upstream bazel apt repository
RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add - && \
    echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list

RUN apt-get update && apt-get install -y bazel-3.4.1 && rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/bin/bazel-3.4.1 /usr/bin/bazel

RUN pip install notebook

COPY . /opt/trtorch
# Replace the repo WORKSPACE with the container-specific one (CUDA 11 paths)
RUN rm /opt/trtorch/WORKSPACE
COPY ./docker/WORKSPACE.cu11.docker /opt/trtorch/WORKSPACE

# Workaround for bazel expecting both static and shared versions, we only use shared libraries inside container
RUN cp /usr/lib/x86_64-linux-gnu/libnvinfer.so /usr/lib/x86_64-linux-gnu/libnvinfer_static.a

WORKDIR /opt/trtorch
RUN bazel build //:libtrtorch --compilation_mode opt

WORKDIR /opt/trtorch/py

# Locale is not set by default
RUN apt-get update && apt-get install -y locales ninja-build && rm -rf /var/lib/apt/lists/* && locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
RUN python3 setup.py install --use-cxx11-abi

RUN conda init bash

# Fix: original had a typo ($LD_LIBRARY_PATh) which silently dropped the
# inherited value of LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH /opt/conda/lib/python3.6/site-packages/torch/lib:$LD_LIBRARY_PATH

WORKDIR /opt/trtorch/notebooks
4 changes: 2 additions & 2 deletions docker/Dockerfile.docs
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@ FROM nvcr.io/nvidia/tensorrt:20.03-py3
RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add -
RUN echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list

RUN apt update && apt install bazel-3.3.1
RUN ln -s /usr/bin/bazel-3.3.1 /usr/bin/bazel
RUN apt update && apt install bazel-3.4.1
RUN ln -s /usr/bin/bazel-3.4.1 /usr/bin/bazel


COPY ./py/requirements.txt requirements.txt
Expand Down
Loading