From 7266cadf8f59cbb4e09b0b1281d36eed9e3ec69a Mon Sep 17 00:00:00 2001
From: Dheeraj Peri <peri.dheeraj@gmail.com>
Date: Fri, 1 Jul 2022 15:25:40 -0700
Subject: [PATCH 1/2] feat: Upgrade TensorRT to 8.4 EA

Signed-off-by: Dheeraj Peri <peri.dheeraj@gmail.com>
---
 README.md                                     |  4 +--
 WORKSPACE                                     | 12 +++----
 core/conversion/converters/converter_util.cpp |  4 +--
 .../test_fallback_graph_output.cpp            | 36 -------------------
 third_party/cudnn/archive/BUILD               |  2 +-
 5 files changed, 11 insertions(+), 47 deletions(-)

diff --git a/README.md b/README.md
index 2e8270b3a9..c718ad86cc 100644
--- a/README.md
+++ b/README.md
@@ -113,8 +113,8 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
 - Bazel 4.2.1
 - Libtorch 1.11.0 (built with CUDA 11.3)
 - CUDA 11.3 (10.2 on Jetson)
-- cuDNN 8.2.1
-- TensorRT 8.2.4.2 (TensorRT 8.2.1 on Jetson)
+- cuDNN 8.3.2
+- TensorRT 8.4.0.6
 
 ## Prebuilt Binaries and Wheel files
 
diff --git a/WORKSPACE b/WORKSPACE
index 2779e93cc7..a27563da16 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -76,20 +76,20 @@ http_archive(
 http_archive(
     name = "cudnn",
     build_file = "@//third_party/cudnn/archive:BUILD",
-    sha256 = "0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7",
-    strip_prefix = "cuda",
+    sha256 = "5500953c08c5e5d1dddcfda234f9efbddcdbe43a53b26dc0a82c723fa170c457",
+    strip_prefix = "cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive",
     urls = [
-        "https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.2.4/11.4_20210831/cudnn-11.4-linux-x64-v8.2.4.15.tgz",
+        "https://developer.nvidia.com/compute/cudnn/secure/8.3.2/local_installers/11.5/cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz",
     ],
 )
 
 http_archive(
     name = "tensorrt",
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    sha256 = "826180eaaecdf9a7e76116855b9f1f3400ea9b06e66b06a3f6a0747ba6f863ad",
-    strip_prefix = "TensorRT-8.2.4.2",
+    sha256 = "0cd8071d717f1b870ada79ce5889ab3d702439c356e96cbef23d0b469007fcb4",
+    strip_prefix = "TensorRT-8.4.0.6",
     urls = [
-        "https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.2.4/tars/tensorrt-8.2.4.2.linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz",
+        "https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.4.0/tars/tensorrt-8.4.0.6.linux.x86_64-gnu.cuda-11.6.cudnn8.3.tar.gz",
     ],
 )
 
diff --git a/core/conversion/converters/converter_util.cpp b/core/conversion/converters/converter_util.cpp
index 9312706b47..708b3c7a11 100644
--- a/core/conversion/converters/converter_util.cpp
+++ b/core/conversion/converters/converter_util.cpp
@@ -135,8 +135,8 @@ nvinfer1::ITensor* castITensor(ConversionCtx* ctx, nvinfer1::ITensor* tensor, nv
 
     auto id_layer = ctx->net->addIdentity(*tensor);
     TORCHTRT_CHECK(id_layer, "Unable to create identity layer for ITensor: " << tensor_id.str());
-    auto casted_tensor = id_layer->getOutput(0);
-    casted_tensor->setType(dtype);
+    // layer->setOutputType should be used for casting and not manually setting output_tensor->setType()
+    id_layer->setOutputType(0, dtype);
 
     LOG_DEBUG(ctx->logger, "Casting ITensor " << tensor_id.str() << " from " << tensor->getType() << " to " << dtype);
 
diff --git a/tests/core/partitioning/test_fallback_graph_output.cpp b/tests/core/partitioning/test_fallback_graph_output.cpp
index 2421d94ec0..98fc4e6128 100644
--- a/tests/core/partitioning/test_fallback_graph_output.cpp
+++ b/tests/core/partitioning/test_fallback_graph_output.cpp
@@ -66,40 +66,4 @@ TEST(Partitioning, ComputeMobileNetFallbackGraphCorrectly) {
   auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
 }
-
-TEST(Partitioning, ComputeResNet50HalfFallbackGraphCorrectly) {
-  torch::jit::script::Module mod;
-  try {
-    mod = torch::jit::load("tests/modules/resnet50_traced.jit.pt");
-  } catch (const c10::Error& e) {
-    std::cerr << "error loading the model\n";
-    return;
-  }
-
-  mod.to(torch::kHalf);
-
-  const std::vector<std::vector<int64_t>> input_shapes = {{1, 3, 224, 224}};
-  std::vector<torch::jit::IValue> jit_inputs_ivalues;
-  std::vector<torch::jit::IValue> trt_inputs_ivalues;
-  for (auto in_shape : input_shapes) {
-    auto in = at::randint(5, in_shape, {at::kCUDA}).to(torch::kHalf);
-    jit_inputs_ivalues.push_back(in.clone());
-    trt_inputs_ivalues.push_back(in.clone());
-  }
-
-  auto in_shape = torch_tensorrt::core::ir::Input({1, 3, 224, 224});
-  in_shape.dtype = nvinfer1::DataType::kHALF;
-
-  std::vector<torch_tensorrt::core::ir::Input> input_ranges({in_shape});
-  auto g = mod.get_method("forward").graph();
-  torch_tensorrt::core::CompileSpec cfg(input_ranges);
-  cfg.partition_info.enabled = true;
-  cfg.partition_info.forced_fallback_operators.push_back("aten::add");
-
-  auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
-  auto trt_mod = torch_tensorrt::core::CompileGraph(mod, cfg);
-  auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
-  // Lower threshold because FP16
-  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-1));
-}
 #endif
diff --git a/third_party/cudnn/archive/BUILD b/third_party/cudnn/archive/BUILD
index c087ad303b..eb5945e7f5 100644
--- a/third_party/cudnn/archive/BUILD
+++ b/third_party/cudnn/archive/BUILD
@@ -9,7 +9,7 @@ cc_library(
 
 cc_import(
     name = "cudnn_lib",
-    shared_library = "lib64/libcudnn.so",
+    shared_library = "lib/libcudnn.so",
     visibility = ["//visibility:private"],
 )
 

From 5da47c1e7b9912c6c471faa5d94710bbe81fcc9d Mon Sep 17 00:00:00 2001
From: Dheeraj Peri <peri.dheeraj@gmail.com>
Date: Thu, 7 Jul 2022 14:17:27 -0700
Subject: [PATCH 2/2] chore: Fix missing return of casted tensor

Signed-off-by: Dheeraj Peri <peri.dheeraj@gmail.com>
---
 core/conversion/converters/converter_util.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/conversion/converters/converter_util.cpp b/core/conversion/converters/converter_util.cpp
index 708b3c7a11..3bf495c192 100644
--- a/core/conversion/converters/converter_util.cpp
+++ b/core/conversion/converters/converter_util.cpp
@@ -137,7 +137,7 @@ nvinfer1::ITensor* castITensor(ConversionCtx* ctx, nvinfer1::ITensor* tensor, nv
     TORCHTRT_CHECK(id_layer, "Unable to create identity layer for ITensor: " << tensor_id.str());
     // layer->setOutputType should be used for casting and not manually setting output_tensor->setType()
     id_layer->setOutputType(0, dtype);
-
+    auto casted_tensor = id_layer->getOutput(0);
     LOG_DEBUG(ctx->logger, "Casting ITensor " << tensor_id.str() << " from " << tensor->getType() << " to " << dtype);
 
     std::stringstream ss;