diff --git a/.circleci/config.yml b/.circleci/config.yml index dcbc84cc9a..16dda8609f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -435,6 +435,7 @@ commands: mkdir -p /tmp/artifacts/test_results cd tests/py pytest --junitxml=/tmp/artifacts/test_results/api/api_test_results.xml api/ + pytest --junitxml=/tmp/artifacts/test_results/models/models_test_results.xml models/ pytest --junitxml=/tmp/artifacts/test_results/integrations/integrations_test_results.xml integrations/ cd ~/project diff --git a/.github/workflows/docgen.yml b/.github/workflows/docgen.yml index 7b66b98be5..61af5bc5d9 100644 --- a/.github/workflows/docgen.yml +++ b/.github/workflows/docgen.yml @@ -31,7 +31,7 @@ jobs: - name: Set up Python 3.9.4 uses: actions/setup-python@v2 with: - python-version: 3.9.4 + python-version: 3.9.4 - uses: actions/checkout@v2 with: ref: ${{github.head_ref}} diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 58c8440684..b56a233169 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -39,7 +39,7 @@ jobs: pip3 install -r $GITHUB_WORKSPACE/.github/scripts/requirements.txt pip3 install -r $GITHUB_WORKSPACE/requirements-dev.txt - name: Lint C++ - run: | + run: | cd $GITHUB_WORKSPACE python3 $GITHUB_WORKSPACE/.github/scripts/run_cpp_linter.py env: diff --git a/core/conversion/converters/impl/max.cpp b/core/conversion/converters/impl/max.cpp index 175cc75461..3ccf165bbe 100644 --- a/core/conversion/converters/impl/max.cpp +++ b/core/conversion/converters/impl/max.cpp @@ -13,47 +13,95 @@ namespace conversion { namespace converters { namespace impl { namespace { -auto max_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern( - {"aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { - auto self = args[0].ITensorOrFreeze(ctx); - auto dim = args[1].unwrapToInt(); - auto keep_dims = args[2].unwrapToBool(); - auto selfDim = util::toVec(self->getDimensions()); - if (dim < 0) { - dim = selfDim.size() + dim; - } - uint32_t shiftDim = 1 << dim; - auto TopKOperation = nvinfer1::TopKOperation::kMAX; - auto topk_layer = ctx->net->addTopK(*self, TopKOperation, 1, shiftDim); - TORCHTRT_CHECK(topk_layer, "Unable to create max layer from node: " << *n); - auto topk_dims = util::toVec(topk_layer->getOutput(0)->getDimensions()); - - nvinfer1::ITensor* out0 = nullptr; - nvinfer1::ITensor* out1 = nullptr; - if (!keep_dims) { - if (topk_dims[dim] == 1) { - auto squeeze_layer = ctx->net->addShuffle(*topk_layer->getOutput(0)); - squeeze_layer->setReshapeDimensions(util::squeezeDims(topk_layer->getOutput(0)->getDimensions(), dim)); - TORCHTRT_CHECK(squeeze_layer, "Unable to create squeeze_layer layer from node: " << *n); - out0 = ctx->AssociateValueAndTensor(n->outputs()[0], squeeze_layer->getOutput(0)); - - auto squeeze_layer_indices = ctx->net->addShuffle(*topk_layer->getOutput(1)); - squeeze_layer_indices->setReshapeDimensions( - util::squeezeDims(topk_layer->getOutput(1)->getDimensions(), dim)); - TORCHTRT_CHECK(squeeze_layer_indices, "Unable to create squeeze_layer_indices layer from node: " << *n); - out1 = ctx->AssociateValueAndTensor(n->outputs()[1], squeeze_layer_indices->getOutput(0)); - } - } else { - out0 = ctx->AssociateValueAndTensor(n->outputs()[0], topk_layer->getOutput(0)); - out1 = ctx->AssociateValueAndTensor(n->outputs()[1], topk_layer->getOutput(1)); - } - - LOG_DEBUG("Output tensor(0) shape: " << 
out0->getDimensions()); - LOG_DEBUG("Output tensor(1) shape: " << out1->getDimensions()); - - return true; - }}); + +bool min_max_dim(ConversionCtx* ctx, const torch::jit::Node* n, args& args, nvinfer1::TopKOperation topKOperation) { + auto self = args[0].ITensorOrFreeze(ctx); + auto dim = args[1].unwrapToInt(); + auto keep_dims = args[2].unwrapToBool(); + auto selfDim = util::toVec(self->getDimensions()); + if (dim < 0) { + dim = selfDim.size() + dim; + } + uint32_t reduce_axes_mask = 1 << dim; + auto topk_layer = ctx->net->addTopK(*self, topKOperation, 1, reduce_axes_mask); + TORCHTRT_CHECK(topk_layer, "Unable to create topk layer from node: " << *n); + auto topk_dims = util::toVec(topk_layer->getOutput(0)->getDimensions()); + + nvinfer1::ITensor* out0 = nullptr; + nvinfer1::ITensor* out1 = nullptr; + if (!keep_dims) { + TORCHTRT_CHECK(topk_dims[dim] == 1, "Unexpected size in squeeze dimension. Expected: 1 Actual: " << topk_dims[dim]); + auto squeeze_layer = ctx->net->addShuffle(*topk_layer->getOutput(0)); + squeeze_layer->setReshapeDimensions(util::squeezeDims(topk_layer->getOutput(0)->getDimensions(), dim)); + TORCHTRT_CHECK(squeeze_layer, "Unable to create squeeze_layer layer from node: " << *n); + out0 = ctx->AssociateValueAndTensor(n->outputs()[0], squeeze_layer->getOutput(0)); + + auto squeeze_layer_indices = ctx->net->addShuffle(*topk_layer->getOutput(1)); + squeeze_layer_indices->setReshapeDimensions(util::squeezeDims(topk_layer->getOutput(1)->getDimensions(), dim)); + TORCHTRT_CHECK(squeeze_layer_indices, "Unable to create squeeze_layer_indices layer from node: " << *n); + out1 = ctx->AssociateValueAndTensor(n->outputs()[1], squeeze_layer_indices->getOutput(0)); + } else { + out0 = ctx->AssociateValueAndTensor(n->outputs()[0], topk_layer->getOutput(0)); + out1 = ctx->AssociateValueAndTensor(n->outputs()[1], topk_layer->getOutput(1)); + } + + LOG_DEBUG("Output tensor(0) shape: " << out0->getDimensions()); + LOG_DEBUG("Output tensor(1) shape: " << out1->getDimensions()); + + return true; +} + +bool arg_min_max(ConversionCtx* ctx, const torch::jit::Node* n, args& args, nvinfer1::TopKOperation topKOperation) { + auto self = args[0].ITensorOrFreeze(ctx); + auto dim = args[1].unwrapToInt(); + auto keep_dims = args[2].unwrapToBool(); + auto selfDim = util::toVec(self->getDimensions()); + if (dim < 0) { + dim = selfDim.size() + dim; + } + uint32_t reduce_axes_mask = 1 << dim; + auto topk_layer = ctx->net->addTopK(*self, topKOperation, 1, reduce_axes_mask); + TORCHTRT_CHECK(topk_layer, "Unable to create topk layer from node: " << *n); + auto topk_dims = util::toVec(topk_layer->getOutput(0)->getDimensions()); + + nvinfer1::ITensor* out = nullptr; + if (!keep_dims) { + TORCHTRT_CHECK(topk_dims[dim] == 1, "Unexpected size in squeeze dimension. 
Expected: 1 Actual: " << topk_dims[dim]); + auto squeeze_layer_indices = ctx->net->addShuffle(*topk_layer->getOutput(1)); + squeeze_layer_indices->setReshapeDimensions(util::squeezeDims(topk_layer->getOutput(1)->getDimensions(), dim)); + TORCHTRT_CHECK(squeeze_layer_indices, "Unable to create squeeze_layer_indices layer from node: " << *n); + out = ctx->AssociateValueAndTensor(n->outputs()[0], squeeze_layer_indices->getOutput(0)); + } else { + out = ctx->AssociateValueAndTensor(n->outputs()[0], topk_layer->getOutput(1)); + } + + LOG_DEBUG("Output tensor shape: " << out->getDimensions()); + + return true; +} + +auto max_registrations TORCHTRT_UNUSED = + RegisterNodeConversionPatterns() + .pattern( + {"aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return min_max_dim(ctx, n, args, nvinfer1::TopKOperation::kMAX); + }}) + .pattern( + {"aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return min_max_dim(ctx, n, args, nvinfer1::TopKOperation::kMIN); + }}) + .pattern( + {"aten::argmax(Tensor self, int dim, bool keepdim=False) -> (Tensor)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return arg_min_max(ctx, n, args, nvinfer1::TopKOperation::kMAX); + }}) + .pattern( + {"aten::argmin(Tensor self, int dim, bool keepdim=False) -> (Tensor)", + [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + return arg_min_max(ctx, n, args, nvinfer1::TopKOperation::kMIN); + }}); } // namespace } // namespace impl } // namespace converters diff --git a/core/conversion/converters/impl/unary.cpp b/core/conversion/converters/impl/unary.cpp index fa4e88fa5e..c78602963c 100644 --- a/core/conversion/converters/impl/unary.cpp +++ b/core/conversion/converters/impl/unary.cpp @@ -49,6 +49,21 @@ auto abs_registration TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern } }}); +auto reciprocal_registration TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern( + {"aten::reciprocal(Tensor self) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { + auto in = args[0].ITensorOrFreeze(ctx); + if (in->getType() == nvinfer1::DataType::kINT32) { + // pytorch implicitly casts to float for aten::reciprocal(int) + in = castITensor(ctx, in, nvinfer1::DataType::kFLOAT); + } + auto unary_layer = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kRECIP); + TORCHTRT_CHECK(unary_layer, "Unable to create recip layer from node: " << *n); + unary_layer->setName(util::node_info(n).c_str()); + auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], unary_layer->getOutput(0)); + LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions()); + return true; + }}); + #define convert(unary, trt_type) \ auto unary##_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern( \ {"aten::" #unary "(Tensor self) -> Tensor", \ @@ -74,7 +89,6 @@ convert(sinh, kSINH); convert(tan, kTAN); convert(atan, kATAN); convert(floor, kFLOOR); -convert(reciprocal, kRECIP); convert(log, kLOG); convert(ceil, kCEIL); convert(sqrt, kSQRT); diff --git a/core/partitioning/shape_analysis.cpp b/core/partitioning/shape_analysis.cpp index 7a36529949..f940c87751 100644 --- a/core/partitioning/shape_analysis.cpp +++ b/core/partitioning/shape_analysis.cpp @@ -167,7 +167,7 @@ void getSegmentsOutputByRunning( } if 
(cur_ivalue.toTensor().sizes().size() == 0) { // handle Scalar types, which has sizes of [] - input_shapes.push_back(util::toVec(util::toDims(c10::List({1})))); + input_shapes.push_back(util::toVec(util::toDims(c10::List({1})))); } else { input_shapes.push_back(util::toVec(util::toDims(cur_ivalue.toTensor().sizes()))); } diff --git a/cpp/bin/torchtrtc/main.cpp b/cpp/bin/torchtrtc/main.cpp index f98ed848de..bc3d5d4af0 100644 --- a/cpp/bin/torchtrtc/main.cpp +++ b/cpp/bin/torchtrtc/main.cpp @@ -35,7 +35,7 @@ bool unload_library(void* custom_lib) { bool success = false; #if defined(_WIN32) // Returns status non-zero for success - success = FreeLibrary(custom_lib) ? true : false; + success = FreeLibrary((HMODULE)custom_lib) ? true : false; #else success = dlclose(custom_lib) ? false : true; #endif diff --git a/cpp/include/torch_tensorrt/torch_tensorrt.h b/cpp/include/torch_tensorrt/torch_tensorrt.h index 45497a13a3..80db25e8f9 100644 --- a/cpp/include/torch_tensorrt/torch_tensorrt.h +++ b/cpp/include/torch_tensorrt/torch_tensorrt.h @@ -365,7 +365,7 @@ class TensorFormat { * signifying a static input shape or a set of three input shapes representing * the min, optiminal and max input shapes allowed for the engine. */ -struct TORCHTRT_API Input : torch::CustomClassHolder { +struct Input : torch::CustomClassHolder { /// Minimum acceptable input size into the engine std::vector min_shape; /// Optimal input size into the engine (size optimized for given kernels accept any size in min max range) @@ -520,7 +520,7 @@ struct TORCHTRT_API Input : torch::CustomClassHolder { * * This struct can either hold a complex inputs of shape or a flattened one, */ -struct TORCHTRT_API GraphInputs { +struct GraphInputs { torch::jit::IValue input_signature; // nested Input, full input spec std::vector inputs; // flatten input spec }; @@ -592,14 +592,14 @@ struct CompileSpec { * * @param inputs */ - CompileSpec(std::vector inputs); + TORCHTRT_API CompileSpec(std::vector inputs); /** * @brief Construct a new Compile Spec object from IValue which represents the nesting of input tensors for a module. * * @param input_signature */ - CompileSpec(torch::jit::IValue input_signature); + TORCHTRT_API CompileSpec(torch::jit::IValue input_signature); // Defaults should reflect TensorRT defaults for BuilderConfig /** diff --git a/docs/_cpp_api/classtorch__tensorrt_1_1DataType.html b/docs/_cpp_api/classtorch__tensorrt_1_1DataType.html index c5baedfa0a..63c732bd65 100644 --- a/docs/_cpp_api/classtorch__tensorrt_1_1DataType.html +++ b/docs/_cpp_api/classtorch__tensorrt_1_1DataType.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
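The max.cpp refactor above replaces the single aten::max.dim converter with two shared helpers: min_max_dim for the two-output aten::max.dim / aten::min.dim schemas and arg_min_max for the single-output aten::argmax / aten::argmin schemas, each built on a TopK layer with k=1 over the reduced axis. A minimal sketch of scripted modules that produce exactly these schemas (plain PyTorch for illustration only; the torch_tensorrt.compile call that would actually exercise the new converters is left out):

.. code-block:: python

    import torch

    class MaxDim(torch.nn.Module):
        # lowers to aten::max.dim -> (Tensor values, Tensor indices)
        def forward(self, x):
            values, indices = torch.max(x, dim=1, keepdim=False)
            return values, indices

    class ArgMin(torch.nn.Module):
        # lowers to aten::argmin -> (Tensor); only the index output of TopK is used
        def forward(self, x):
            return torch.argmin(x, dim=-1, keepdim=True)

    x = torch.randn(4, 8)
    print(torch.jit.script(MaxDim())(x))  # values and indices with dim 1 reduced
    print(torch.jit.script(ArgMin())(x))  # negative dim is normalized by the converter (dim += rank)

When keepdim=False, both helpers squeeze the reduced axis with a shuffle layer and now check, rather than silently skip, that the TopK output has size 1 in that dimension.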
diff --git a/docs/_cpp_api/classtorch__tensorrt_1_1Device_1_1DeviceType.html b/docs/_cpp_api/classtorch__tensorrt_1_1Device_1_1DeviceType.html index ee631a4842..dad196de11 100644 --- a/docs/_cpp_api/classtorch__tensorrt_1_1Device_1_1DeviceType.html +++ b/docs/_cpp_api/classtorch__tensorrt_1_1Device_1_1DeviceType.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
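The unary.cpp change above drops aten::reciprocal from the generic unary macro and gives it a dedicated converter so that kINT32 inputs are cast to kFLOAT before the kRECIP layer, matching PyTorch's implicit int-to-float promotion for this op. A short eager-mode sketch of the behavior the cast reproduces (plain PyTorch, illustration only):

.. code-block:: python

    import torch

    x_int = torch.tensor([1, 2, 4], dtype=torch.int32)
    x_float = torch.tensor([1.0, 2.0, 4.0])

    # PyTorch promotes integer inputs to float for reciprocal, so the TensorRT
    # converter has to insert an explicit cast before the unary kRECIP layer.
    print(torch.reciprocal(x_int))        # tensor([1.0000, 0.5000, 0.2500])
    print(torch.reciprocal(x_int).dtype)  # torch.float32, not int32
    print(torch.reciprocal(x_float))      # float input passes through unchanged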
diff --git a/docs/_cpp_api/classtorch__tensorrt_1_1TensorFormat.html b/docs/_cpp_api/classtorch__tensorrt_1_1TensorFormat.html index 6c81106e8e..544a08ce45 100644 --- a/docs/_cpp_api/classtorch__tensorrt_1_1TensorFormat.html +++ b/docs/_cpp_api/classtorch__tensorrt_1_1TensorFormat.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator.html b/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator.html index ae03830ea8..b0b88b47c4 100644 --- a/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator.html +++ b/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8Calibrator.html b/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8Calibrator.html index 8a782a7aa9..4a927ca14a 100644 --- a/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8Calibrator.html +++ b/docs/_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8Calibrator.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html b/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html index 3d01828515..1f9ccae2dc 100644 --- a/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html +++ b/docs/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1a282fd3c0b1c3a215148ae372070e1268.html b/docs/_cpp_api/define_macros_8h_1a282fd3c0b1c3a215148ae372070e1268.html index 602c0b350a..dacc88ed1c 100644 --- a/docs/_cpp_api/define_macros_8h_1a282fd3c0b1c3a215148ae372070e1268.html +++ b/docs/_cpp_api/define_macros_8h_1a282fd3c0b1c3a215148ae372070e1268.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1a31398a6d4d27e28817afb0f0139e909e.html b/docs/_cpp_api/define_macros_8h_1a31398a6d4d27e28817afb0f0139e909e.html index 38e962e08c..bdd7cefecc 100644 --- a/docs/_cpp_api/define_macros_8h_1a31398a6d4d27e28817afb0f0139e909e.html +++ b/docs/_cpp_api/define_macros_8h_1a31398a6d4d27e28817afb0f0139e909e.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1a35703561b26b1a9d2738ad7d58b27827.html b/docs/_cpp_api/define_macros_8h_1a35703561b26b1a9d2738ad7d58b27827.html index 9a1fa322c1..f51a6a83a3 100644 --- a/docs/_cpp_api/define_macros_8h_1a35703561b26b1a9d2738ad7d58b27827.html +++ b/docs/_cpp_api/define_macros_8h_1a35703561b26b1a9d2738ad7d58b27827.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1abd1465eb38256d3f22cc1426b23d516b.html b/docs/_cpp_api/define_macros_8h_1abd1465eb38256d3f22cc1426b23d516b.html index a5b3fec8be..87cfdbb2c8 100644 --- a/docs/_cpp_api/define_macros_8h_1abd1465eb38256d3f22cc1426b23d516b.html +++ b/docs/_cpp_api/define_macros_8h_1abd1465eb38256d3f22cc1426b23d516b.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html b/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html index 91089891e4..ea386e69ce 100644 --- a/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html +++ b/docs/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1ad19939408f7be171a74a89928b36eb59.html b/docs/_cpp_api/define_macros_8h_1ad19939408f7be171a74a89928b36eb59.html index bbe72d9894..5e01a51527 100644 --- a/docs/_cpp_api/define_macros_8h_1ad19939408f7be171a74a89928b36eb59.html +++ b/docs/_cpp_api/define_macros_8h_1ad19939408f7be171a74a89928b36eb59.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/define_macros_8h_1adad592a7b1b7eed529cdf6acd584c883.html b/docs/_cpp_api/define_macros_8h_1adad592a7b1b7eed529cdf6acd584c883.html index b4290b153d..ce096a1a3d 100644 --- a/docs/_cpp_api/define_macros_8h_1adad592a7b1b7eed529cdf6acd584c883.html +++ b/docs/_cpp_api/define_macros_8h_1adad592a7b1b7eed529cdf6acd584c883.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/dir_cpp.html b/docs/_cpp_api/dir_cpp.html index dea0115edc..cf5c2e72b2 100644 --- a/docs/_cpp_api/dir_cpp.html +++ b/docs/_cpp_api/dir_cpp.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/dir_cpp_include.html b/docs/_cpp_api/dir_cpp_include.html index 60d6e3b0f1..ed31a6249d 100644 --- a/docs/_cpp_api/dir_cpp_include.html +++ b/docs/_cpp_api/dir_cpp_include.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/dir_cpp_include_torch_tensorrt.html b/docs/_cpp_api/dir_cpp_include_torch_tensorrt.html index 6b900ef21f..3870d3f658 100644 --- a/docs/_cpp_api/dir_cpp_include_torch_tensorrt.html +++ b/docs/_cpp_api/dir_cpp_include_torch_tensorrt.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558.html b/docs/_cpp_api/enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558.html index 1b9b5c91fe..5e2d106535 100644 --- a/docs/_cpp_api/enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558.html +++ b/docs/_cpp_api/enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb.html b/docs/_cpp_api/enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb.html index b0f505eab7..734ca547c9 100644 --- a/docs/_cpp_api/enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb.html +++ b/docs/_cpp_api/enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/file_cpp_include_torch_tensorrt_logging.h.html b/docs/_cpp_api/file_cpp_include_torch_tensorrt_logging.h.html index faadfd3afb..b96f2dd1d3 100644 --- a/docs/_cpp_api/file_cpp_include_torch_tensorrt_logging.h.html +++ b/docs/_cpp_api/file_cpp_include_torch_tensorrt_logging.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/file_cpp_include_torch_tensorrt_macros.h.html b/docs/_cpp_api/file_cpp_include_torch_tensorrt_macros.h.html index 765669e1c2..818a7111d5 100644 --- a/docs/_cpp_api/file_cpp_include_torch_tensorrt_macros.h.html +++ b/docs/_cpp_api/file_cpp_include_torch_tensorrt_macros.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/file_cpp_include_torch_tensorrt_ptq.h.html b/docs/_cpp_api/file_cpp_include_torch_tensorrt_ptq.h.html index 62510e6f52..a11074e197 100644 --- a/docs/_cpp_api/file_cpp_include_torch_tensorrt_ptq.h.html +++ b/docs/_cpp_api/file_cpp_include_torch_tensorrt_ptq.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/file_cpp_include_torch_tensorrt_torch_tensorrt.h.html b/docs/_cpp_api/file_cpp_include_torch_tensorrt_torch_tensorrt.h.html index 4e154e62bc..8590414bdc 100644 --- a/docs/_cpp_api/file_cpp_include_torch_tensorrt_torch_tensorrt.h.html +++ b/docs/_cpp_api/file_cpp_include_torch_tensorrt_torch_tensorrt.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_logging_8h_1a0593f776f469c20469e2f729fc7861a3.html b/docs/_cpp_api/function_logging_8h_1a0593f776f469c20469e2f729fc7861a3.html index df0c38386b..5d52ce2c62 100644 --- a/docs/_cpp_api/function_logging_8h_1a0593f776f469c20469e2f729fc7861a3.html +++ b/docs/_cpp_api/function_logging_8h_1a0593f776f469c20469e2f729fc7861a3.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_logging_8h_1a0c012cb374addd90eb1f42eaec570650.html b/docs/_cpp_api/function_logging_8h_1a0c012cb374addd90eb1f42eaec570650.html index a49a6ee55f..7a08c5e8f0 100644 --- a/docs/_cpp_api/function_logging_8h_1a0c012cb374addd90eb1f42eaec570650.html +++ b/docs/_cpp_api/function_logging_8h_1a0c012cb374addd90eb1f42eaec570650.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_logging_8h_1a56e110feaaba2c3fd44bd201fd21a76a.html b/docs/_cpp_api/function_logging_8h_1a56e110feaaba2c3fd44bd201fd21a76a.html index 481caf4a35..8dc93c7636 100644 --- a/docs/_cpp_api/function_logging_8h_1a56e110feaaba2c3fd44bd201fd21a76a.html +++ b/docs/_cpp_api/function_logging_8h_1a56e110feaaba2c3fd44bd201fd21a76a.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_logging_8h_1a7cb50492421ea9de4e3db895819df6f2.html b/docs/_cpp_api/function_logging_8h_1a7cb50492421ea9de4e3db895819df6f2.html index c7de101a0b..49ab646340 100644 --- a/docs/_cpp_api/function_logging_8h_1a7cb50492421ea9de4e3db895819df6f2.html +++ b/docs/_cpp_api/function_logging_8h_1a7cb50492421ea9de4e3db895819df6f2.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_logging_8h_1ac46ac0901cb97e3ae6e93b45f24e90b8.html b/docs/_cpp_api/function_logging_8h_1ac46ac0901cb97e3ae6e93b45f24e90b8.html index 8aed7c5567..45c57c1847 100644 --- a/docs/_cpp_api/function_logging_8h_1ac46ac0901cb97e3ae6e93b45f24e90b8.html +++ b/docs/_cpp_api/function_logging_8h_1ac46ac0901cb97e3ae6e93b45f24e90b8.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_logging_8h_1ad2efd47b6c3689e58ccc595680579ae5.html b/docs/_cpp_api/function_logging_8h_1ad2efd47b6c3689e58ccc595680579ae5.html index a16b425bcd..a87c9858a7 100644 --- a/docs/_cpp_api/function_logging_8h_1ad2efd47b6c3689e58ccc595680579ae5.html +++ b/docs/_cpp_api/function_logging_8h_1ad2efd47b6c3689e58ccc595680579ae5.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_logging_8h_1af8f3443813315af7901903d25dd495cc.html b/docs/_cpp_api/function_logging_8h_1af8f3443813315af7901903d25dd495cc.html index cd0b618f85..c2599b2c03 100644 --- a/docs/_cpp_api/function_logging_8h_1af8f3443813315af7901903d25dd495cc.html +++ b/docs/_cpp_api/function_logging_8h_1af8f3443813315af7901903d25dd495cc.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_ptq_8h_1a226e3c83379d1012cde8578c1c86b16c.html b/docs/_cpp_api/function_ptq_8h_1a226e3c83379d1012cde8578c1c86b16c.html index 8067e27a03..d531b5dbab 100644 --- a/docs/_cpp_api/function_ptq_8h_1a226e3c83379d1012cde8578c1c86b16c.html +++ b/docs/_cpp_api/function_ptq_8h_1a226e3c83379d1012cde8578c1c86b16c.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_ptq_8h_1a6186e305f47c1d94b6130ef6c7f7e178.html b/docs/_cpp_api/function_ptq_8h_1a6186e305f47c1d94b6130ef6c7f7e178.html index 443027515f..65d63f705b 100644 --- a/docs/_cpp_api/function_ptq_8h_1a6186e305f47c1d94b6130ef6c7f7e178.html +++ b/docs/_cpp_api/function_ptq_8h_1a6186e305f47c1d94b6130ef6c7f7e178.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_torch__tensorrt_8h_1a5b405fd3bf3c8fc2e2a54cbbab979797.html b/docs/_cpp_api/function_torch__tensorrt_8h_1a5b405fd3bf3c8fc2e2a54cbbab979797.html index 99928f04a8..6b69b8cd7c 100644 --- a/docs/_cpp_api/function_torch__tensorrt_8h_1a5b405fd3bf3c8fc2e2a54cbbab979797.html +++ b/docs/_cpp_api/function_torch__tensorrt_8h_1a5b405fd3bf3c8fc2e2a54cbbab979797.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_torch__tensorrt_8h_1a6e19490a08fb1553c9dd347a5ae79db9.html b/docs/_cpp_api/function_torch__tensorrt_8h_1a6e19490a08fb1553c9dd347a5ae79db9.html index 721759731d..a047680093 100644 --- a/docs/_cpp_api/function_torch__tensorrt_8h_1a6e19490a08fb1553c9dd347a5ae79db9.html +++ b/docs/_cpp_api/function_torch__tensorrt_8h_1a6e19490a08fb1553c9dd347a5ae79db9.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_torch__tensorrt_8h_1a710df824a7718b440e4bc17bf9693cef.html b/docs/_cpp_api/function_torch__tensorrt_8h_1a710df824a7718b440e4bc17bf9693cef.html index fe2cc70617..0f3a0e2a06 100644 --- a/docs/_cpp_api/function_torch__tensorrt_8h_1a710df824a7718b440e4bc17bf9693cef.html +++ b/docs/_cpp_api/function_torch__tensorrt_8h_1a710df824a7718b440e4bc17bf9693cef.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_torch__tensorrt_8h_1ac4ab8313ae72c2c899ea31548b528528.html b/docs/_cpp_api/function_torch__tensorrt_8h_1ac4ab8313ae72c2c899ea31548b528528.html index bcf3fb3dc7..71285f168f 100644 --- a/docs/_cpp_api/function_torch__tensorrt_8h_1ac4ab8313ae72c2c899ea31548b528528.html +++ b/docs/_cpp_api/function_torch__tensorrt_8h_1ac4ab8313ae72c2c899ea31548b528528.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_torch__tensorrt_8h_1ad1acd06eaeaffbbcf6e7ebf426891384.html b/docs/_cpp_api/function_torch__tensorrt_8h_1ad1acd06eaeaffbbcf6e7ebf426891384.html index 0c8d8f790b..95ca7da3ea 100644 --- a/docs/_cpp_api/function_torch__tensorrt_8h_1ad1acd06eaeaffbbcf6e7ebf426891384.html +++ b/docs/_cpp_api/function_torch__tensorrt_8h_1ad1acd06eaeaffbbcf6e7ebf426891384.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_torch__tensorrt_8h_1ad6a4ee8ca6c8f6e5519eb1128ec7f4a1.html b/docs/_cpp_api/function_torch__tensorrt_8h_1ad6a4ee8ca6c8f6e5519eb1128ec7f4a1.html index 1c451af297..a1ca523589 100644 --- a/docs/_cpp_api/function_torch__tensorrt_8h_1ad6a4ee8ca6c8f6e5519eb1128ec7f4a1.html +++ b/docs/_cpp_api/function_torch__tensorrt_8h_1ad6a4ee8ca6c8f6e5519eb1128ec7f4a1.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/function_torch__tensorrt_8h_1ae8d56472106eeef37fbe51ff7f40c9b2.html b/docs/_cpp_api/function_torch__tensorrt_8h_1ae8d56472106eeef37fbe51ff7f40c9b2.html index 3b244e5516..755ca3d887 100644 --- a/docs/_cpp_api/function_torch__tensorrt_8h_1ae8d56472106eeef37fbe51ff7f40c9b2.html +++ b/docs/_cpp_api/function_torch__tensorrt_8h_1ae8d56472106eeef37fbe51ff7f40c9b2.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/namespace_torch_tensorrt.html b/docs/_cpp_api/namespace_torch_tensorrt.html index c1279521c6..20c4859922 100644 --- a/docs/_cpp_api/namespace_torch_tensorrt.html +++ b/docs/_cpp_api/namespace_torch_tensorrt.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/namespace_torch_tensorrt__logging.html b/docs/_cpp_api/namespace_torch_tensorrt__logging.html index f57d3c7ec9..d125f3726e 100644 --- a/docs/_cpp_api/namespace_torch_tensorrt__logging.html +++ b/docs/_cpp_api/namespace_torch_tensorrt__logging.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/namespace_torch_tensorrt__ptq.html b/docs/_cpp_api/namespace_torch_tensorrt__ptq.html index 64439cce4b..ae4c0a7606 100644 --- a/docs/_cpp_api/namespace_torch_tensorrt__ptq.html +++ b/docs/_cpp_api/namespace_torch_tensorrt__ptq.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/namespace_torch_tensorrt__torchscript.html b/docs/_cpp_api/namespace_torch_tensorrt__torchscript.html index 5f3cefef91..ab90fc0a0d 100644 --- a/docs/_cpp_api/namespace_torch_tensorrt__torchscript.html +++ b/docs/_cpp_api/namespace_torch_tensorrt__torchscript.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_logging.h.html b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_logging.h.html index e6ad942d63..c7d4f40649 100644 --- a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_logging.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_logging.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_macros.h.html b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_macros.h.html index b619b14f7a..380137696d 100644 --- a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_macros.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_macros.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_ptq.h.html b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_ptq.h.html index ba7274cf5a..b3018fdc9c 100644 --- a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_ptq.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_ptq.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.html b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.html index f2ba470f1f..5102b92e8e 100644 --- a/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
@@ -508,7 +508,7 @@ Value value; }; -struct TORCHTRT_API Input : torch::CustomClassHolder { +struct Input : torch::CustomClassHolder { std::vector<int64_t> min_shape; std::vector<int64_t> opt_shape; std::vector<int64_t> max_shape; @@ -558,7 +558,7 @@ bool input_is_dynamic; }; -struct TORCHTRT_API GraphInputs { +struct GraphInputs { torch::jit::IValue input_signature; // nested Input, full input spec std::vector<Input> inputs; // flatten input spec }; @@ -575,9 +575,9 @@ TORCHTRT_API CompileSpec(std::vector<c10::ArrayRef<int64_t>> fixed_sizes); - CompileSpec(std::vector<Input> inputs); + TORCHTRT_API CompileSpec(std::vector<Input> inputs); - CompileSpec(torch::jit::IValue input_signature); + TORCHTRT_API CompileSpec(torch::jit::IValue input_signature); // Defaults should reflect TensorRT defaults for BuilderConfig GraphInputs graph_inputs; diff --git a/docs/_cpp_api/structtorch__tensorrt_1_1Device.html b/docs/_cpp_api/structtorch__tensorrt_1_1Device.html index ca9d929ab5..05ebff4d28 100644 --- a/docs/_cpp_api/structtorch__tensorrt_1_1Device.html +++ b/docs/_cpp_api/structtorch__tensorrt_1_1Device.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/structtorch__tensorrt_1_1GraphInputs.html b/docs/_cpp_api/structtorch__tensorrt_1_1GraphInputs.html index cd1762f9d3..dfedca5906 100644 --- a/docs/_cpp_api/structtorch__tensorrt_1_1GraphInputs.html +++ b/docs/_cpp_api/structtorch__tensorrt_1_1GraphInputs.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/structtorch__tensorrt_1_1Input.html b/docs/_cpp_api/structtorch__tensorrt_1_1Input.html index 2ea3ccbaab..02296f8799 100644 --- a/docs/_cpp_api/structtorch__tensorrt_1_1Input.html +++ b/docs/_cpp_api/structtorch__tensorrt_1_1Input.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/structtorch__tensorrt_1_1torchscript_1_1CompileSpec.html b/docs/_cpp_api/structtorch__tensorrt_1_1torchscript_1_1CompileSpec.html index 194e5e3d6e..6d859108c7 100644 --- a/docs/_cpp_api/structtorch__tensorrt_1_1torchscript_1_1CompileSpec.html +++ b/docs/_cpp_api/structtorch__tensorrt_1_1torchscript_1_1CompileSpec.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
@@ -388,7 +388,7 @@

Struct Documentation
-CompileSpec(std::vector<Input> inputs)
+TORCHTRT_API CompileSpec(std::vector<Input> inputs)

Construct a new Compile Spec object from input ranges. Each entry in the vector represents an input and should be provided in call order.

Use this constructor to define inputs with dynamic shape, specific input types or tensor formats

@@ -400,7 +400,7 @@

Struct Documentation
-CompileSpec(torch::jit::IValue input_signature)
+TORCHTRT_API CompileSpec(torch::jit::IValue input_signature)

Construct a new Compile Spec object from IValue which represents the nesting of input tensors for a module.

Parameters
diff --git a/docs/_cpp_api/torch_tensort_cpp.html b/docs/_cpp_api/torch_tensort_cpp.html index 210493e0cd..8f7e1fc4d4 100644 --- a/docs/_cpp_api/torch_tensort_cpp.html +++ b/docs/_cpp_api/torch_tensort_cpp.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_cpp_api/unabridged_orphan.html b/docs/_cpp_api/unabridged_orphan.html index 9c6d73ec1d..bd1456b083 100644 --- a/docs/_cpp_api/unabridged_orphan.html +++ b/docs/_cpp_api/unabridged_orphan.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/index.html b/docs/_modules/index.html index edb7839310..3a8e88a14c 100644 --- a/docs/_modules/index.html +++ b/docs/_modules/index.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/_Device.html b/docs/_modules/torch_tensorrt/_Device.html index f147135f2a..054f09eeee 100644 --- a/docs/_modules/torch_tensorrt/_Device.html +++ b/docs/_modules/torch_tensorrt/_Device.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/_Input.html b/docs/_modules/torch_tensorrt/_Input.html index 1ac20dc47e..cebb7011f3 100644 --- a/docs/_modules/torch_tensorrt/_Input.html +++ b/docs/_modules/torch_tensorrt/_Input.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/_compile.html b/docs/_modules/torch_tensorrt/_compile.html index 2246ec1eae..a7a508ed4c 100644 --- a/docs/_modules/torch_tensorrt/_compile.html +++ b/docs/_modules/torch_tensorrt/_compile.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/_util.html b/docs/_modules/torch_tensorrt/_util.html index 989cb3e61a..bd4b0bb3da 100644 --- a/docs/_modules/torch_tensorrt/_util.html +++ b/docs/_modules/torch_tensorrt/_util.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/fx/fx2trt.html b/docs/_modules/torch_tensorrt/fx/fx2trt.html index e849401545..54ca9a0374 100644 --- a/docs/_modules/torch_tensorrt/fx/fx2trt.html +++ b/docs/_modules/torch_tensorrt/fx/fx2trt.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/fx/input_tensor_spec.html b/docs/_modules/torch_tensorrt/fx/input_tensor_spec.html index 6467052426..fb7efcf6a6 100644 --- a/docs/_modules/torch_tensorrt/fx/input_tensor_spec.html +++ b/docs/_modules/torch_tensorrt/fx/input_tensor_spec.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/fx/trt_module.html b/docs/_modules/torch_tensorrt/fx/trt_module.html index a57f5176fc..4e84541c80 100644 --- a/docs/_modules/torch_tensorrt/fx/trt_module.html +++ b/docs/_modules/torch_tensorrt/fx/trt_module.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/logging.html b/docs/_modules/torch_tensorrt/logging.html index 6128cc05ed..e4f1a17d5f 100644 --- a/docs/_modules/torch_tensorrt/logging.html +++ b/docs/_modules/torch_tensorrt/logging.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/_modules/torch_tensorrt/ptq.html b/docs/_modules/torch_tensorrt/ptq.html index b85374f7d8..41c7f218a0 100644 --- a/docs/_modules/torch_tensorrt/ptq.html +++ b/docs/_modules/torch_tensorrt/ptq.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
@@ -373,7 +373,7 @@

Source code for torch_tensorrt.ptq

     if self.current_batch_idx + self.batch_size > len(self.data_loader.dataset):
         return None
 
-    batch = self.dataset_iterator.next()
+    batch = next(self.dataset_iterator)
     self.current_batch_idx += self.batch_size
     inputs_gpu = []
     if isinstance(batch, list):
@@ -401,6 +401,13 @@ 

Source code for torch_tensorrt.ptq

         return b""
+# deepcopy (which involves pickling) is performed on the compile_spec internally during compilation. +# We register this __reduce__ function for pickler to identity the calibrator object returned by DataLoaderCalibrator during deepcopy. +# This should be the object's local name relative to the module https://docs.python.org/3/library/pickle.html#object.__reduce__ +def __reduce__(self): + return self.__class__.__name__ + +
[docs]class DataLoaderCalibrator(object): """ Constructs a calibrator class in TensorRT and uses pytorch dataloader to load/preproces @@ -459,24 +466,27 @@

Source code for torch_tensorrt.ptq

             "get_batch": get_cache_mode_batch if use_cache else get_batch,
             "read_calibration_cache": read_calibration_cache,
             "write_calibration_cache": write_calibration_cache,
+            "__reduce__": __reduce__,  # used when you deepcopy the DataLoaderCalibrator object
         }
 
         # Using type metaclass to construct calibrator class based on algorithm type
         if algo_type == CalibrationAlgo.ENTROPY_CALIBRATION:
             return type(
-                "DataLoaderCalibrator", (_C.IInt8EntropyCalibrator,), attribute_mapping
+                "Int8EntropyCalibrator", (_C.IInt8EntropyCalibrator,), attribute_mapping
             )()
         elif algo_type == CalibrationAlgo.ENTROPY_CALIBRATION_2:
             return type(
-                "DataLoaderCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping
+                "Int8EntropyCalibrator2",
+                (_C.IInt8EntropyCalibrator2,),
+                attribute_mapping,
             )()
         elif algo_type == CalibrationAlgo.LEGACY_CALIBRATION:
             return type(
-                "DataLoaderCalibrator", (_C.IInt8LegacyCalibrator,), attribute_mapping
+                "Int8LegacyCalibrator", (_C.IInt8LegacyCalibrator,), attribute_mapping
             )()
         elif algo_type == CalibrationAlgo.MINMAX_CALIBRATION:
             return type(
-                "DataLoaderCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping
+                "Int8MinMaxCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping
             )()
         else:
             log(
diff --git a/docs/_modules/torch_tensorrt/ts/_compile_spec.html b/docs/_modules/torch_tensorrt/ts/_compile_spec.html
index f873b32923..640aafdaca 100644
--- a/docs/_modules/torch_tensorrt/ts/_compile_spec.html
+++ b/docs/_modules/torch_tensorrt/ts/_compile_spec.html
@@ -196,7 +196,7 @@
               
               
                 
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
@@ -570,7 +570,7 @@

Source code for torch_tensorrt.ts._compile_spec

< def _parse_compile_spec(compile_spec_: Dict[str, Any]) -> _ts_C.CompileSpec: - # TODO: Remove deep copy once collections does not need partial compilation + # TODO: Use deepcopy to support partial compilation of collections compile_spec = deepcopy(compile_spec_) info = _ts_C.CompileSpec() @@ -646,7 +646,7 @@

Source code for torch_tensorrt.ts._compile_spec

< compile_spec["enabled_precisions"] ) - if "calibrator" in compile_spec: + if "calibrator" in compile_spec and compile_spec["calibrator"]: info.ptq_calibrator = compile_spec["calibrator"] if "sparse_weights" in compile_spec: diff --git a/docs/_modules/torch_tensorrt/ts/_compiler.html b/docs/_modules/torch_tensorrt/ts/_compiler.html index ba86b16c3d..b636c58d37 100644 --- a/docs/_modules/torch_tensorrt/ts/_compiler.html +++ b/docs/_modules/torch_tensorrt/ts/_compiler.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
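The torch_tensorrt.ptq and ts._compile_spec changes above make INT8 calibrators survive compilation: the dataset iterator is advanced with the Python 3 next() builtin, a module-level __reduce__ is injected into every dynamically created calibrator class so the deepcopy performed on the compile spec does not try to pickle the TensorRT binding, the class generated for ENTROPY_CALIBRATION_2 now derives from IInt8EntropyCalibrator2 (it previously reused IInt8MinMaxCalibrator), and an empty calibrator entry is no longer forwarded to the parsed spec. A minimal sketch of the __reduce__ mechanism with a stand-in base class (the _C.IInt8* bindings, the hook lambdas, and the spec keys below are stand-ins rather than the real API surface):

.. code-block:: python

    import copy

    class FakeInt8CalibratorBase:
        """Stand-in for the pybind11 calibrator bases (_C.IInt8EntropyCalibrator2, ...).

        The real bindings cannot be pickled, which is why deepcopying a compile
        spec that holds a calibrator used to fail."""

    def __reduce__(self):
        # Returning a string makes copy/pickle treat the instance as a named
        # global, so deepcopy hands the calibrator through instead of pickling it.
        return self.__class__.__name__

    attribute_mapping = {
        "get_batch": lambda self, names: None,              # stand-in hooks
        "read_calibration_cache": lambda self, length: b"",
        "write_calibration_cache": lambda self, cache: None,
        "__reduce__": __reduce__,
    }

    calibrator = type("Int8EntropyCalibrator2", (FakeInt8CalibratorBase,), attribute_mapping)()

    compile_spec = {"enabled_precisions": {"int8"}, "calibrator": calibrator}
    copied = copy.deepcopy(compile_spec)       # no longer chokes on the calibrator
    print(copied["calibrator"] is calibrator)  # True: the same instance is carried through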
diff --git a/docs/_sources/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.rst.txt b/docs/_sources/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.rst.txt index 7e00b9350a..2ba864f7cd 100644 --- a/docs/_sources/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.rst.txt +++ b/docs/_sources/_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.rst.txt @@ -170,7 +170,7 @@ Program Listing for File torch_tensorrt.h Value value; }; - struct TORCHTRT_API Input : torch::CustomClassHolder { + struct Input : torch::CustomClassHolder { std::vector min_shape; std::vector opt_shape; std::vector max_shape; @@ -220,7 +220,7 @@ Program Listing for File torch_tensorrt.h bool input_is_dynamic; }; - struct TORCHTRT_API GraphInputs { + struct GraphInputs { torch::jit::IValue input_signature; // nested Input, full input spec std::vector inputs; // flatten input spec }; @@ -237,9 +237,9 @@ Program Listing for File torch_tensorrt.h TORCHTRT_API CompileSpec(std::vector> fixed_sizes); - CompileSpec(std::vector inputs); + TORCHTRT_API CompileSpec(std::vector inputs); - CompileSpec(torch::jit::IValue input_signature); + TORCHTRT_API CompileSpec(torch::jit::IValue input_signature); // Defaults should reflect TensorRT defaults for BuilderConfig GraphInputs graph_inputs; diff --git a/docs/_sources/getting_started/getting_started_with_windows.rst.txt b/docs/_sources/getting_started/getting_started_with_windows.rst.txt new file mode 100644 index 0000000000..d5d3394855 --- /dev/null +++ b/docs/_sources/getting_started/getting_started_with_windows.rst.txt @@ -0,0 +1,78 @@ +.. _getting_started_windows: + +Building Torch-TensorRT on Windows +==================================== + +Torch-TensorRT has community support for Windows platform using CMake + +Prerequisite: + +* Microsoft Visual Studio +* LibTorch +* TensorRT +* CUDA +* cuDNN + + +Build configuration +------------------- + +* Open Microsoft Visual Studio +* Open Torch-TensorRT source code folder +* Open Manage configurations -> Edit JSON to open CMakeSettings.json file. +* Configure the CMake build configurations. Following is an example configuration: + +.. code-block:: none + + { + "configurations": [ + { + "name": "x64-Debug", + "generator": "Ninja", + "configurationType": "Debug", + "inheritEnvironments": [ "msvc_x64_x64" ], + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", + "cmakeCommandArgs": "-S . -B out", + "buildCommandArgs": "cmake --build out", + "ctestCommandArgs": "", + "variables": [ + { + "name": "CMAKE_MODULE_PATH", + "value": "$PWD\cmake\Modules", + "type": "FILEPATH" + }, + { + "name": "Torch_DIR", + "value": "\share\cmake\Torch", + "type": "FILEPATH" + }, + { + "name": "TensorRT_ROOT", + "value": "", + "type": "FILEPATH" + }, + { + "name": "CMAKE_BUILD_TYPE", + "value": "Release", + "type": " STRING" + } + ] + } + ] + } + + +Compilation +----------- + +* Click Build -> Build All or directly press Ctrl + Shift + B + +Note: After successful compilation, the build artifacts will be present at buildRoot path configured. + +Installation +------------ + +* Build -> Install Torch-TensorRT + +Note: After successful installation, the artifacts will be present at installRoot. \ No newline at end of file diff --git a/docs/cli/torchtrtc.html b/docs/cli/torchtrtc.html index e16f9bd1f6..3eb5c95bed 100644 --- a/docs/cli/torchtrtc.html +++ b/docs/cli/torchtrtc.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/conversion.html b/docs/contributors/conversion.html index c0c13ea1c6..a16bc6757d 100644 --- a/docs/contributors/conversion.html +++ b/docs/contributors/conversion.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/lowering.html b/docs/contributors/lowering.html index 2d9a812750..b7ef6ff5f2 100644 --- a/docs/contributors/lowering.html +++ b/docs/contributors/lowering.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/partitioning.html b/docs/contributors/partitioning.html index f524e76918..9ba9d118a8 100644 --- a/docs/contributors/partitioning.html +++ b/docs/contributors/partitioning.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/phases.html b/docs/contributors/phases.html index 2a93a59124..56af2f093a 100644 --- a/docs/contributors/phases.html +++ b/docs/contributors/phases.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/runtime.html b/docs/contributors/runtime.html index 08b4da78ee..3d716472ed 100644 --- a/docs/contributors/runtime.html +++ b/docs/contributors/runtime.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/system_overview.html b/docs/contributors/system_overview.html index 0601b9307b..e4c49fa61b 100644 --- a/docs/contributors/system_overview.html +++ b/docs/contributors/system_overview.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/useful_links.html b/docs/contributors/useful_links.html index 57cd2fe3cc..5c5565af99 100644 --- a/docs/contributors/useful_links.html +++ b/docs/contributors/useful_links.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/contributors/writing_converters.html b/docs/contributors/writing_converters.html index 956ec8702e..28877f7466 100644 --- a/docs/contributors/writing_converters.html +++ b/docs/contributors/writing_converters.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/genindex.html b/docs/genindex.html index 7af5bb2ab6..6c13b858c1 100644 --- a/docs/genindex.html +++ b/docs/genindex.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/getting_started/getting_started_with_cpp_api.html b/docs/getting_started/getting_started_with_cpp_api.html index fcce40782e..1c7d35be3b 100644 --- a/docs/getting_started/getting_started_with_cpp_api.html +++ b/docs/getting_started/getting_started_with_cpp_api.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/getting_started/getting_started_with_python_api.html b/docs/getting_started/getting_started_with_python_api.html index effd46eabc..d74eeb76b4 100644 --- a/docs/getting_started/getting_started_with_python_api.html +++ b/docs/getting_started/getting_started_with_python_api.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/getting_started/getting_started_with_windows.html b/docs/getting_started/getting_started_with_windows.html new file mode 100644 index 0000000000..c35e9ac2b8 --- /dev/null +++ b/docs/getting_started/getting_started_with_windows.html @@ -0,0 +1,708 @@ + + + + + + + + + + + + + Building Torch-TensorRT on Windows — Torch-TensorRT master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Building Torch-TensorRT on Windows

Torch-TensorRT has community support for the Windows platform using CMake.

Prerequisite:

• Microsoft Visual Studio
• LibTorch
• TensorRT
• CUDA
• cuDNN

Build configuration

• Open Microsoft Visual Studio
• Open Torch-TensorRT source code folder
• Open Manage configurations -> Edit JSON to open CMakeSettings.json file.
• Configure the CMake build configurations. Following is an example configuration:

{
+  "configurations": [
+    {
+      "name": "x64-Debug",
+      "generator": "Ninja",
+      "configurationType": "Debug",
+      "inheritEnvironments": [ "msvc_x64_x64" ],
+      "buildRoot": "${projectDir}\\out\\build\\${name}",
+      "installRoot": "${projectDir}\\out\\install\\${name}",
+      "cmakeCommandArgs": "-S . -B out",
+      "buildCommandArgs": "cmake --build out",
+      "ctestCommandArgs": "",
+      "variables": [
+        {
+          "name": "CMAKE_MODULE_PATH",
+          "value": "$PWD\cmake\Modules",
+          "type": "FILEPATH"
+        },
+        {
+          "name": "Torch_DIR",
+          "value": "<Path to libtorch>\share\cmake\Torch",
+          "type": "FILEPATH"
+        },
+        {
+          "name": "TensorRT_ROOT",
+          "value": "<Path to TensorRT directory>",
+          "type": "FILEPATH"
+        },
+        {
+          "name": "CMAKE_BUILD_TYPE",
+          "value": "Release",
+          "type": " STRING"
+        }
+      ]
+    }
+  ]
+}

Compilation

• Click Build -> Build All or directly press Ctrl + Shift + B

Note: After successful compilation, the build artifacts will be present at the configured buildRoot path.

Installation

• Build -> Install Torch-TensorRT

Note: After successful installation, the artifacts will be present at the configured installRoot path.

diff --git a/docs/getting_started/installation.html index a51adc6deb..f15ad10b22 100644 --- a/docs/getting_started/installation.html +++ b/docs/getting_started/installation.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/index.html b/docs/index.html index 81c151175f..640f27ce58 100644 --- a/docs/index.html +++ b/docs/index.html @@ -198,7 +198,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/indices/supported_ops.html b/docs/indices/supported_ops.html index d230472422..da28244355 100644 --- a/docs/indices/supported_ops.html +++ b/docs/indices/supported_ops.html @@ -198,7 +198,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/objects.inv b/docs/objects.inv index 71491c9fdf..53f8d22cd4 100644 Binary files a/docs/objects.inv and b/docs/objects.inv differ diff --git a/docs/py-modindex.html b/docs/py-modindex.html index 226e8ddf92..f1fbf929a6 100644 --- a/docs/py-modindex.html +++ b/docs/py-modindex.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/py_api/fx.html b/docs/py_api/fx.html index ef36006a65..f1815325f1 100644 --- a/docs/py_api/fx.html +++ b/docs/py_api/fx.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/py_api/logging.html b/docs/py_api/logging.html index e7f157b23b..521d08972d 100644 --- a/docs/py_api/logging.html +++ b/docs/py_api/logging.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/py_api/ptq.html b/docs/py_api/ptq.html index e2a4f71ded..44c01e8bd6 100644 --- a/docs/py_api/ptq.html +++ b/docs/py_api/ptq.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/py_api/torch_tensorrt.html b/docs/py_api/torch_tensorrt.html index c5428f6c8e..9d7aec6fdb 100644 --- a/docs/py_api/torch_tensorrt.html +++ b/docs/py_api/torch_tensorrt.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/py_api/ts.html b/docs/py_api/ts.html index 02b8bbe6ed..a67ad2a27c 100644 --- a/docs/py_api/ts.html +++ b/docs/py_api/ts.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
@@ -567,7 +567,7 @@

Functions
-torch_tensorrt.ts.TensorRTCompileSpec(inputs=[], input_signature=None, device=None, disable_tf32=False, sparse_weights=False, enabled_precisions={}, refit=False, debug=False, capability=<EngineCapability.default: 0>, num_avg_timing_iters=1, workspace_size=0, dla_sram_size=1048576, dla_local_dram_size=1073741824, dla_global_dram_size=536870912, truncate_long_and_double=False, calibrator=None) <torch.ScriptClass object at 0x7f89ba6d5eb0>[source]
+torch_tensorrt.ts.TensorRTCompileSpec(inputs=[], input_signature=None, device=None, disable_tf32=False, sparse_weights=False, enabled_precisions={}, refit=False, debug=False, capability=<EngineCapability.default: 0>, num_avg_timing_iters=1, workspace_size=0, dla_sram_size=1048576, dla_local_dram_size=1073741824, dla_global_dram_size=536870912, truncate_long_and_double=False, calibrator=None) <torch.ScriptClass object at 0x7f9bda9065f0>[source]

Utility to create a formatted spec dictionary for using the PyTorch TensorRT backend

Keyword Arguments
diff --git a/docs/search.html b/docs/search.html index c5405377d8..2d05d15033 100644 --- a/docs/search.html +++ b/docs/search.html @@ -196,7 +196,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/searchindex.js b/docs/searchindex.js index 6125435bfd..9506669f2c 100644 --- a/docs/searchindex.js +++ b/docs/searchindex.js @@ -1 +1 @@
[auto-generated Sphinx search index: the single-line, minified Search.setIndex({...}) payload is regenerated by the docs build; its contents are omitted]
get_batch_impl:44,get_batch_s:68,get_build_info:[21,38,45,50,69],get_cache_mode_batch:68,get_is_colored_output_on:[18,39,42,50,67],get_logging_prefix:[18,39,42,50,67],get_output:81,get_reportable_log_level:[18,39,42,50,67],getattr:[54,57,61,80],getbatch:[3,4,44],getbatchs:[3,4,44],getdimens:[60,61],getoutput:[60,61],git:78,github:[59,61,63,72,83,84,85],github_url:72,gitlab:72,gitlab_url:72,give:[72,74,81],given:[48,49,52,54,61,62,66,68,69,70,80,81,86],global:[26,52,61],gnu:63,go:[44,54,55,61,64,80,81,82,85],goal:60,goe:[74,81],good:[44,60,74,81],goodger:75,googl:72,got:[61,74],gpu:[1,32,35,37,45,46,52,61,69,70,81,83,85,86,87],gpu_id:[35,45,46,52,69,83,86,87],graph:[16,31,32,37,45,52,53,55,56,58,60,61,64,67,70,80,81,82],graph_input:[45,49],graph_modul:66,graphinput:[21,38,45,49,50],graphinputsstruct:50,graphmodul:[62,66],gravida:77,great:[61,74],greater:67,group:[65,74,75],grpc:85,gru_cel:65,gt:65,gtc:64,guangzhou:75,guard:54,guard_elimin:54,gui:74,guid:[64,73],gulf:85,gz:[74,75,83],h:[0,1,2,3,4,5,6,7,8,9,10,11,12,15,16,17,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,46,47,48,49,50,51,52,54,61,83],ha:[49,53,54,55,56,58,60,61,66,74,75,80,81,82,83],habit:77,habitass:77,hac:77,hack:44,hakaimagazin:85,half:[52,61,62,69,74,83,85,86,87],hand:85,handl:[54,57,81],happen:[80,81],hardtanh:[60,65],hardtanh_:65,hardwar:87,has_batch_dim:66,hash:63,have:[29,33,44,52,53,54,60,61,63,64,66,69,70,74,80,81,82,83,85],haven:61,header:[61,72,74,75,85],heart:75,heaven:74,heck:74,heh:75,hehe:75,height:74,help:[27,52,53,60,61,81,82,84],helper:60,hendrerit:77,here:[44,53,55,57,61,63,72,74,75,80,81,83,84,85],hermet:63,hexagram:74,hfile:50,hi:[65,74,75],hidden:[43,72],high:[54,55,72],higher:[54,72,74,80],highli:[82,85],highlight:74,hinton:83,hold:[46,47,48,53,60,83],holder:[57,76],holi:74,home:63,hood:58,hope:75,host:[49,52,63,70,85],how:[3,4,63,74,76,78,80,82,84,85,86],howev:[29,63,72,73,85],html:[59,63,74,80,83],html_theme:79,html_theme_opt:72,html_theme_path:79,http:[59,61,63,72,74,80,82,83,84,85],http_archiv:63,httpclient:85,hub:85,huggingfac:82,human:74,humankind:75,hx:65,hybrid:70,hyperlink:74,hyphen:74,i8:52,i:[52,54,60,61,74,75,80,83],iaculi:77,icon:[72,74],id:[35,45,52,69,72,73,77,87],idea:[54,74],ident:52,idx:65,ifndef:[44,45],ifstream:44,ignor:69,iii:75,iint8calibr:[3,4,29,30,44,45,49,70,83],iint8entropycalibrator2:[3,4,29,30,44,83],iint8minmaxcalibr:[29,30,83],ilay:60,illustr:[81,82],imag:[83,85],imagenet:82,imagenett:82,images_:83,img1:85,img:85,img_path:85,imperdiet:77,implement:[3,4,54,55,57,61,73,81,83,84],implic:54,implicit:[65,74,81],implicitli:69,implictli:69,improv:75,in_shap:61,in_tensor:80,incas:44,includ:[13,15,16,34,36,42,43,44,45,51,52,55,56,57,58,61,63,66,72,74,80,81,83],includedirectori:50,includehidden:72,incompat:63,incorpor:75,indent:74,index:[33,59,64,65,70,72,78,83],indic:[65,72,74],indirect:74,inetworkdefinit:53,infer:[54,61,69,70,81,82,83],inference_output:85,inferenceservercli:85,inferinput:85,inferrequestedoutput:85,info:[16,32,37,45,52,60,61,67,69],inform:[25,33,34,36,48,52,53,55,57,61,63,64,66,67,69,74,80,81,83,86],infrastructur:[83,85],ingest:58,inherit:[50,81,83],initi:74,injuri:74,inlin:[0,1,2,3,4,29,30,44,46,48,54,61,75,78],inner:[49,75,82],input0:61,input1:61,input2:61,input:[3,4,21,29,33,38,44,45,47,49,50,52,53,54,55,57,60,61,62,65,66,67,69,70,75,80,81,82,83,85,86,87],input_0:[57,61],input__0:85,input_data:[62,80],input_file_path:[52,87],input_is_dynam:45,input_nam:[66,81],input_s:[55,61],input_scal:65,input_shap:[83,87],input_signatur:[45,47,49,70],input_spec:[52,66,81],i
nput_tensor_spec:[66,81],input_v:81,inputclass:50,inputrang:[55,61],inputtensorspec:[66,81],insert:[61,83],inserting_befor:81,insid:[74,85],inspect:[60,61,80],instal:[61,64,78,84,85],instanc:[54,61,68,80,82],instance_norm:65,instanti:[56,57,58,60,61],instatin:[0,1,2,46],instead:[49,52,53,54,61,63,84],instnanti:57,instruct:[55,56,58,61,63,81,85],insur:63,int32:[69,70,82],int64:70,int64_t:[45,46,48,49,83,87],int8:[0,44,48,49,52,64,69,70,83,87],int8_t:[17,45],int8cachecalibr:[20,29,40,44,50],int8cachecalibratortempl:50,int8calibr:[3,20,30,40,44,50],int8calibratornamespac:50,int_float:65,integ:[69,77],integr:64,intend:63,intent:[54,74],interact:74,interdum:77,interest:[54,74],interfac:[0,1,2,46,57,58,60,83],interfer:74,intermedi:[16,49,52,67,70,80],intern:[1,16,46,60,61,67,74],internal_error:67,internalerror:67,interpol:74,interpret:[57,74,81],intro_to_torchscript_tutori:80,introduc:[81,82],invok:[61,80,81],io:[44,85],iostream:[20,21,44,45,61],ipso:74,ipsum:[75,77],ir:[56,58,60,62,69,80],is_floating_point:65,is_train:83,iscustomclass:60,isinst:81,isn:[72,74],issu:[3,4,61,63],istensor:60,istream_iter:44,it_:44,ital:74,item:[73,75],itensor:[53,60,61,81],iter:[20,44,49,52,53,68,70],its:[29,53,57,60,63,74],itself:[0,1,2,46,52,54,63,85,86],iv:75,ivalu:[45,47,49,53,57,60,61],jan:75,jetpack:63,jetpack_5:63,jetpack_x:63,jetson:[69,82],jit:[31,32,33,37,45,47,49,52,53,54,55,56,57,58,59,60,61,62,69,70,80,85,86],jp_workspac:63,jpg:85,jump:85,just:[44,45,54,61,62,64,67,74,76,80,81,82,84,86],justo:[75,77],k:[65,83],kbool:[0,45],kchannelslast:[2,45],kchar:[0,45],kclip:60,kcontigu:[2,45,48],kcpu:[1,46],kcuda:[1,46,55,61],kdebug:[16,42,44],kdla:[1,45,46,87],kdla_standalon:[17,45],keepdim:65,kei:[74,80,85],kept:75,kernel:[48,49,52,60,69,70,81],kernel_s:65,kerror:[16,42],keyboard:74,keyword:[69,70],kf16:[83,87],kfloat:[0,45,49],kgpu:[1,45,46],kgraph:[16,42,54],khalf:[0,45,61],ki8:83,kind:[53,69,81],kinfo:[16,42,44],kint:[0,45],kinternal_error:[16,42],know:[42,60,72,74],knowledg:74,kriz:83,krizhevski:83,ksafeti:[17,45],kstandard:[17,45,49],ktest:83,ktrain:83,kunknown:[0,2,45],kwarg:[68,69,81,82],kwarn:[16,42],l:65,label:[74,82,83,85],lacinia:77,lack:[55,56,58,81],lacu:77,laid:61,lambda:[60,61,74,85],lang:73,languag:[73,74,75,80,85],laoreet:77,larg:[56,58,61,72,74,82,83],larger:[72,82],largest:65,last:[2,54,69,81],lastli:85,later:[29,61],latest:[63,72],launch:85,layer:[46,49,52,53,54,60,61,70,81,82,83,85,87],layer_norm:65,layout:[2,48,65,69,70],ld_library_path:63,ld_preload:84,ldd:63,le:65,lead:74,leader:74,leaky_relu:65,leaky_relu_:65,learn:[61,64,83,85,87],leas:75,least:[74,75],leav:54,lectu:[75,77],left:[72,74],legacy_calibr:68,legend:74,len:65,lenet:[61,80],lenet_script:[61,80],lenetclassifi:80,lenetfeatextractor:80,length:[3,4,44,65,75,81],leo:77,let:[46,52,54,60,69,70,72,74,81,82,85],letter:[75,82],level:[18,23,25,26,39,42,44,50,54,55,58,67,70,78,80,81,85],levelnamespac:50,leverag:[81,83],lib:[54,61,63],libero:[75,77],librari:[34,42,43,44,45,52,56,57,58,60,61],libtorch:[4,36,60,61,63,83],libtorch_pre_cxx11_abi:63,libtorchtrt:[52,61,63],libtorchtrt_plugin:84,libtorchtrt_runtim:84,licens:[42,43,44,45,61],light:74,ligula:77,like:[52,53,54,57,60,61,62,63,73,74,80,81,83,84,85],limit:[54,67,73,83],line:[52,61,75],linear:[2,65,69,80],link:[52,53,61,64,72,73,78,84],linux:[58,61,63],list:[18,19,20,21,31,49,51,53,55,57,60,61,62,63,65,66,69,70,78,81,85],listconstruct:[53,57,61],listunpack:[57,61],liter:75,literal:75,literal_block:74,live:[60,74],ll:81,lo:65,load:[52,55,57,61,62,68,70,81,82,83,84,85,86],load_librari:84,l
oading_data_recip:83,loborti:[75,77],local:[52,54,61,72],localhost:85,locat:[63,83],lock:73,log:[15,16,19,20,38,44,50,51,54,60,64,65,69,81],log_debug:60,logger:67,logger_level:66,loggingenum:50,logic:81,login:85,logist:81,loglevel:67,logo_onli:72,lone:75,longer:[72,84],look:[53,54,80,83,85,86],loop:81,loop_unrol:54,lorem:[75,77],lose:72,loss:[82,83],lot:60,low:81,lower:[16,67,69,75,82],lower_exampl:81,lower_graph:54,lower_precis:81,lower_tupl:54,loweralltupl:54,lowersimpletupl:54,lstm_cell:65,lt:65,ltorchtrt:84,luctu:77,lvl:[25,26,42],m:75,machin:[57,83,85],macro:[5,6,7,8,9,10,11,12,15,18,20,21,42,44,45,50,51],mad:74,made:[54,56,58,74],maecena:77,magna:77,mai:[53,57,58,61,62,70,74,75,80,81,83,85],main:[54,55,56,57,58,60,61,72,74,76,81],mainli:81,maintain:[55,57,60],major:[58,81],make:[53,61,62,63,74,76,81,82,83,85,87],make_data_load:[4,83],make_int8_cache_calibr:[20,40,44,50,83],make_int8_calibr:[20,29,40,44,50,83],malesuada:77,man:[74,75],manag:[49,52,53,56,58,60,61,67,69,70],mangag:54,mani:[72,74,75,81],mantissa:[49,70],manual:[73,74,81],map:[1,46,53,54,56,58,60,61,81,82,83,85,86],mapper:81,mark:[54,72],marknodesforfallback:54,markup:[75,78],markup_process:74,mask:65,masked_fil:65,massa:77,master:[59,63,74,83,84],mat2:65,match:[54,63],math:78,matmul:[54,61,65],matrix:59,matter:81,matti:75,matur:58,mauri:[75,77],max:[48,52,60,65,69,72],max_batch_s:[81,85],max_c:52,max_h:52,max_input_shap:66,max_n:52,max_pool1d:65,max_pool2d:[61,65,80],max_pool3d:65,max_shap:[45,48,62,69,70,81,82],max_val:[60,65],max_w:52,max_workspace_s:81,maximu:77,maximum:[48,49,52,70,81,85],mayb:74,mb:52,md:59,me:[74,75],mean:[55,60,64,65,81,85],mechan:[60,81,82],medium:74,meet:69,member:[46,47,48,49,69],memeori:2,memori:[20,21,44,45,54,60,61,62,69,70],memory_format:[65,69],memoryformat:[2,45],men:74,mental:74,menu:[52,72,74],menuselect:74,messag:[16,25,26,52,67],meta:[78,81],metadata:[49,52,57,60,70,72],meth:74,method:[31,32,33,37,48,52,54,60,61,63,69,70,74,80,82,86],method_nam:[31,37,45,52,61,69,70],metu:77,mi:77,middl:74,might:[54,63,72],min:[48,52,60,65,69],min_block_s:[45,49,55,70],min_c:52,min_h:52,min_input_shap:66,min_n:52,min_shap:[45,48,62,69,70,81,82],min_val:[60,65],min_w:52,mind:74,mine:74,minim:83,minimum:[48,49,52,55,67,70],minmax:[29,30,83],minmax_calibr:68,misbuild:72,miss:[61,74],mkdir:63,mm:85,mmb:74,mobilenet_v2:86,mobilenetv2:82,mod:[52,55,61,78,81,83],mode:[62,81,83],mode_:83,model:[52,55,57,61,62,64,66,67,80,83,86],model_nam:85,model_repositori:85,model_torchtrt:67,model_trt:67,modifi:[75,81],modul:[31,32,33,37,45,49,52,55,56,57,58,60,62,63,64,66,69,70,73,74,75,81,82,83,86,87],modular:61,module_fallback:54,module_nam:52,molesti:77,momentum:65,morbi:77,more:[53,61,63,64,69,72,75,80,81,83,84,85,86],most:[58,63,66,81,84,85],mother:74,motion:74,mous:74,move:[30,44,54,57,61,70,83],msg:[26,42,67],mu:74,much:[60,72,74,83],mul:65,mul_:65,multi:52,multipl:[57,74,75,83,85],multipli:[49,70],must:[33,48,49,52,54,55,60,61,63,69,70,74,75,81,84],mutil:75,my:74,my_pytorch_model:81,myclass:74,mymodel:[55,62],myself:75,n:[52,60,61,83],nabla:74,nam:[75,77],name:[3,4,31,33,37,44,55,57,60,61,63,68,70,74,75,80,81,85,86],namedtupl:81,namespac:[42,43,44,45,51,54,64,83],narrow:65,nativ:[58,59,61],native_funct:59,natur:74,nav:[72,78],navig:72,navigation_depth:72,nbbind:[3,4,44],nchw:[2,69,70],ne:[54,65],nec:77,necessari:[42,84],need:[0,1,2,25,29,43,46,53,54,60,61,62,63,66,74,81,82,83,84,85],neg:65,negative_slop:65,nequ:[75,77],nest:[45,49,50,74,75],net:[60,61,74,75],netu:77,network:[29,30,60,61,81,82,83,85,87],neural:87,ne
w_lay:60,new_local_repositori:63,new_siz:83,newer:63,next:[3,4,53,57,72,74,75,83,85],ngc:[63,85],nhwc:[2,52,69],nibh:[75,77],nice:63,nickel:74,night:75,nightli:81,ninja:63,nisi:77,nisl:77,nlp:[29,30,83],nn:[54,59,61,62,69,70,80,81],node:[54,55,56,58,60,61,81,82],node_info:[60,61],noexcept:[3,4,44,83],non:[75,77],non_block:65,none:[60,65,66,69,70,72,74,81],nonetheless:74,nonexist:74,norm:65,normal:[0,1,2,46,61,74,80,81,83,85,87],normalized_shap:65,noskipw:44,notatemoduleforfallback:54,note:[1,46,48,60,61,63,69,72,74,81,87],notebook:[58,64],now:[54,58,60,61,63,74,81,86],np:85,nu:74,nulla:77,nullptr:[44,45,49],num:52,num_avg_timing_it:[45,49,70,86],num_it:52,num_op:52,num_work:83,number:[3,4,49,52,54,55,60,61,62,69,70,72,81,82],numel:65,numer:[52,75,81],numpi:85,nunc:77,nvcr:85,nvidia:[32,37,42,43,44,45,52,59,61,63,69,70,81,85,87],nvinfer1:[3,4,29,30,44,45,49,60,83],nvinfer:[20,44],o:[63,74,85],obj:65,object:[0,1,2,3,4,46,48,49,52,57,60,67,68,70,83,86],obtain:82,obvious:80,odio:[75,77],off:[55,57],offici:63,ofstream:[44,61],often:74,oh:75,ok:[61,74],okai:49,older:58,onc:[42,43,44,45,53,54,57,81,83,84,85],one:[47,54,60,61,62,67,69,74,80,81,85],ones:[42,55,56,58,61,63,74],onli:[1,3,4,16,29,44,46,48,52,54,55,58,60,63,66,67,69,74,81,83,84,87],onnx:54,onto:[52,57],op:[52,53,54,56,58,60,61,69,84],op_and_target:81,op_nam:52,open:[82,85],oper:[0,1,2,3,4,31,44,45,46,49,52,53,54,55,56,57,58,60,62,64,69,70,81,83,87],oppos:70,opset:[56,58],opt:[48,63,69],opt_c:52,opt_h:52,opt_n:52,opt_shap:[45,48,62,69,70,82],opt_w:52,optim:[48,52,54,61,62,64,66,80,81,82],optimin:48,optimiz:80,optimize_target_shap:81,optimized_input_shap:66,optimz:85,option:[44,48,52,55,56,58,63,69,74,78,81,83,84,87],orchestra:74,orci:77,order:[49,55,60,61,62,63,66,70,81],org:[59,61,63,72,74,80,83],organ:75,origin:81,ornar:[75,77],os:45,ostream:45,other:[0,1,2,45,46,52,53,54,57,61,62,63,64,65,73,74,81,84],otherwis:[63,84],our:[55,58,61,80,85],out:[31,44,53,54,55,56,58,60,61,63,67,70,74,85],out_shap:61,out_tensor:[60,61],output0:54,output:[24,27,33,49,52,53,54,55,57,60,61,63,67,70,72,74,75,81,82,85],output__0:85,output_file_path:[52,87],output_nam:[66,81],output_pad:65,output_s:65,outself:61,outsid:74,over:[56,58,74,81,85],overal:82,overrid:[3,4,29,30,44,69,81,83],overview:[59,64],own:[60,61,63,74,85],p:[52,61,65,85,87],packag:[52,54,61],pad:65,padding_idx:65,page:[64,76,78,85],pair:[54,60,63,74,82,83],pane:74,paragraph:[75,78],param:[68,73],paramet:[0,1,2,3,4,25,26,27,29,30,31,32,33,35,37,46,48,49,53,54,60,61,67,69,70,78,80,81],parent:[14,15,18,19,20,21],pars:[61,74],parser:74,part:[52,55,58,72,73,74,81],partial:[52,74],partit:54,partitioninfo:55,pass:[53,55,56,57,58,60,61,63,67,68,80,81,83],past:74,path:[4,13,14,15,29,30,52,61,63,68,69,80,81,83,85],path_to_torchtrt_root:63,pathwai:80,pattern:[60,61,69],payment:73,pbtxt:85,peephole_optimz:54,pellentesqu:77,peopl:74,pep:74,perforamnc:81,perform:[29,30,82,83,85],permit:74,permut:[65,81],persist:74,pharetra:77,phase:[16,60,61],phasellu:77,phi:74,philosoph:74,phrase:74,pi:74,pick:80,pickler:57,piec:82,pil:85,pin:73,pin_memori:65,pip3:63,pip:[63,85],pipelin:[52,87],piplein:61,pixel_shuffl:65,pl:73,place:[48,54,63,74,75,76,81,83],placerat:77,plan:[52,58],platea:77,platform:[45,52,58,63,85,87],pleas:[61,63,74,81,85],plugin:81,point:[61,69,72,73,74,85],pointer:[3,4,83],polish:73,pool:87,pop:57,popul:66,popular:[63,73,82],portabl:[57,70],portion:74,porttitor:[75,77],posit:[52,69,72,81],possibl:[63,74,82,85],post:[29,30,49,52,61,64],posuer:[75,77],potenti:[49,77],pow:65,power:[61,74,81,82],pr:61,pr
aesent:77,pragma:[42,43,44,45,83],pre:[33,54,68,70,83,84],pre_cxx11_abi:63,preced:74,precis:[49,52,61,62,64,69,81,83,87],prefer:61,prefix:[27,28,42,67,74],preinstal:63,prelu:65,prepar:[81,85],preprint:83,preproc:68,preprocess:[83,85],preserv:[74,80,83],prespect:80,press:74,pretium:77,pretrain:[82,85,86],pretti:61,prev_next_buttons_loc:72,prevent:[49,52],previou:72,previous:[29,33,61],prim:[53,54,57,61,65,80],prim_devic:65,primal:74,primarili:[58,61],print:[16,31,44,61,67,69,70,74,85,86],priorit:63,privat:[3,4,44,45,83],problem:74,problemat:74,proce:85,proceed:85,process:[52,55,73,74,80,82,83,85,86],prod:65,produc:[48,53,57,60,61,74,82],product:49,profil:[48,66],profiling_verbos:81,program:[18,19,20,21,29,51,52,56,57,58,64,80],programm:74,progress:75,proin:77,project:[63,73,78],promis:81,prop:66,properli:63,properti:72,propog:54,prose:74,provid:[3,4,49,52,55,57,60,61,62,63,66,69,70,74,81,83,84,85,86],providi:[56,58],provok:74,pt:[52,61,81,85],ptq:[3,4,15,18,19,38,50,51,52,64,69,70],ptq_calibr:[3,4,45,49,83],ptqtemplat:50,publish:85,pull:[63,85],purchas:73,pure:31,purpos:[63,81,82,85],puru:77,push:57,push_back:[44,55],put:[74,82],pwd:85,py3:85,py:[54,58,61,63,72,74,79,80,81,83],pyindex:85,pypi:63,python3:[54,61,63],python:[52,55,58,61,69,70,74,75,81,82,84,85,86,87],python_api:59,pytorch:[48,49,52,54,55,56,57,58,60,61,62,63,68,69,70,80,83,84,85],pytorch_libtorch:85,pytorch_sphinx_them:[72,79],qat:82,quant_max:65,quant_min:65,quantiz:[29,30,52,61,64],quantizatiom:49,quartznet:82,question:61,qui:[75,77],quickli:[52,61,83],quisqu:77,quit:[60,61,82],quot:75,r:74,rais:[54,81],raiseexcept:54,ram:[49,52,70],rand:[61,81],randn:[55,61,69,70,86],rang:[48,49,52,69,81,82],rank:72,rather:54,raw:72,re:[74,81],read:[3,4,29,30,44,72,74,83],read_calibration_cach:68,readcalibrationcach:[3,4,44],reader:74,realiz:57,realli:60,reason:[0,80,81],reattribut:75,recalibr:29,receiv:81,recip:83,reciproc:65,recognit:[82,83],recomend:[29,30],recommend:[29,30,61,63,74,81,85],record:[53,80],recurs:53,redistribut:75,reduc:[54,56,58,81,82,83],redund:81,ref:74,refer:[48,56,58,61,73,78,81,83,85],referenc:63,refit:[45,49,70,86],reflect:45,reflection_pad1d:65,reflection_pad2d:65,regard:[63,74],regardless:75,region:81,regist:[33,57,60,70,81],register_acc_op:81,register_acc_op_map:81,register_custom_acc_mapper_fn:81,registernodeconversionpattern:[60,61],registr:81,registri:[53,61],reinterpret_cast:44,rel:52,relat:[46,74],relationship:50,releas:74,reload_model_output:81,reload_trt_mod:81,relu:[55,61,65,80],relu_:65,remain:[54,83],rememb:81,remov:72,remove_contigu:54,remove_dropout:54,remove_to:54,render:72,rent:75,repack:57,repeat:[52,65],replac:[54,63],replication_pad1d:65,replication_pad2d:65,replication_pad3d:65,report:[23,44],reportable_log_level:67,repositori:[58,72,79,85],repres:[48,49,60,67,74,81],represent:[54,60,80,81,82],request:[61,69,85],requir:[29,49,52,53,54,61,67,69,70,72,81,83,84,85],require_full_compil:[45,49,70],requires_grad:65,research:81,reserv:[42,43,44,45],reset:44,reshap:[65,85],resiz:85,resnet50:85,resnet:[57,82,85],resnet_trt:57,resolut:82,resolv:[53,54,56,58],resourc:[53,83],respons:[29,57,74],rest:[74,75,81],restrict:[49,70],restructuredtext:[74,75],result:[53,54,62,67,70,72,80,85],ret:54,reus:[54,81,83],revert:72,revis:[74,75],revisit:74,rfc:74,rho_:74,rhoncu:77,right:[42,43,44,45,54,58,60,74],risu:77,rm:85,rn50_preprocess:85,role:74,roll:65,roman:75,room:74,root:[42,43,44,45,63,72,83],roughli:55,round:[49,70],rounding_mod:65,row:75,rst:[72,74],rsub:65,rtol:52,rule:[63,70,81],ruler:74,run:[1,37,46,49,52,5
3,54,55,56,57,58,60,61,62,63,64,66,69,70,74,80,81,82,83,84,85,86,87],running_mean:65,running_var:65,runtim:[61,64],runtimeerror:81,rutrum:[75,77],s:[48,49,55,57,60,61,62,63,64,66,69,72,74,75,80,81,82,83,85],safe:[60,70],safe_dla:69,safe_gpu:69,safeti:[49,52,69],sage:74,sagitti:[75,77],sai:[75,82],said:74,same:[57,61,63,72,74,80,81,85,86],sampl:[74,81,83,85],sample_input:81,sapien:77,satisfi:[55,81],save:[29,44,52,57,61,62,69,70,81,82,84,85],saw:61,scalar:[60,65],scalaropt_dim:65,scalartyp:[0,45,65],scale:[65,82,83],scale_factor:65,scale_grad_by_freq:65,scales_d:65,scales_h:65,scales_w:65,scelerisqu:77,schedul:[69,85],schema:[60,61],scheme:81,scientist:74,scope:54,scratch:29,scratch_spac:85,screen:72,script:[31,54,55,61,62,69,70,80,86],script_model:[80,86],scriptclass:70,scripted_model:87,scriptmodul:[61,62,69,70],scroll:[72,76],sdk:59,se:82,seamlessli:64,search:[64,72],second:[54,74,81],secondli:85,section:[61,72,74,75,76,78,81,83,85],sed:[75,77],see:[31,54,57,61,63,69,70,74,80,81],seen:[74,75],segment:[55,82],select:[17,29,30,37,49,52,57,62,63,65,69,70,73,76,81,83],self:[54,57,60,61,65,68,80,82,87],self_1:[57,61],self_int:65,sell:75,seller:73,seller_id:73,sem:77,semant:74,semper:77,send:85,senectu:77,sens:[61,74],sentenc:[74,82],sentinel:[0,2],separ:[55,56,58],sequenc:[66,74,81,82],serial:[33,37,52,56,58,61,69,70],seriali:70,serializ:[57,80],serialized_cach:[66,81],serialized_engin:70,seril:57,serv:[52,57,64,81],servic:74,session:74,session_nam:74,set:[3,4,16,21,25,27,29,32,35,37,45,46,48,49,52,53,54,55,56,57,58,61,62,63,64,66,67,69,70,72,76,79,80,81,82,83,87],set_data_from_numpi:85,set_devic:[21,38,45,50,69],set_is_colored_output_on:[18,39,42,50,67],set_logging_prefix:[18,39,42,50,67],set_reportable_log_level:[18,39,42,50,67],setalpha:60,setbeta:60,setnam:[60,61],setreshapedimens:61,setup:[43,83,85],sever:[16,26,67],sh:63,sha256:63,shape:[45,47,48,49,52,55,60,62,65,66,69,70,81,85,87],shape_mod:69,shape_rang:[66,81],share:[49,52,63,70],shell_command:74,shift:[63,65,74],ship:[61,84],shorthand:74,should:[0,3,4,29,45,49,52,53,54,55,56,58,60,64,67,69,70,72,74,77,81,83,85],show:[72,74,82],shown:[61,72,74],shuffl:[61,83],side:[54,61,72],sidebar:[72,78],sigmoid:[65,81],sigmoid_:65,sign:85,signatur:70,signifi:[48,54],signific:74,significantli:[54,72],similar:[60,61,81,84,86],simonyan:83,simpil:83,simpl:[74,75,80,81,82,85],simplest:85,simpli:[54,82],simplifi:53,simul:82,sin:[65,74],sinc:[54,61,74,80,81,83],sing:74,singl:[48,52,54,61,69,74,80,81,83],singular:60,sinh:65,sink:74,sit:[75,77],site:[54,61,63,74],six:74,sixth:75,size:[3,4,44,48,49,52,54,55,61,65,69,70,72,81,82,83],size_t:[3,4,44,83],skip:52,slash:72,slice:65,slightli:81,sm:57,small:[54,85],smaller:82,so:[0,44,52,53,54,57,58,60,61,63,64,73,74,75,81,83],sodal:77,softmax:[54,65,81],softwar:[49,52,70,74],sole:[62,83],sollicitudin:77,solv:85,some:[53,54,56,57,58,60,61,73,74,81,83],some_funct:74,someth:[43,54,74,85],someurl:74,sort:[60,65,86],sourc:[42,43,44,45,58,66,67,68,69,70,81],sourceforg:[74,75],space:[74,75,83],spaces_and_linebreak:74,span:75,spars:[52,65],sparse_weight:[45,49,70,81],sparsiti:[49,52,70,81],spec:[45,48,49,52,67,69,70,86],specif:[32,49,54,56,58,69,70,74,82],specifi:[3,4,52,60,62,63,64,67,69,70,72,74,81,85,86],specifii:69,speech:82,speedup:82,sphinx:[72,73,74,75,79],sphinx_rtd_them:[74,75],spin:85,spirit:74,split:[65,81],split_siz:65,split_with_s:65,sqrt:65,squeez:[65,82],sram:52,src:[57,59,65],ss:44,ssd300_trt:57,ssd:57,ssd_trace:52,ssd_trt:52,sstream:[20,44],stabl:[59,70,72],stack:[57,65,83],stage:[53,81],stand:[57,74],st
andalon:74,standard:[52,57,64,74,82,84,86],stapl:75,start:[53,55,61,63,65,75,81,82,86],start_dim:[61,65],start_step:65,state:[53,60,61],statement:[54,74],static_cast:44,statu:[44,75],std:[3,4,22,26,28,29,30,31,33,34,37,42,44,45,47,48,49,55,61,83,85,87],stdout:[36,67,69],steamlin:83,step:[64,65,81,82,83],stick:72,sticki:[72,78],sticky_navig:[72,76],still:[44,55,81,83],stitch:[55,61],stop:61,storag:83,store:[2,4,49,52,53,57,60,61,70,80,81],str:[19,43,44,50,65,67,69,70,81],straight:60,strang:74,strategi:69,street:75,strict:84,strict_type_constraint:81,stride:65,string:[3,4,18,20,21,22,26,28,29,30,31,33,34,37,42,44,45,49,55,57,60,61,69,72,83],stringstream:44,strip_prefix:63,strong:74,strongli:74,struct:[1,21,38,41,45,83],structur:[29,46,49,55,58,60,72,74,78,80,85],structuredtext:74,stub:75,stuff:74,style:[42,43,44,45,72,74,75],style_external_link:72,sub:[65,74,80],sub_:65,subdirectori:51,subexpress:54,subgraph:[49,52,53,54,60,61],subject:58,submenu:78,submodul:80,subscript:74,subsect:74,subset:[82,83],substitut:74,subtitl:74,subtre:79,subword:82,sudo:63,suffic:54,suggest:85,suit:64,suitabl:81,sum:[49,65,70,81],superscript:74,supervis:82,suppli:74,support:[0,1,2,27,31,46,48,49,52,55,59,61,62,63,64,69,70,72,73,80,81,85,87],sure:[61,62,63,85,87],suscipit:[75,77],suspendiss:77,symbol:[33,63,70,74,81,84],symlink:79,system:[53,60,63,64,70],t1:65,t2:65,t:[0,1,2,45,46,54,60,61,63,65,72,74,75,80,81,83,85],t_:74,tabl:[63,78],tag:[74,85],take:[31,32,33,37,53,56,57,58,60,61,69,70,72,74,81,82,83,86],taken:74,talk:64,tan:65,tanh:65,tanh_:65,tar:[63,74,83],tarbal:[61,83],target:[1,33,45,46,48,49,52,57,58,62,64,69,70,81,83,86,87],targets_:83,task:[29,30,81,82,83],techinqu:61,techniqu:83,tell:[54,55,56,57,58,60,74],tellu:77,tem:52,templat:[20,40,44,45,50,61,72],temporari:81,tempu:77,tensor:[2,33,44,45,48,49,52,53,54,55,57,60,61,65,66,69,70,80,81,82,83],tensor_mod:65,tensor_scalar:65,tensor_tensor:65,tensorcontain:60,tensorformat:[21,38,45,48,50,69],tensorformatenum:50,tensorlist:[55,60],tensorrt:[0,1,3,4,29,30,31,32,33,36,37,44,45,46,48,49,52,53,54,55,56,58,60,66,68,69,70,80,83],tensorrt_convert:81,tensorrtcompilespec:[70,86],tensort:81,teo:52,term:[69,74,75,82,83],termin:[27,52,61],test:[52,58,63,74,75,81,82,83,85],test_acc_trac:81,test_ptq_dataloader_calibr:83,test_ptq_trt_calibr:83,test_py_modul:[74,78],testing_dataload:83,testing_dataset:83,text:[67,75,77,82],tf32:[49,52],than:[54,64,73,74,82,84],thats:[53,83],the_model_repositori:85,thei:[46,52,53,54,57,60,62,63,69,72,74,81],them:[54,55,57,61,63,72,81,82],theori:[53,74],therebi:[57,82],therefor:[29,57,61,74,81,82],theres:84,therfor:84,theta:74,thi:[0,1,2,29,30,42,43,44,45,46,47,48,49,52,53,54,55,56,57,58,60,61,63,66,69,70,72,73,74,76,77,80,81,82,83,84,85,86],thicker:74,thin:74,thing1:74,thing2:74,thing3:74,thing:[63,74,81],think:[60,74],third:[75,81],third_parti:[58,63],this_arg_is_opt:81,those:[53,74],though:[52,58,60,61,80],thought:74,three:[48,56,58,66,69,74,75,81,82,85],threshold:52,through:[48,53,54,55,57,61,62,64,67,68,74,81,82],throught:81,thrown:[49,70],thu:74,time:[49,52,53,54,56,57,58,60,61,70,72,74,81,83],timing_cach:81,tincidunt:77,tini:83,titles_onli:72,tmp:61,toctre:72,tocustomclass:60,todim:61,todo:[72,81],togeth:[53,60,61],token:82,toler:52,too:[63,72,74,75],tool:[60,61,81,82],toolchain:[58,63],top:[58,72,76],topk:65,torch:[0,1,2,4,20,21,29,30,31,32,33,36,37,44,45,46,47,48,49,52,53,54,55,56,57,58,60,63,66,69,70,80,83,87],torch_executed_modul:[45,49,55,70],torch_executed_op:[45,49,55,70],torch_scirpt_modul:80,torch_script_modul:61,torch_tens
orrt:[0,1,2,3,4,14,16,17,42,43,44,46,47,48,49,50,51,52,55,61,62,64,81,82,83,84,85,86,87],torch_tensorrt_export:43,torch_tensorrt_major_vers:[19,43,50],torch_tensorrt_minor_vers:[19,43,50],torch_tensorrt_patch_vers:[19,43,50],torch_tensorrt_vers:[19,43,50],torch_tensorrtfil:50,torch_tensorrtnamespac:50,torchbind:57,torchhub:85,torchscript:[19,21,38,43,45,49,50,52,56,57,58,62,69,70,82,86,87],torchscriptstruct:50,torchtrt:[43,55],torchtrt_api:[0,2,19,22,23,24,25,26,27,28,31,32,33,34,35,36,37,42,43,44,45,48,49,50],torchtrt_check:60,torchtrt_hidden:[19,43,50],torchtrt_runtime_exampl:84,torchtrt_unus:60,torchtrtc:[63,64,87],torchvis:[57,83,85,86],toronto:83,tortor:77,totensor:[83,85],tovec:61,toward:83,trace:[55,61,70,80,81],traced_model:80,track:[60,83],tradit:[48,70,83],traget:32,trail:72,train:[29,30,49,52,61,62,64,65],trainabl:54,transcrib:82,transfer:73,transform:[61,83,85],transformed_img:85,translat:61,transmit:74,transpos:[65,81],trash:74,travers:[56,58],treat:52,tree:[42,43,44,45,72,83,84],trigger:[61,81],trim:83,tristiqu:77,triton:64,triton_to_np_dtyp:85,tritoncli:85,tritonserv:85,trt:[0,1,3,4,46,48,53,54,57,60,61,65,81],trt_interpreter_result:81,trt_lenet_script:61,trt_mod:[55,61,83,87],trt_model:[55,85,86],trt_ts_modul:[55,62],trtinterpret:[66,81],trtinterpreterresult:[66,81],trtmodul:[66,81],truncat:[49,52,70],truncate_long_and_doubl:[45,49,70],ts:[43,52,55,61,62,64,69,80,86],ts_model:[55,61],tt:74,tue:75,tupl:[57,66,69,70,81],tupleconstruct:[54,57],tupleunpack:54,turpi:77,tutori:[80,83],two:[52,54,60,62,63,74,75,79,80,81,83,85],type:[0,1,2,30,49,50,52,53,57,60,61,62,66,67,69,70,74,81,82,83],type_fp32:85,typenam:[3,4,29,30,44],typic:[53,60,85],ugli:74,ui:73,uint64_t:[45,49],ultric:77,un:83,unabl:[60,61],unbind:65,unbroken:74,uncas:82,uncom:63,under:[42,43,44,45,58,74],underli:[0,1,2,46,60],uniformli:82,union:[60,61,69,70],uniqu:[4,62],unique_ptr:[4,30],unit:81,univers:74,unknown:69,unless:81,unlik:[63,64,86],unlimit:72,unpack_addmm:54,unpack_log_softmax:54,unqiue_ptr:4,unreferenc:74,unrestrict:74,unsqueez:65,unstabl:58,unsupport:[31,49],unsur:60,untest:58,until:[53,58,60,63],unwrap:60,unwraptodoubl:60,unwraptoint:61,unzip:63,up:[53,54,56,57,58,74,80,81,82],updat:81,upload:85,upon:72,upper:75,upsample_bilinear2d:65,upsample_linear1d:65,upsample_nearest1d:65,upsample_nearest2d:65,upsample_nearest3d:65,upsample_trilinear3d:65,upscale_factor:65,upstream:61,uri:74,url:[63,72,85],urna:77,us:[0,1,2,3,4,29,30,32,35,37,43,44,45,46,48,49,52,53,55,57,58,60,64,66,67,68,69,70,72,73,74,75,80,81,83,84,85,87],usag:[61,68,74],use_cach:[3,4,30,44,68,83],use_cache_:44,use_cmake_generated_export_head:43,use_input_stat:65,use_subset:83,usecas:[62,63],user:[42,48,55,56,57,58,61,62,63,64,74,75,83,85],using_int:[61,65],usr:63,usual:[72,81],ut:77,utf:[74,75],util:[60,61,70,82,83,85],v0:[71,85],v2:[29,30],v:[52,75,85],valid:[1,46,60],valu:[0,1,2,16,17,45,46,48,53,57,60,61,65,67,68,69,72,82],value_tensor_map:[53,60],vanilla:81,vari:66,variabl:[48,69,81],variant:84,varient:54,varieti:85,variou:[81,87],variu:77,vcs_pageview_mod:72,vec:65,vector:[20,21,44,45,47,48,49,55,57,61,83,87],vehicula:77,vel:77,velit:77,venenati:77,verbios:52,verbos:[52,75],veri:[75,76,81,83,85,86],verifi:55,verison:63,version:[34,36,58,63,72,75,81,82,85],vertic:[72,74],vestibulum:[75,77],vgg16:83,vgg:83,vi:74,via:[64,69,70,72,78,81,82,83,84],view:[65,72],virtual:83,vision:[81,85],visitor:72,vita:[75,77],vivamu:77,viverra:77,vm:75,volutpat:77,vs:[0,1,2,46,54,70,86],vulput:77,w:52,w_hh:65,w_ih:65,wa:[54,57,61,74,81],wai:[52,61,63,80,81,82,8
3],walkthrough:82,want:[42,55,61,66,80,81,83,85,86],warn:[16,44,52,60,67],wash:74,we:[42,44,53,54,56,57,58,60,61,66,72,74,80,81,82,83,85],weak:74,web:74,websit:63,weight:[48,49,52,53,61,65,70,74,81,82],welcom:[61,81],well:[61,63,67,74,83],were:61,wget:85,what:[4,54,61,62,74,80,81],whatev:[57,81],wheel:63,when:[27,44,45,46,52,53,54,56,57,58,60,61,63,67,69,70,72,74,76,80,81,82,83],where:[53,54,60,61,70,75,81,83],wherev:81,whether:[4,52,66,69,73,81,83],which:[1,2,29,32,37,46,49,53,54,55,56,57,58,60,61,62,63,68,70,72,74,75,80,81,82,83,84,85,86],white:74,whitespac:74,whl:63,who:74,whole:81,whose:[54,81],why:74,wide:78,width:[74,82],window:74,window_nam:74,wish:75,within:[49,52,56,58,70,72,74],without:[60,61,72,74,83],wl:84,wooden:74,word:[74,82],work:[44,54,58,60,74,75,81,83],worker:83,workflow:[81,82,86],workspac:[49,52,63,70],workspace_s:[45,49,52,70],world:74,would:[52,60,61,62,63,81,84,85,86],wp:85,wrap:[56,57,58,61,74,77,81,86],wrapper:[60,81],write:[3,4,29,30,44,53,61,64,74,81,83,85],write_calibration_cach:68,writecalibrationcach:[3,4,44],wrote:74,www:[61,63,72,74,83,85],x86:84,x86_64:[58,63],x9:54,x:[5,10,33,43,54,61,63,70,75,80],x_0:74,x_1:74,x_2:74,x_3:74,x_4:74,x_:74,xavier:[45,87],xstr:[19,43,50],xx:85,xxx:81,y:[33,70,75],yahoo:75,yaml:59,yet:[81,82],you:[0,1,2,29,30,46,48,49,52,53,54,55,57,58,60,61,62,63,64,69,70,72,74,75,76,80,81,82,83,84,85,86],your:[60,61,62,63,64,72,74,75,79,80,84,86],yourself:61,yy:85,z:75,zero_point:65,zip:[57,63],zisserman:83},titles:["Class DataType","Class Device::DeviceType","Class TensorFormat","Template Class Int8CacheCalibrator","Template Class Int8Calibrator","Define STR","Define TORCH_TENSORRT_PATCH_VERSION","Define TORCH_TENSORRT_MAJOR_VERSION","Define TORCH_TENSORRT_MINOR_VERSION","Define TORCHTRT_API","Define XSTR","Define TORCHTRT_HIDDEN","Define TORCH_TENSORRT_VERSION","Directory cpp","Directory include","Directory torch_tensorrt","Enum Level","Enum EngineCapability","File logging.h","File macros.h","File ptq.h","File torch_tensorrt.h","Function torch_tensorrt::logging::get_logging_prefix","Function torch_tensorrt::logging::get_reportable_log_level","Function torch_tensorrt::logging::get_is_colored_output_on","Function torch_tensorrt::logging::set_reportable_log_level","Function torch_tensorrt::logging::log","Function torch_tensorrt::logging::set_is_colored_output_on","Function torch_tensorrt::logging::set_logging_prefix","Template Function torch_tensorrt::ptq::make_int8_cache_calibrator","Template Function torch_tensorrt::ptq::make_int8_calibrator","Function torch_tensorrt::torchscript::check_method_operator_support","Function torch_tensorrt::torchscript::compile","Function torch_tensorrt::torchscript::embed_engine_in_new_module","Function torch_tensorrt::get_build_info","Function torch_tensorrt::set_device","Function torch_tensorrt::dump_build_info","Function torch_tensorrt::torchscript::convert_method_to_trt_engine","Namespace torch_tensorrt","Namespace torch_tensorrt::logging","Namespace torch_tensorrt::ptq","Namespace torch_tensorrt::torchscript","Program Listing for File logging.h","Program Listing for File macros.h","Program Listing for File ptq.h","Program Listing for File torch_tensorrt.h","Struct Device","Struct GraphInputs","Struct Input","Struct CompileSpec","Torch-TensorRT C++ API","Full API","torchtrtc","Conversion Phase","Lowering Phase","Partitioning Phase","Compiler Phases","Runtime Phase","System Overview","Useful Links for Torch-TensorRT Development","Writing Converters","Using Torch-TensorRT in C++","Using Torch-TensorRT in 
Python","Installation","Torch-TensorRT","Operators Supported","torch_tensorrt.fx","torch_tensorrt.logging","torch_tensorrt.ptq","torch_tensorrt","torch_tensorrt.ts","Changelog","Configuration","5. :mod:`test_py_module`","3. Paragraph Level Markup","4. Lists & Tables","1. Long Sticky Nav","1. Structural Elements","<no title>","Installation","Creating a TorchScript Module","Torch-TensorRT (FX Frontend) User Guide","Example notebooks","Post Training Quantization (PTQ)","Deploying Torch-TensorRT Programs","Serving a Torch-TensorRT model with Triton","Using Torch-TensorRT Directly From PyTorch","DLA"],titleterms:{"1":[76,85],"10":76,"11":76,"12":76,"13":76,"14":76,"15":76,"16":76,"17":76,"18":76,"19":76,"2":[76,77,85],"20":76,"3":[76,85],"4":76,"5":76,"6":76,"7":76,"8":76,"9":76,"class":[0,1,2,3,4,20,21,38,40,41,50,66,68,69],"enum":[16,17,18,21,38,39,50,68,69],"function":[18,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,50,59,66,69,70],"long":[76,78],A:74,And:74,But:75,By:[18,19],Or:54,The:[61,74],To:54,aarch64:63,abi:[57,63],acc:81,acceler:82,add:81,addmm:54,admonit:74,advic:60,ahead:64,an:78,api:[50,51,59,63,64],applic:83,arg:[60,73],automat:55,avail:59,awar:82,background:[57,60],base:[3,4,48,72],bert:82,binari:63,block:74,branch:54,build:[63,72,85],bullet:75,c:[50,59,61,63,64,82,83],can:75,caption:[75,78],center:74,ch:74,changelog:71,check_method_operator_support:31,choos:63,citat:[74,83],citrinet:82,cli:[63,64],client:85,cmake:63,code:[54,74],compil:[32,56,58,61,63,64,82],compilespec:49,compound:74,configur:72,construct:57,content:[18,19,20,21,38,39,40,41,72,73,74,75,76,77],context:[60,72],contigu:54,contract:60,contributor:64,convers:[53,56,58,60],convert:[53,60,61,65,81],convert_method_to_trt_engin:37,cpp:[13,18,19,20,21,55],creat:[80,83],creativ:74,cudnn:63,current:65,custom:61,cxx11:63,data:73,datatyp:0,dead:54,debug:63,deep:82,deeper:75,defin:[5,6,7,8,9,10,11,12,19,50],definit:[18,19,20,21,75],demo:78,depend:63,deploi:[82,84],deseri:57,detect:82,develop:59,devic:[1,46],devicetyp:1,dimens:59,direct:74,directli:86,directori:[13,14,15,51],disk:80,distribut:63,dla:87,doctest:74,documen:64,document:[0,1,2,3,4,5,6,7,8,9,10,11,12,16,17,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,46,47,48,49,59,64,77,78],down:75,download:[74,79],dropout:54,dump_build_info:36,dynam:82,easier:59,efficentnet:82,element:77,elimin:54,eliminatecommonsubexpress:54,embed_engine_in_new_modul:33,emphas:74,engin:[57,81],enginecap:17,enumer:75,envior:63,evalu:[53,65],exampl:[74,76,82],execept:54,executor:57,expect:59,face:82,fallback:[54,55],field:75,figur:74,file:[15,18,19,20,21,42,43,44,45,50,51],flatten:54,footnot:74,format:57,freez:54,from:[63,86],frontend:[81,82],full:[50,51],fuse:54,fx2trt:81,fx:[66,81,82],gaurd:54,gener:73,get:64,get_build_info:34,get_is_colored_output_on:24,get_logging_prefix:22,get_reportable_log_level:23,giant:75,git:79,glossari:74,gpu:64,graph:[54,57],graphinput:47,grid:75,guarante:60,guid:81,h:[18,19,20,21,42,43,44,45,55],have:75,hierarchi:50,hlist:75,hole:75,hood:61,how:[72,81,83],html:72,hug:82,ien:74,imag:[74,75],includ:[14,18,19,20,21],incred:78,index:73,indic:64,infer:85,inherit:[3,4,48],inlin:74,input:48,instal:[63,79],int8:82,int8cachecalibr:3,int8calibr:4,ir:59,jetson:63,jit:64,languag:82,layer:59,learn:82,lenet:82,level:[16,72,74,75],librari:[63,84],libtorchtrt:84,like:75,line:74,linear:54,link:[59,74],list:[42,43,44,45,75],liter:74,local:63,log:[18,22,23,24,25,26,27,28,39,42,67],logsoftmax:54,loop:54,lower:[54,56,58],macro:[19,43],make_int8_cache_calibr:29,make
_int8_calibr:30,markup:74,mask:82,math:74,menu:[76,78],meta:74,miss:81,mlm:82,mod:73,model:[81,82,85],modul:[54,61,80],namespac:[18,19,20,21,38,39,40,41,50],nativ:63,native_op:59,nav:76,nest:[1,46],node:53,notebook:82,number:[74,75],nvidia:64,object:82,one:75,op:[57,81],oper:[61,65],optim:85,optimz:54,option:[72,73,75],other:60,overview:58,own:83,packag:[63,84],page:72,paragraph:[74,77],paramet:73,partit:[55,56,58],partitoninfo:55,pass:54,pattern:54,peephol:54,phase:[53,54,55,56,57,58],plugin:84,post:83,pre:63,precompil:63,prerequisit:63,program:[42,43,44,45,84],project:72,ptq:[20,29,30,40,44,68,83],python:[59,62,63,64,80,83],pytorch:[59,64,81,82,86],quantiz:[82,83],queri:85,quickstart:61,quot:74,rabbit:75,read:59,redund:54,refer:74,regist:61,relationship:[1,3,4,46,48],releas:63,remov:54,replac:74,resnet50:82,respons:60,result:57,right:63,rubric:74,runtim:[56,57,58,84],save:80,second:75,section:77,segmentedblock:55,serial:57,serv:[82,85],server:85,set:85,set_devic:35,set_is_colored_output_on:27,set_logging_prefix:28,set_reportable_log_level:25,setup:63,shape:82,shape_analysi:55,sidebar:74,so:84,sometim:59,sourc:63,ssd:82,start:64,step:85,sticki:76,str:5,struct:[46,47,48,49,50],structur:77,subdirectori:[13,14],submenu:76,submodul:69,subsect:77,subsubmenu:76,subsubsect:77,support:65,system:58,tabl:[72,73,74,75,76,77],tarbal:63,target:74,templat:[3,4,29,30],tensorformat:2,tensorrt:[50,57,59,61,62,63,64,81,82,84,85,86],test_py_modul:73,text:74,theme:[72,78],thi:[75,78],through:65,time:64,titl:74,toc:72,topic:74,torch:[50,59,61,62,64,81,82,84,85,86],torch_tensorrt:[15,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,45,66,67,68,69,70],torch_tensorrt_major_vers:7,torch_tensorrt_minor_vers:8,torch_tensorrt_patch_vers:6,torch_tensorrt_vers:12,torchscript:[31,32,33,37,41,61,64,80],torchtrt_api:9,torchtrt_hidden:11,torchtrtc:[52,61],tracer:81,train:[82,83],transform:82,triton:85,ts:70,tupl:54,tutori:64,type:[3,4,46,48],under:61,unpack:54,unrol:54,unsupport:61,up:85,us:[54,59,61,62,63,82,86],user:81,version:57,via:79,wai:74,weight:60,what:60,wide:72,work:[61,80],write:60,xstr:10,your:[83,85]}}) \ No newline at end of file 
+Search.setIndex({docnames:["_cpp_api/classtorch__tensorrt_1_1DataType","_cpp_api/classtorch__tensorrt_1_1Device_1_1DeviceType","_cpp_api/classtorch__tensorrt_1_1TensorFormat","_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator","_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8Calibrator","_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502","_cpp_api/define_macros_8h_1a282fd3c0b1c3a215148ae372070e1268","_cpp_api/define_macros_8h_1a31398a6d4d27e28817afb0f0139e909e","_cpp_api/define_macros_8h_1a35703561b26b1a9d2738ad7d58b27827","_cpp_api/define_macros_8h_1abd1465eb38256d3f22cc1426b23d516b","_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da","_cpp_api/define_macros_8h_1ad19939408f7be171a74a89928b36eb59","_cpp_api/define_macros_8h_1adad592a7b1b7eed529cdf6acd584c883","_cpp_api/dir_cpp","_cpp_api/dir_cpp_include","_cpp_api/dir_cpp_include_torch_tensorrt","_cpp_api/enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558","_cpp_api/enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb","_cpp_api/file_cpp_include_torch_tensorrt_logging.h","_cpp_api/file_cpp_include_torch_tensorrt_macros.h","_cpp_api/file_cpp_include_torch_tensorrt_ptq.h","_cpp_api/file_cpp_include_torch_tensorrt_torch_tensorrt.h","_cpp_api/function_logging_8h_1a0593f776f469c20469e2f729fc7861a3","_cpp_api/function_logging_8h_1a0c012cb374addd90eb1f42eaec570650","_cpp_api/function_logging_8h_1a56e110feaaba2c3fd44bd201fd21a76a","_cpp_api/function_logging_8h_1a7cb50492421ea9de4e3db895819df6f2","_cpp_api/function_logging_8h_1ac46ac0901cb97e3ae6e93b45f24e90b8","_cpp_api/function_logging_8h_1ad2efd47b6c3689e58ccc595680579ae5","_cpp_api/function_logging_8h_1af8f3443813315af7901903d25dd495cc","_cpp_api/function_ptq_8h_1a226e3c83379d1012cde8578c1c86b16c","_cpp_api/function_ptq_8h_1a6186e305f47c1d94b6130ef6c7f7e178","_cpp_api/function_torch__tensorrt_8h_1a5b405fd3bf3c8fc2e2a54cbbab979797","_cpp_api/function_torch__tensorrt_8h_1a6e19490a08fb1553c9dd347a5ae79db9","_cpp_api/function_torch__tensorrt_8h_1a710df824a7718b440e4bc17bf9693cef","_cpp_api/function_torch__tensorrt_8h_1ac4ab8313ae72c2c899ea31548b528528","_cpp_api/function_torch__tensorrt_8h_1ad1acd06eaeaffbbcf6e7ebf426891384","_cpp_api/function_torch__tensorrt_8h_1ad6a4ee8ca6c8f6e5519eb1128ec7f4a1","_cpp_api/function_torch__tensorrt_8h_1ae8d56472106eeef37fbe51ff7f40c9b2","_cpp_api/namespace_torch_tensorrt","_cpp_api/namespace_torch_tensorrt__logging","_cpp_api/namespace_torch_tensorrt__ptq","_cpp_api/namespace_torch_tensorrt__torchscript","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_logging.h","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_macros.h","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_ptq.h","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h","_cpp_api/structtorch__tensorrt_1_1Device","_cpp_api/structtorch__tensorrt_1_1GraphInputs","_cpp_api/structtorch__tensorrt_1_1Input","_cpp_api/structtorch__tensorrt_1_1torchscript_1_1CompileSpec","_cpp_api/torch_tensort_cpp","_cpp_api/unabridged_orphan","cli/torchtrtc","contributors/conversion","contributors/lowering","contributors/partitioning","contributors/phases","contributors/runtime","contributors/system_overview","contributors/useful_links","contributors/writing_converters","getting_started/getting_started_with_cpp_api","getting_started/getting_started_with_python_api","getting_started/getting_started_with_windows","getting_started/installation","index","indices/supported_ops","py_api/fx","py_api/logging","py_api/ptq","py_api/torch_tensorrt","py_api/ts
","src/pytorch-sphinx-theme/docs/changelog","src/pytorch-sphinx-theme/docs/configuring","src/pytorch-sphinx-theme/docs/demo/api","src/pytorch-sphinx-theme/docs/demo/demo","src/pytorch-sphinx-theme/docs/demo/lists_tables","src/pytorch-sphinx-theme/docs/demo/long","src/pytorch-sphinx-theme/docs/demo/structure","src/pytorch-sphinx-theme/docs/index","src/pytorch-sphinx-theme/docs/installing","tutorials/creating_torchscript_module_in_python","tutorials/getting_started_with_fx_path","tutorials/notebooks","tutorials/ptq","tutorials/runtime","tutorials/serving_torch_tensorrt_with_triton","tutorials/use_from_pytorch","tutorials/using_dla"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":5,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":3,"sphinx.domains.rst":2,"sphinx.domains.std":2,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":2,"sphinx.ext.viewcode":1,nbsphinx:4,sphinx:56},filenames:["_cpp_api/classtorch__tensorrt_1_1DataType.rst","_cpp_api/classtorch__tensorrt_1_1Device_1_1DeviceType.rst","_cpp_api/classtorch__tensorrt_1_1TensorFormat.rst","_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator.rst","_cpp_api/classtorch__tensorrt_1_1ptq_1_1Int8Calibrator.rst","_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.rst","_cpp_api/define_macros_8h_1a282fd3c0b1c3a215148ae372070e1268.rst","_cpp_api/define_macros_8h_1a31398a6d4d27e28817afb0f0139e909e.rst","_cpp_api/define_macros_8h_1a35703561b26b1a9d2738ad7d58b27827.rst","_cpp_api/define_macros_8h_1abd1465eb38256d3f22cc1426b23d516b.rst","_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.rst","_cpp_api/define_macros_8h_1ad19939408f7be171a74a89928b36eb59.rst","_cpp_api/define_macros_8h_1adad592a7b1b7eed529cdf6acd584c883.rst","_cpp_api/dir_cpp.rst","_cpp_api/dir_cpp_include.rst","_cpp_api/dir_cpp_include_torch_tensorrt.rst","_cpp_api/enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558.rst","_cpp_api/enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb.rst","_cpp_api/file_cpp_include_torch_tensorrt_logging.h.rst","_cpp_api/file_cpp_include_torch_tensorrt_macros.h.rst","_cpp_api/file_cpp_include_torch_tensorrt_ptq.h.rst","_cpp_api/file_cpp_include_torch_tensorrt_torch_tensorrt.h.rst","_cpp_api/function_logging_8h_1a0593f776f469c20469e2f729fc7861a3.rst","_cpp_api/function_logging_8h_1a0c012cb374addd90eb1f42eaec570650.rst","_cpp_api/function_logging_8h_1a56e110feaaba2c3fd44bd201fd21a76a.rst","_cpp_api/function_logging_8h_1a7cb50492421ea9de4e3db895819df6f2.rst","_cpp_api/function_logging_8h_1ac46ac0901cb97e3ae6e93b45f24e90b8.rst","_cpp_api/function_logging_8h_1ad2efd47b6c3689e58ccc595680579ae5.rst","_cpp_api/function_logging_8h_1af8f3443813315af7901903d25dd495cc.rst","_cpp_api/function_ptq_8h_1a226e3c83379d1012cde8578c1c86b16c.rst","_cpp_api/function_ptq_8h_1a6186e305f47c1d94b6130ef6c7f7e178.rst","_cpp_api/function_torch__tensorrt_8h_1a5b405fd3bf3c8fc2e2a54cbbab979797.rst","_cpp_api/function_torch__tensorrt_8h_1a6e19490a08fb1553c9dd347a5ae79db9.rst","_cpp_api/function_torch__tensorrt_8h_1a710df824a7718b440e4bc17bf9693cef.rst","_cpp_api/function_torch__tensorrt_8h_1ac4ab8313ae72c2c899ea31548b528528.rst","_cpp_api/function_torch__tensorrt_8h_1ad1acd06eaeaffbbcf6e7ebf426891384.rst","_cpp_api/function_torch__tensorrt_8h_1ad6a4ee8ca6c8f6e5519eb1128ec7f4a1.rst","_cpp_api/function_torch__tensorrt_8h_1ae8d56472106eeef37fbe51ff7f40c9b2.rst","_cpp_api/namespace_torch_tensorrt.rst","_cpp_api/namespace_torch_tensorrt__logging.rst
","_cpp_api/namespace_torch_tensorrt__ptq.rst","_cpp_api/namespace_torch_tensorrt__torchscript.rst","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_logging.h.rst","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_macros.h.rst","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_ptq.h.rst","_cpp_api/program_listing_file_cpp_include_torch_tensorrt_torch_tensorrt.h.rst","_cpp_api/structtorch__tensorrt_1_1Device.rst","_cpp_api/structtorch__tensorrt_1_1GraphInputs.rst","_cpp_api/structtorch__tensorrt_1_1Input.rst","_cpp_api/structtorch__tensorrt_1_1torchscript_1_1CompileSpec.rst","_cpp_api/torch_tensort_cpp.rst","_cpp_api/unabridged_orphan.rst","cli/torchtrtc.rst","contributors/conversion.rst","contributors/lowering.rst","contributors/partitioning.rst","contributors/phases.rst","contributors/runtime.rst","contributors/system_overview.rst","contributors/useful_links.rst","contributors/writing_converters.rst","getting_started/getting_started_with_cpp_api.rst","getting_started/getting_started_with_python_api.rst","getting_started/getting_started_with_windows.rst","getting_started/installation.rst","index.rst","indices/supported_ops.rst","py_api/fx.rst","py_api/logging.rst","py_api/ptq.rst","py_api/torch_tensorrt.rst","py_api/ts.rst","src/pytorch-sphinx-theme/docs/changelog.rst","src/pytorch-sphinx-theme/docs/configuring.rst","src/pytorch-sphinx-theme/docs/demo/api.rst","src/pytorch-sphinx-theme/docs/demo/demo.rst","src/pytorch-sphinx-theme/docs/demo/lists_tables.rst","src/pytorch-sphinx-theme/docs/demo/long.rst","src/pytorch-sphinx-theme/docs/demo/structure.rst","src/pytorch-sphinx-theme/docs/index.rst","src/pytorch-sphinx-theme/docs/installing.rst","tutorials/creating_torchscript_module_in_python.rst","tutorials/getting_started_with_fx_path.rst","tutorials/notebooks.rst","tutorials/ptq.rst","tutorials/runtime.rst","tutorials/serving_torch_tensorrt_with_triton.rst","tutorials/use_from_pytorch.rst","tutorials/using_dla.rst"],objects:{"":[[5,0,1,"c.STR","STR"],[9,0,1,"c.TORCHTRT_API","TORCHTRT_API"],[11,0,1,"c.TORCHTRT_HIDDEN","TORCHTRT_HIDDEN"],[7,0,1,"c.TORCH_TENSORRT_MAJOR_VERSION","TORCH_TENSORRT_MAJOR_VERSION"],[8,0,1,"c.TORCH_TENSORRT_MINOR_VERSION","TORCH_TENSORRT_MINOR_VERSION"],[6,0,1,"c.TORCH_TENSORRT_PATCH_VERSION","TORCH_TENSORRT_PATCH_VERSION"],[12,0,1,"c.TORCH_TENSORRT_VERSION","TORCH_TENSORRT_VERSION"],[10,0,1,"c.XSTR","XSTR"],[0,1,1,"_CPPv4N14torch_tensorrt8DataTypeE","torch_tensorrt::DataType"],[0,2,1,"_CPPv4N14torch_tensorrt8DataType8DataTypeE5Value","torch_tensorrt::DataType::DataType"],[0,2,1,"_CPPv4N14torch_tensorrt8DataType8DataTypeEN3c1010ScalarTypeE","torch_tensorrt::DataType::DataType"],[0,2,1,"_CPPv4N14torch_tensorrt8DataType8DataTypeEv","torch_tensorrt::DataType::DataType"],[0,3,1,"_CPPv4N14torch_tensorrt8DataType8DataTypeE5Value","torch_tensorrt::DataType::DataType::t"],[0,3,1,"_CPPv4N14torch_tensorrt8DataType8DataTypeEN3c1010ScalarTypeE","torch_tensorrt::DataType::DataType::t"],[0,4,1,"_CPPv4N14torch_tensorrt8DataType5ValueE","torch_tensorrt::DataType::Value"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value5kBoolE","torch_tensorrt::DataType::Value::kBool"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value5kCharE","torch_tensorrt::DataType::Value::kChar"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value6kFloatE","torch_tensorrt::DataType::Value::kFloat"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value5kHalfE","torch_tensorrt::DataType::Value::kHalf"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value4kIntE","torch_tensorrt::DataType::Value::kInt"],[0,5,1,"_CPP
v4N14torch_tensorrt8DataType5Value8kUnknownE","torch_tensorrt::DataType::Value::kUnknown"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value5kBoolE","torch_tensorrt::DataType::kBool"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value5kCharE","torch_tensorrt::DataType::kChar"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value6kFloatE","torch_tensorrt::DataType::kFloat"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value5kHalfE","torch_tensorrt::DataType::kHalf"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value4kIntE","torch_tensorrt::DataType::kInt"],[0,5,1,"_CPPv4N14torch_tensorrt8DataType5Value8kUnknownE","torch_tensorrt::DataType::kUnknown"],[0,2,1,"_CPPv4NK14torch_tensorrt8DataTypecv5ValueEv","torch_tensorrt::DataType::operator Value"],[0,2,1,"_CPPv4N14torch_tensorrt8DataTypecvbEv","torch_tensorrt::DataType::operator bool"],[0,2,1,"_CPPv4NK14torch_tensorrt8DataTypeneE8DataType","torch_tensorrt::DataType::operator!="],[0,2,1,"_CPPv4NK14torch_tensorrt8DataTypeneEN8DataType5ValueE","torch_tensorrt::DataType::operator!="],[0,3,1,"_CPPv4NK14torch_tensorrt8DataTypeneE8DataType","torch_tensorrt::DataType::operator!=::other"],[0,3,1,"_CPPv4NK14torch_tensorrt8DataTypeneEN8DataType5ValueE","torch_tensorrt::DataType::operator!=::other"],[0,2,1,"_CPPv4NK14torch_tensorrt8DataTypeeqE8DataType","torch_tensorrt::DataType::operator=="],[0,2,1,"_CPPv4NK14torch_tensorrt8DataTypeeqEN8DataType5ValueE","torch_tensorrt::DataType::operator=="],[0,3,1,"_CPPv4NK14torch_tensorrt8DataTypeeqE8DataType","torch_tensorrt::DataType::operator==::other"],[0,3,1,"_CPPv4NK14torch_tensorrt8DataTypeeqEN8DataType5ValueE","torch_tensorrt::DataType::operator==::other"],[46,1,1,"_CPPv4N14torch_tensorrt6DeviceE","torch_tensorrt::Device"],[46,2,1,"_CPPv4N14torch_tensorrt6Device6DeviceEv","torch_tensorrt::Device::Device"],[1,1,1,"_CPPv4N14torch_tensorrt6Device10DeviceTypeE","torch_tensorrt::Device::DeviceType"],[46,1,1,"_CPPv4N14torch_tensorrt6Device10DeviceTypeE","torch_tensorrt::Device::DeviceType"],[1,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeE5Value","torch_tensorrt::Device::DeviceType::DeviceType"],[1,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeEN3c1010DeviceTypeE","torch_tensorrt::Device::DeviceType::DeviceType"],[1,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeEv","torch_tensorrt::Device::DeviceType::DeviceType"],[46,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeE5Value","torch_tensorrt::Device::DeviceType::DeviceType"],[46,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeEN3c1010DeviceTypeE","torch_tensorrt::Device::DeviceType::DeviceType"],[46,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeEv","torch_tensorrt::Device::DeviceType::DeviceType"],[1,3,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeE5Value","torch_tensorrt::Device::DeviceType::DeviceType::t"],[1,3,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeEN3c1010DeviceTypeE","torch_tensorrt::Device::DeviceType::DeviceType::t"],[46,3,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeE5Value","torch_tensorrt::Device::DeviceType::DeviceType::t"],[46,3,1,"_CPPv4N14torch_tensorrt6Device10DeviceType10DeviceTypeEN3c1010DeviceTypeE","torch_tensorrt::Device::DeviceType::DeviceType::t"],[1,4,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5ValueE","torch_tensorrt::Device::DeviceType::Value"],[46,4,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5ValueE","torch_tensorrt::Device::DeviceType::Value"],[1,5,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5Value4kDLAE","torch_tensorrt::Device::DeviceType::V
alue::kDLA"],[46,5,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5Value4kDLAE","torch_tensorrt::Device::DeviceType::Value::kDLA"],[1,5,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5Value4kGPUE","torch_tensorrt::Device::DeviceType::Value::kGPU"],[46,5,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5Value4kGPUE","torch_tensorrt::Device::DeviceType::Value::kGPU"],[1,5,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5Value4kDLAE","torch_tensorrt::Device::DeviceType::kDLA"],[1,5,1,"_CPPv4N14torch_tensorrt6Device10DeviceType5Value4kGPUE","torch_tensorrt::Device::DeviceType::kGPU"],[1,2,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypecv5ValueEv","torch_tensorrt::Device::DeviceType::operator Value"],[46,2,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypecv5ValueEv","torch_tensorrt::Device::DeviceType::operator Value"],[1,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceTypecvbEv","torch_tensorrt::Device::DeviceType::operator bool"],[46,2,1,"_CPPv4N14torch_tensorrt6Device10DeviceTypecvbEv","torch_tensorrt::Device::DeviceType::operator bool"],[1,2,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeneE10DeviceType","torch_tensorrt::Device::DeviceType::operator!="],[46,2,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeneE10DeviceType","torch_tensorrt::Device::DeviceType::operator!="],[1,3,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeneE10DeviceType","torch_tensorrt::Device::DeviceType::operator!=::other"],[46,3,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeneE10DeviceType","torch_tensorrt::Device::DeviceType::operator!=::other"],[1,2,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeeqE10DeviceType","torch_tensorrt::Device::DeviceType::operator=="],[46,2,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeeqE10DeviceType","torch_tensorrt::Device::DeviceType::operator=="],[1,3,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeeqE10DeviceType","torch_tensorrt::Device::DeviceType::operator==::other"],[46,3,1,"_CPPv4NK14torch_tensorrt6Device10DeviceTypeeqE10DeviceType","torch_tensorrt::Device::DeviceType::operator==::other"],[46,6,1,"_CPPv4N14torch_tensorrt6Device18allow_gpu_fallbackE","torch_tensorrt::Device::allow_gpu_fallback"],[46,6,1,"_CPPv4N14torch_tensorrt6Device11device_typeE","torch_tensorrt::Device::device_type"],[46,6,1,"_CPPv4N14torch_tensorrt6Device8dla_coreE","torch_tensorrt::Device::dla_core"],[46,6,1,"_CPPv4N14torch_tensorrt6Device6gpu_idE","torch_tensorrt::Device::gpu_id"],[17,4,1,"_CPPv4N14torch_tensorrt16EngineCapabilityE","torch_tensorrt::EngineCapability"],[17,5,1,"_CPPv4N14torch_tensorrt16EngineCapability15kDLA_STANDALONEE","torch_tensorrt::EngineCapability::kDLA_STANDALONE"],[17,5,1,"_CPPv4N14torch_tensorrt16EngineCapability7kSAFETYE","torch_tensorrt::EngineCapability::kSAFETY"],[17,5,1,"_CPPv4N14torch_tensorrt16EngineCapability9kSTANDARDE","torch_tensorrt::EngineCapability::kSTANDARD"],[47,1,1,"_CPPv4N14torch_tensorrt11GraphInputsE","torch_tensorrt::GraphInputs"],[47,6,1,"_CPPv4N14torch_tensorrt11GraphInputs15input_signatureE","torch_tensorrt::GraphInputs::input_signature"],[47,6,1,"_CPPv4N14torch_tensorrt11GraphInputs6inputsE","torch_tensorrt::GraphInputs::inputs"],[48,1,1,"_CPPv4N14torch_tensorrt5InputE","torch_tensorrt::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputEN2at6TensorE","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRef
I7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input"],[48,2,1,"_CPPv4N14torch_tensorrt5Input5InputEv","torch_tensorrt::Input::Input"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::dtype"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::dtype"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::dtype"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::dtype"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::format"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::max_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::max_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::max_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::max_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int
64_tEEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::min_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::min_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::min_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::min_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::opt_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::opt_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::opt_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEENSt6vectorI7int64_tEENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::opt_shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN3c108ArrayRefI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEE12TensorFormat","torch_tensorrt::Input::Input::shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputENSt6vectorI7int64_tEE8DataType12TensorFormat","torch_tensorrt::Input::Input::shape"],[48,3,1,"_CPPv4N14torch_tensorrt5Input5InputEN2at6TensorE","torch_tensorrt::Input::Input::tensor"],[48,6,1,"_CPPv4N14torch_tensorrt5Input5dtypeE","torch_tensorrt::Input::dtype"],[48,6,1,"_CPPv4N14torch_tensorrt5Input6formatE","torch_tensorrt::Input::format"],[48,6,1,"_CPPv4N14torch_tensorrt5Input9max_shapeE","torch_tensorrt::Input::max_shape"],[48,6,1,"_CPPv4N14torch_tensorrt5Input9min_shapeE","torch_tensorrt::Input::min_shape"],[48,6,1,"_CPPv4N14torch_tensorrt5Input9opt_shapeE","torch_tensorrt::Input::opt_shape"],[48,6,1,"_CPPv4N14torch_tensorrt5Input5shapeE","torch_tensorrt::Input::shape"],[2,1,1,"_CPPv4N14torch_tensorrt12TensorFormatE","torch_tensorrt::TensorFormat"],[2,2,1,"_CPPv4N14torch_tensorrt12TensorFormat12TensorFormatE5Value","torch_tensorrt::TensorFormat::TensorFormat"],[2,2,1,"_CPPv4N14torch_tensorrt12TensorFormat12TensorFormatEN2at12MemoryFormatE","torch_tensorrt::TensorFormat::TensorFormat"],[2,2,1,"_CPPv4N14torch_tensorrt12TensorFormat12TensorFormatEv","torch_tensorrt::TensorFormat::TensorFormat"],[2,3,1,"_CPPv4N14torch_tensorrt12TensorFormat12TensorFormatE5Value","torch_tensorrt::TensorFormat::TensorFormat::t"],[2,3,1,"_CPPv4N14torch_tensorrt12TensorFormat12TensorFormatEN2at12MemoryFormatE","torch_tensorrt::TensorFormat::TensorFormat::t"],[2,4,1,"_CPPv4N14torch_tensorrt12TensorFormat5ValueE","torch_tensorrt::TensorFormat::Value"],[2,5,1,"_CPPv4N14torch_tensorrt12TensorFormat5Value13kChannelsLastE","torch_tensorrt::TensorFormat::Value::kChannelsLast"],[2,5,1,"_CPPv4N14torch_tensorrt12TensorFormat5Value11kContiguousE","torch_tensorrt::TensorFormat::Value::kContiguous"],[2,5,1,"_CPPv4N14torch_tensorrt12TensorFormat5Value8kUnknownE","torch_tensorrt::TensorFormat::Value::kU
nknown"],[2,5,1,"_CPPv4N14torch_tensorrt12TensorFormat5Value13kChannelsLastE","torch_tensorrt::TensorFormat::kChannelsLast"],[2,5,1,"_CPPv4N14torch_tensorrt12TensorFormat5Value11kContiguousE","torch_tensorrt::TensorFormat::kContiguous"],[2,5,1,"_CPPv4N14torch_tensorrt12TensorFormat5Value8kUnknownE","torch_tensorrt::TensorFormat::kUnknown"],[2,2,1,"_CPPv4NK14torch_tensorrt12TensorFormatcv5ValueEv","torch_tensorrt::TensorFormat::operator Value"],[2,2,1,"_CPPv4N14torch_tensorrt12TensorFormatcvbEv","torch_tensorrt::TensorFormat::operator bool"],[2,2,1,"_CPPv4NK14torch_tensorrt12TensorFormatneE12TensorFormat","torch_tensorrt::TensorFormat::operator!="],[2,2,1,"_CPPv4NK14torch_tensorrt12TensorFormatneEN12TensorFormat5ValueE","torch_tensorrt::TensorFormat::operator!="],[2,3,1,"_CPPv4NK14torch_tensorrt12TensorFormatneE12TensorFormat","torch_tensorrt::TensorFormat::operator!=::other"],[2,3,1,"_CPPv4NK14torch_tensorrt12TensorFormatneEN12TensorFormat5ValueE","torch_tensorrt::TensorFormat::operator!=::other"],[2,2,1,"_CPPv4NK14torch_tensorrt12TensorFormateqE12TensorFormat","torch_tensorrt::TensorFormat::operator=="],[2,2,1,"_CPPv4NK14torch_tensorrt12TensorFormateqEN12TensorFormat5ValueE","torch_tensorrt::TensorFormat::operator=="],[2,3,1,"_CPPv4NK14torch_tensorrt12TensorFormateqE12TensorFormat","torch_tensorrt::TensorFormat::operator==::other"],[2,3,1,"_CPPv4NK14torch_tensorrt12TensorFormateqEN12TensorFormat5ValueE","torch_tensorrt::TensorFormat::operator==::other"],[36,2,1,"_CPPv4N14torch_tensorrt15dump_build_infoEv","torch_tensorrt::dump_build_info"],[34,2,1,"_CPPv4N14torch_tensorrt14get_build_infoEv","torch_tensorrt::get_build_info"],[16,4,1,"_CPPv4N14torch_tensorrt7logging5LevelE","torch_tensorrt::logging::Level"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level6kDEBUGE","torch_tensorrt::logging::Level::kDEBUG"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level6kERRORE","torch_tensorrt::logging::Level::kERROR"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level6kGRAPHE","torch_tensorrt::logging::Level::kGRAPH"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level5kINFOE","torch_tensorrt::logging::Level::kINFO"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level15kINTERNAL_ERRORE","torch_tensorrt::logging::Level::kINTERNAL_ERROR"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level8kWARNINGE","torch_tensorrt::logging::Level::kWARNING"],[24,2,1,"_CPPv4N14torch_tensorrt7logging24get_is_colored_output_onEv","torch_tensorrt::logging::get_is_colored_output_on"],[22,2,1,"_CPPv4N14torch_tensorrt7logging18get_logging_prefixEv","torch_tensorrt::logging::get_logging_prefix"],[23,2,1,"_CPPv4N14torch_tensorrt7logging24get_reportable_log_levelEv","torch_tensorrt::logging::get_reportable_log_level"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level6kDEBUGE","torch_tensorrt::logging::kDEBUG"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level6kERRORE","torch_tensorrt::logging::kERROR"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level6kGRAPHE","torch_tensorrt::logging::kGRAPH"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level5kINFOE","torch_tensorrt::logging::kINFO"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level15kINTERNAL_ERRORE","torch_tensorrt::logging::kINTERNAL_ERROR"],[16,5,1,"_CPPv4N14torch_tensorrt7logging5Level8kWARNINGE","torch_tensorrt::logging::kWARNING"],[26,2,1,"_CPPv4N14torch_tensorrt7logging3logE5LevelNSt6stringE","torch_tensorrt::logging::log"],[26,3,1,"_CPPv4N14torch_tensorrt7logging3logE5LevelNSt6stringE","torch_tensorrt::logging::log::lvl"],[26,3,1,"_CPPv4N14torch_tensorrt7logging3logE5LevelNSt6stringE","torch_tensorrt::logging:
:log::msg"],[27,2,1,"_CPPv4N14torch_tensorrt7logging24set_is_colored_output_onEb","torch_tensorrt::logging::set_is_colored_output_on"],[27,3,1,"_CPPv4N14torch_tensorrt7logging24set_is_colored_output_onEb","torch_tensorrt::logging::set_is_colored_output_on::colored_output_on"],[28,2,1,"_CPPv4N14torch_tensorrt7logging18set_logging_prefixENSt6stringE","torch_tensorrt::logging::set_logging_prefix"],[28,3,1,"_CPPv4N14torch_tensorrt7logging18set_logging_prefixENSt6stringE","torch_tensorrt::logging::set_logging_prefix::prefix"],[25,2,1,"_CPPv4N14torch_tensorrt7logging24set_reportable_log_levelE5Level","torch_tensorrt::logging::set_reportable_log_level"],[25,3,1,"_CPPv4N14torch_tensorrt7logging24set_reportable_log_levelE5Level","torch_tensorrt::logging::set_reportable_log_level::lvl"],[3,1,1,"_CPPv4I0EN14torch_tensorrt3ptq19Int8CacheCalibratorE","torch_tensorrt::ptq::Int8CacheCalibrator"],[3,7,1,"_CPPv4I0EN14torch_tensorrt3ptq19Int8CacheCalibratorE","torch_tensorrt::ptq::Int8CacheCalibrator::Algorithm"],[3,2,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE","torch_tensorrt::ptq::Int8CacheCalibrator::Int8CacheCalibrator"],[3,3,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE","torch_tensorrt::ptq::Int8CacheCalibrator::Int8CacheCalibrator::cache_file_path"],[3,2,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8CacheCalibrator::getBatch"],[3,3,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8CacheCalibrator::getBatch::bindings"],[3,3,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8CacheCalibrator::getBatch::names"],[3,3,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8CacheCalibrator::getBatch::nbBindings"],[3,2,1,"_CPPv4NK14torch_tensorrt3ptq19Int8CacheCalibrator12getBatchSizeEv","torch_tensorrt::ptq::Int8CacheCalibrator::getBatchSize"],[3,2,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibratorcvPN8nvinfer115IInt8CalibratorEEv","torch_tensorrt::ptq::Int8CacheCalibrator::operator 
nvinfer1::IInt8Calibrator*"],[3,2,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t","torch_tensorrt::ptq::Int8CacheCalibrator::readCalibrationCache"],[3,3,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t","torch_tensorrt::ptq::Int8CacheCalibrator::readCalibrationCache::length"],[3,2,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t","torch_tensorrt::ptq::Int8CacheCalibrator::writeCalibrationCache"],[3,3,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t","torch_tensorrt::ptq::Int8CacheCalibrator::writeCalibrationCache::cache"],[3,3,1,"_CPPv4N14torch_tensorrt3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t","torch_tensorrt::ptq::Int8CacheCalibrator::writeCalibrationCache::length"],[4,1,1,"_CPPv4I00EN14torch_tensorrt3ptq14Int8CalibratorE","torch_tensorrt::ptq::Int8Calibrator"],[4,7,1,"_CPPv4I00EN14torch_tensorrt3ptq14Int8CalibratorE","torch_tensorrt::ptq::Int8Calibrator::Algorithm"],[4,7,1,"_CPPv4I00EN14torch_tensorrt3ptq14Int8CalibratorE","torch_tensorrt::ptq::Int8Calibrator::DataLoaderUniquePtr"],[4,2,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb","torch_tensorrt::ptq::Int8Calibrator::Int8Calibrator"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb","torch_tensorrt::ptq::Int8Calibrator::Int8Calibrator::cache_file_path"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb","torch_tensorrt::ptq::Int8Calibrator::Int8Calibrator::dataloader"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb","torch_tensorrt::ptq::Int8Calibrator::Int8Calibrator::use_cache"],[4,2,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8Calibrator::getBatch"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8Calibrator::getBatch::bindings"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8Calibrator::getBatch::names"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator8getBatchEA_PvA_PKci","torch_tensorrt::ptq::Int8Calibrator::getBatch::nbBindings"],[4,2,1,"_CPPv4NK14torch_tensorrt3ptq14Int8Calibrator12getBatchSizeEv","torch_tensorrt::ptq::Int8Calibrator::getBatchSize"],[4,2,1,"_CPPv4N14torch_tensorrt3ptq14Int8CalibratorcvPN8nvinfer115IInt8CalibratorEEv","torch_tensorrt::ptq::Int8Calibrator::operator 
nvinfer1::IInt8Calibrator*"],[4,2,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator20readCalibrationCacheER6size_t","torch_tensorrt::ptq::Int8Calibrator::readCalibrationCache"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator20readCalibrationCacheER6size_t","torch_tensorrt::ptq::Int8Calibrator::readCalibrationCache::length"],[4,2,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t","torch_tensorrt::ptq::Int8Calibrator::writeCalibrationCache"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t","torch_tensorrt::ptq::Int8Calibrator::writeCalibrationCache::cache"],[4,3,1,"_CPPv4N14torch_tensorrt3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t","torch_tensorrt::ptq::Int8Calibrator::writeCalibrationCache::length"],[29,2,1,"_CPPv4I0EN14torch_tensorrt3ptq26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE","torch_tensorrt::ptq::make_int8_cache_calibrator"],[29,7,1,"_CPPv4I0EN14torch_tensorrt3ptq26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE","torch_tensorrt::ptq::make_int8_cache_calibrator::Algorithm"],[29,3,1,"_CPPv4I0EN14torch_tensorrt3ptq26make_int8_cache_calibratorE19Int8CacheCalibratorI9AlgorithmERKNSt6stringE","torch_tensorrt::ptq::make_int8_cache_calibrator::cache_file_path"],[30,2,1,"_CPPv4I00EN14torch_tensorrt3ptq20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb","torch_tensorrt::ptq::make_int8_calibrator"],[30,7,1,"_CPPv4I00EN14torch_tensorrt3ptq20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb","torch_tensorrt::ptq::make_int8_calibrator::Algorithm"],[30,7,1,"_CPPv4I00EN14torch_tensorrt3ptq20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb","torch_tensorrt::ptq::make_int8_calibrator::DataLoader"],[30,3,1,"_CPPv4I00EN14torch_tensorrt3ptq20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb","torch_tensorrt::ptq::make_int8_calibrator::cache_file_path"],[30,3,1,"_CPPv4I00EN14torch_tensorrt3ptq20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb","torch_tensorrt::ptq::make_int8_calibrator::dataloader"],[30,3,1,"_CPPv4I00EN14torch_tensorrt3ptq20make_int8_calibratorE14Int8CalibratorI9Algorithm10DataLoaderE10DataLoaderRKNSt6stringEb","torch_tensorrt::ptq::make_int8_calibrator::use_cache"],[35,2,1,"_CPPv4N14torch_tensorrt10set_deviceEKi","torch_tensorrt::set_device"],[35,3,1,"_CPPv4N14torch_tensorrt10set_deviceEKi","torch_tensorrt::set_device::gpu_id"],[49,1,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpecE","torch_tensorrt::torchscript::CompileSpec"],[49,2,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec11CompileSpecEN5torch3jit6IValueE","torch_tensorrt::torchscript::CompileSpec::CompileSpec"],[49,2,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec11CompileSpecENSt6vectorI5InputEE","torch_tensorrt::torchscript::CompileSpec::CompileSpec"],[49,2,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec11CompileSpecENSt6vectorIN3c108ArrayRefI7int64_tEEEE","torch_tensorrt::torchscript::CompileSpec::CompileSpec"],[49,2,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec11CompileSpecENSt6vectorINSt6vectorI7int64_tEEEE","torch_tensorrt::torchscript::CompileSpec::CompileSpec"],[49,3,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec11CompileSpecENSt6vectorIN3c108ArrayRefI7int64_tEEEE","torch_tensorrt::torchscript::CompileSpec::CompileSpec::fixed_sizes"],[49,3,1,"_CPPv4N14torch_tensorr
t11torchscript11CompileSpec11CompileSpecENSt6vectorINSt6vectorI7int64_tEEEE","torch_tensorrt::torchscript::CompileSpec::CompileSpec::fixed_sizes"],[49,3,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec11CompileSpecEN5torch3jit6IValueE","torch_tensorrt::torchscript::CompileSpec::CompileSpec::input_signature"],[49,3,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec11CompileSpecENSt6vectorI5InputEE","torch_tensorrt::torchscript::CompileSpec::CompileSpec::inputs"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec10capabilityE","torch_tensorrt::torchscript::CompileSpec::capability"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec5debugE","torch_tensorrt::torchscript::CompileSpec::debug"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec6deviceE","torch_tensorrt::torchscript::CompileSpec::device"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec12disable_tf32E","torch_tensorrt::torchscript::CompileSpec::disable_tf32"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec20dla_global_dram_sizeE","torch_tensorrt::torchscript::CompileSpec::dla_global_dram_size"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec19dla_local_dram_sizeE","torch_tensorrt::torchscript::CompileSpec::dla_local_dram_size"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec13dla_sram_sizeE","torch_tensorrt::torchscript::CompileSpec::dla_sram_size"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec18enabled_precisionsE","torch_tensorrt::torchscript::CompileSpec::enabled_precisions"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec12graph_inputsE","torch_tensorrt::torchscript::CompileSpec::graph_inputs"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec14min_block_sizeE","torch_tensorrt::torchscript::CompileSpec::min_block_size"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec20num_avg_timing_itersE","torch_tensorrt::torchscript::CompileSpec::num_avg_timing_iters"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec14ptq_calibratorE","torch_tensorrt::torchscript::CompileSpec::ptq_calibrator"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec5refitE","torch_tensorrt::torchscript::CompileSpec::refit"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec24require_full_compilationE","torch_tensorrt::torchscript::CompileSpec::require_full_compilation"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec14sparse_weightsE","torch_tensorrt::torchscript::CompileSpec::sparse_weights"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec22torch_executed_modulesE","torch_tensorrt::torchscript::CompileSpec::torch_executed_modules"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec18torch_executed_opsE","torch_tensorrt::torchscript::CompileSpec::torch_executed_ops"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec24truncate_long_and_doubleE","torch_tensorrt::torchscript::CompileSpec::truncate_long_and_double"],[49,6,1,"_CPPv4N14torch_tensorrt11torchscript11CompileSpec14workspace_sizeE","torch_tensorrt::torchscript::CompileSpec::workspace_size"],[31,2,1,"_CPPv4N14torch_tensorrt11torchscript29check_method_operator_supportERKN5torch3jit6ModuleENSt6stringE","torch_tensorrt::torchscript::check_method_operator_support"],[31,3,1,"_CPPv4N14torch_tensorrt11torchscript29check_method_operator_supportERKN5torch3jit6ModuleENSt6stringE","torch_tensorrt::torchscript::check_method_operator_support::method_name"],[31,3,1,"_CPPv4N14torch_tensorrt11torchscript29check_method_operator_supportERKN5torch3j
it6ModuleENSt6stringE","torch_tensorrt::torchscript::check_method_operator_support::module"],[32,2,1,"_CPPv4N14torch_tensorrt11torchscript7compileERKN5torch3jit6ModuleE11CompileSpec","torch_tensorrt::torchscript::compile"],[32,3,1,"_CPPv4N14torch_tensorrt11torchscript7compileERKN5torch3jit6ModuleE11CompileSpec","torch_tensorrt::torchscript::compile::info"],[32,3,1,"_CPPv4N14torch_tensorrt11torchscript7compileERKN5torch3jit6ModuleE11CompileSpec","torch_tensorrt::torchscript::compile::module"],[37,2,1,"_CPPv4N14torch_tensorrt11torchscript28convert_method_to_trt_engineERKN5torch3jit6ModuleENSt6stringE11CompileSpec","torch_tensorrt::torchscript::convert_method_to_trt_engine"],[37,3,1,"_CPPv4N14torch_tensorrt11torchscript28convert_method_to_trt_engineERKN5torch3jit6ModuleENSt6stringE11CompileSpec","torch_tensorrt::torchscript::convert_method_to_trt_engine::info"],[37,3,1,"_CPPv4N14torch_tensorrt11torchscript28convert_method_to_trt_engineERKN5torch3jit6ModuleENSt6stringE11CompileSpec","torch_tensorrt::torchscript::convert_method_to_trt_engine::method_name"],[37,3,1,"_CPPv4N14torch_tensorrt11torchscript28convert_method_to_trt_engineERKN5torch3jit6ModuleENSt6stringE11CompileSpec","torch_tensorrt::torchscript::convert_method_to_trt_engine::module"],[33,2,1,"_CPPv4N14torch_tensorrt11torchscript26embed_engine_in_new_moduleERKNSt6stringE6Device","torch_tensorrt::torchscript::embed_engine_in_new_module"],[33,3,1,"_CPPv4N14torch_tensorrt11torchscript26embed_engine_in_new_moduleERKNSt6stringE6Device","torch_tensorrt::torchscript::embed_engine_in_new_module::device"],[33,3,1,"_CPPv4N14torch_tensorrt11torchscript26embed_engine_in_new_moduleERKNSt6stringE6Device","torch_tensorrt::torchscript::embed_engine_in_new_module::engine"],[70,8,0,"-","torch_tensorrt"]],"torch_tensorrt.Device":[[70,10,1,"","__init__"],[70,11,1,"","allow_gpu_fallback"],[70,11,1,"","device_type"],[70,11,1,"","dla_core"],[70,11,1,"","gpu_id"]],"torch_tensorrt.Input":[[70,10,1,"","__init__"],[70,11,1,"","dtype"],[70,11,1,"","format"],[70,11,1,"","shape"],[70,11,1,"","shape_mode"]],"torch_tensorrt.fx":[[67,9,1,"","InputTensorSpec"],[67,9,1,"","TRTInterpreter"],[67,9,1,"","TRTInterpreterResult"],[67,9,1,"","TRTModule"]],"torch_tensorrt.logging":[[68,9,1,"","Level"],[68,9,1,"","debug"],[68,9,1,"","errors"],[68,12,1,"","get_is_colored_output_on"],[68,12,1,"","get_logging_prefix"],[68,12,1,"","get_reportable_log_level"],[68,9,1,"","graphs"],[68,9,1,"","info"],[68,9,1,"","internal_errors"],[68,12,1,"","log"],[68,12,1,"","set_is_colored_output_on"],[68,12,1,"","set_logging_prefix"],[68,12,1,"","set_reportable_log_level"],[68,9,1,"","warnings"]],"torch_tensorrt.logging.Level":[[68,11,1,"","Debug"],[68,11,1,"","Error"],[68,11,1,"","Graph"],[68,11,1,"","Info"],[68,11,1,"","InternalError"],[68,11,1,"","Warning"]],"torch_tensorrt.ptq":[[69,9,1,"id1","CacheCalibrator"],[69,9,1,"id2","CalibrationAlgo"],[69,9,1,"id0","DataLoaderCalibrator"],[69,12,1,"","get_batch"],[69,12,1,"","get_batch_size"],[69,12,1,"","get_cache_mode_batch"],[69,12,1,"","read_calibration_cache"],[69,12,1,"","write_calibration_cache"]],"torch_tensorrt.ptq.CacheCalibrator":[[69,10,1,"","__init__"]],"torch_tensorrt.ptq.CalibrationAlgo":[[69,11,1,"","ENTROPY_CALIBRATION"],[69,11,1,"","ENTROPY_CALIBRATION_2"],[69,11,1,"","LEGACY_CALIBRATION"],[69,11,1,"","MINMAX_CALIBRATION"]],"torch_tensorrt.ptq.DataLoaderCalibrator":[[69,10,1,"","__init__"]],"torch_tensorrt.ts":[[71,12,1,"","TensorRTCompileSpec"],[71,12,1,"","check_method_op_support"],[71,12,1,"","compile"],[71,12,1,"","convert_method_
to_trt_engine"],[71,12,1,"","embed_engine_in_new_module"]],torch_tensorrt:[[70,9,1,"","Device"],[70,9,1,"","DeviceType"],[70,9,1,"","EngineCapability"],[70,9,1,"","Input"],[70,9,1,"","TensorFormat"],[70,12,1,"","compile"],[70,12,1,"","convert_method_to_trt_engine"],[70,9,1,"","dtype"],[70,12,1,"","dump_build_info"],[67,8,0,"-","fx"],[70,12,1,"","get_build_info"],[68,8,0,"-","logging"],[69,8,0,"-","ptq"],[70,12,1,"","set_device"],[71,8,0,"-","ts"]]},objnames:{"0":["c","macro","C macro"],"1":["cpp","class","C++ class"],"10":["py","method","Python method"],"11":["py","attribute","Python attribute"],"12":["py","function","Python function"],"2":["cpp","function","C++ function"],"3":["cpp","functionParam","C++ function parameter"],"4":["cpp","enum","C++ enum"],"5":["cpp","enumerator","C++ enumerator"],"6":["cpp","member","C++ member"],"7":["cpp","templateParam","C++ template parameter"],"8":["py","module","Python module"],"9":["py","class","Python class"]},objtypes:{"0":"c:macro","1":"cpp:class","10":"py:method","11":"py:attribute","12":"py:function","2":"cpp:function","3":"cpp:functionParam","4":"cpp:enum","5":"cpp:enumerator","6":"cpp:member","7":"cpp:templateParam","8":"py:module","9":"py:class"},terms:{"0":[33,43,44,45,49,52,58,60,61,64,66,68,69,70,71,72,74,75,82,84,86,87,88],"0000":76,"01":[61,66,76],"0208":61,"03":76,"0358":61,"0383":61,"04":[61,86],"0435":61,"0464":61,"0530":61,"0678":61,"0805":61,"0818":61,"0932":61,"096fd41":75,"0a0":75,"0x7f9bda9065f0":71,"1":[3,4,33,43,44,45,48,49,52,54,55,57,60,61,62,64,66,67,68,69,70,71,72,73,75,76,79,81,82,83,84,87,88],"10":[49,61,64,71,79,81,83,84,86],"100":[67,82],"1000":86,"1012":54,"1013":54,"1024":[52,70,71,83],"1045":61,"1048576":[45,49,71],"1056":61,"1063":61,"1073741824":[45,49,71],"109":61,"11":[54,61,64,75,79,86],"119":81,"12":[54,61,75,79,81,86],"120":[61,81],"123":76,"129":81,"13":[75,79],"136":86,"137":81,"138":81,"14":[79,86],"1409":84,"15":[75,79],"1502":61,"1549":61,"1556":84,"16":[61,62,70,79,81],"1691":61,"17":79,"18":[61,79],"19":[76,79],"1994":84,"1d":54,"1e":52,"2":[33,43,55,60,61,64,66,68,69,70,71,73,75,76,79,81,82,84],"20":79,"2009":84,"2010":84,"2012":76,"2014":84,"2020":[61,65],"2023":84,"22":86,"224":[55,67,70,71,83,86],"225":[67,86],"229":86,"23":[49,54,71,76],"234375":86,"24":54,"244":[70,71],"248":54,"249":54,"25":[61,67,82],"256":86,"258":75,"27":61,"28":61,"2802":61,"2822":75,"287":75,"29":61,"2c3":76,"3":[45,49,52,54,55,57,61,64,66,68,69,70,71,75,76,79,81,82,83,84,87,88],"300":[52,87],"31":61,"32":[52,61,62,70,81,84,88],"320":84,"32bit":52,"33":61,"346":61,"35":61,"36":61,"3677":54,"37":61,"38":81,"39":81,"3d":82,"4":[57,61,64,66,68,73,75,76,79,82],"406":86,"429688":86,"4465":84,"456":86,"468750":86,"4822":84,"485":86,"4914":84,"5":[52,57,58,61,64,68,70,75,76,79,81,82,86],"50":83,"512":[52,70,71,83],"523438":86,"53":76,"536870912":[45,49,71],"539":61,"56":61,"576":61,"6":[54,57,61,64,66,79,81],"622":54,"64":[62,82],"64bit":52,"664062":86,"7":[57,58,61,79],"72048":64,"7302":76,"8":[3,52,54,61,64,70,75,76,79,86],"8000":86,"8001":86,"8002":86,"84":[61,81],"9":[61,79,86],"90":86,"92":86,"9223372036854775807":66,"abstract":[57,60,76],"boolean":[70,82],"break":[75,82],"byte":[70,71,83],"case":[0,1,2,46,49,53,57,60,64,82,84,85],"catch":[54,61],"char":[3,4,44,52,61],"class":[17,29,30,44,45,46,51,57,60,61,62,68,75,76,81,82,83,84],"const":[0,1,2,3,4,29,30,31,32,33,35,37,44,45,46,54,60,61,66,84],"default":[0,1,2,3,4,16,29,30,43,45,46,48,49,52,55,61,62,64,67,70,71,73,74,75,82,84,87],"do":[53,54,55,60,61,62,74,76,81,82,84,88],"enu
m":[0,1,2,42,45,46,51,68,71,84],"export":64,"final":[53,56,58,64,83],"float":[49,52,61,62,66,70,81,84,87],"function":[0,1,2,3,4,46,48,49,51,54,55,57,60,61,64,81,82,83,84,86,87,88],"import":[52,54,55,61,62,64,73,75,81,82,85,86,87],"int":[0,3,4,35,44,45,49,52,61,66,67,70,71,73],"long":[49,52,53,75,76],"new":[0,1,2,3,4,32,33,46,48,49,57,58,60,61,68,71,75,82,86],"public":[0,1,2,3,4,44,45,46,47,48,49,76,84],"return":[0,1,2,3,4,23,24,29,30,31,32,33,34,37,42,43,44,45,46,54,56,57,58,60,61,62,68,70,71,81,82,84,86],"short":[54,75,76],"static":[48,49,53,60,61,70,71,73],"super":[44,81],"throw":[52,54,61],"true":[0,1,2,4,46,49,54,55,60,61,66,67,70,71,73,76,82,84,86,87,88],"try":[58,61,75,76,87],"var":66,"void":[3,4,25,26,27,28,35,36,42,44,45],"while":[64,83,84,86],A:[4,29,30,32,33,47,48,54,55,60,64,71,76,84,86],And:61,As:[61,82],At:74,But:[61,75],By:[29,30,51,55,73,81],For:[53,55,61,64,67,73,75,76,81,82,83,84,85,86,87],If:[27,53,54,61,62,64,67,68,70,73,75,82,84,85,86,88],In:[0,1,2,46,53,56,57,58,60,62,64,65,75,76,78,82,83,84,85,86],Is:[24,70],It:[52,54,55,56,58,60,64,73,75,82,83],Its:[60,75],Not:3,On:55,One:[61,75,76,82,83],Or:75,THE:75,TO:61,That:75,Thats:61,The:[1,46,48,49,52,53,54,55,56,57,58,60,62,64,68,70,71,73,76,81,82,83,84,86,87],Then:[55,64,84,87],There:[4,53,58,60,64,76,81,82,83,84,85,86],These:[53,57,73,75,84,86],To:[1,46,52,55,61,62,64,73,81,86,87],Will:31,With:[61,73,75,84,86],_:[75,82],___torch_mangle_10:81,___torch_mangle_4847:57,___torch_mangle_5:81,___torch_mangle_9:81,__and__:66,__attribute__:43,__getitem__:66,__gnuc__:43,__init__:[69,70,75,81],__is__:66,__isnot__:66,__not__:66,__or__:66,__range_length:66,__round_to_zero_floordiv:66,__torch__:[57,61,81],__torch___pytorch_detection_ssd_src_model_ssd300_trt_engin:57,__torch___torchvision_models_resnet____torch_mangle_4847_resnet_trt_engin:57,__visibility__:43,__xor__:66,_all_:54,_c:[71,87],_convolut:[61,66],_jit_to_backend:87,_script:71,_shapemod:70,_theme:80,_validate_not_a_forked_repo:86,a1b:76,aarch64:58,ab:66,abi:85,abl:[53,54,60,65,82,84,87],about:[52,53,57,60,61,64,70,73,86],abov:[25,61,64,68,74,75,82],absolut:52,ac:78,acc_mod:82,acc_norm:82,acc_op:82,acc_op_convert:82,acc_ops_sigmoid:82,acc_trac:82,acceler:88,accept:[48,52,57,60,61,62,70],access:[54,60,61,65,73,82,87],accord:[60,71],accordingli:[73,82],account:86,accumsan:78,accumul:[49,71],accuraci:[83,84],achiev:83,aco:66,acosh:66,acoust:83,acquir:61,across:[49,52,54,71,73],acthardtanh:60,action:[75,82],activ:[61,71,75,82,83,84,88],activationtyp:[60,82],actual:[54,57,60,61,68,81,82],ad:[25,52,53,82],adaptive_avg_pool1d:66,adaptive_avg_pool2d:66,adaptive_avg_pool3d:66,adaptive_max_pool1d:66,adaptive_max_pool2d:66,adaptive_max_pool3d:66,add:[26,53,54,55,60,61,62,64,66,68,73,75,80],add_:[54,61,66],add_activ:82,addactiv:60,addit:[54,61,70,82,83],addlay:61,address:76,addshuffl:61,adipisc:[76,78],adjac:75,adjust:75,adopt:83,advanc:[76,84],advis:75,aenean:78,afford:82,aforement:86,after:[52,53,54,55,61,62,63,65,81,82,85,86],again:[44,57,60,75],against:[52,61],agx:45,ahead:61,aim:54,algo_typ:[69,84],algorithm:[3,4,29,30,44,69,82,84],algorithm_selector:82,alias:43,align:75,align_corn:66,aliquam:78,aliquet:[76,78],all:[16,42,43,44,45,49,52,54,55,57,61,62,63,64,68,70,75,76,81,82,83,84,85,86],alloc:60,allow:[48,49,52,53,54,70,73,82],allow_gpu_fallback:[45,46,70,71,84,87,88],allow_tf32:66,almost:61,alpha:[66,76,82],alreadi:[52,53,54,61,84],also:[29,53,60,61,62,64,65,73,75,76,83,84],altern:[48,83],although:75,altogeth:[55,73],alwai:[3,4,27,52,75],amet:[76,78],an:[2,3,4,48,49,52,53,54,55,56,57
,58,60,61,62,63,64,65,69,70,71,73,75,76,81,82,83,84,85,86],analogu:60,analysi:55,analyt:73,analytics_id:73,ancient:75,ani:[48,52,53,60,61,62,64,70,73,75,82,84],ann:75,annot:[60,61],anonym:75,anoth:[62,75,76,81],ant:78,anyon:76,anyth:[75,76,85],aot:[61,65],api:[55,58,60,61,62,70,71,74,82,83,84,85,86,87],appear:75,append:66,appli:84,applic:[1,29,46,52,54,58,61,62,85,87,88],apr:61,ar:[42,46,49,52,53,54,55,57,58,60,61,64,65,70,71,73,75,76,77,81,82,83,84,85,86,87],arab:76,arang:66,architectur:[64,65,83],archiv:64,arcu:[76,78],area:77,aren:61,arg:[53,61,69,70,79,82,83],arg_replacement_tupl:82,argc:61,argument:[48,52,54,57,60,61,70,71,75,76,82],argv:61,around:[54,57,60,75,78,81],arrai:[3,4,33,53,71],arrayref:[45,48,49],artifact:63,arxiv:84,as_numpi:86,asin:66,asinh:66,aspect:52,assembl:[53,61],assign:[3,4,74],associ:[53,60,61],associatevalueandivalu:60,associatevalueandtensor:[60,61],assum:87,atan:66,atanh:66,aten:[49,54,55,59,60,61,66,71],atol:52,attrdict:86,attribut:[54,55,57,61,75,82],auctor:78,audio:83,augu:78,author:76,auto:[44,55,60,61,75,76,84,88],autodoc:[75,76],automat:[61,75],avail:[52,60,64,73,82,88],averag:[49,52,71],avg:52,avg_pool1d:66,avg_pool2d:66,avg_pool3d:66,awai:75,awaken:75,axi:66,b0:83,b:[63,64,66,76,86],b_hh:66,b_ih:66,back:[54,55,57,58,61,70,75,81],back_insert:44,backend:[71,74,87],background:[75,81],backlink:75,backward:82,bar:[73,75],base:[36,50,57,64,68,69,70,75,81,83,84],bash:64,basi:75,basic:[52,76,82,86],batch:[3,4,44,67,82,84,86,88],batch_norm:[60,66],batch_siz:[44,84],batched_data_:44,batchnorm:54,batchtyp:44,bathroom:75,bazel:[58,64],bazel_vers:64,bazelbuild:64,bazelisk:64,bazelvers:64,bdist_wheel:64,beat:76,becaus:[60,61,64,67,81,82],becom:60,bee:75,been:[53,60,61,76],befor:[49,54,58,60,61,64,65,71,82,86],beforehand:61,begin:[44,64,75,82],beginn:81,begun:75,behav:77,behavior:[49,70,71,82],behind:75,being:[61,82],belong:75,below:[60,61,64,75,82,86],benchmark:66,benefit:[60,61],besid:75,best:[64,75,82],beta:[66,71,82],better:[81,83],between:[54,60,64,75,76,84],bia:[54,61,66],bibendum:78,bibliograph:76,bigger:75,bin:64,binari:[44,84],binary_data:86,bind:[3,4,33,44,71,75],bird:86,bit:[49,60,61,70,71,82],bitbucket:73,bitbucket_url:73,blandit:78,blank:75,blob:[59,73,84],block0:54,block1:54,block:[52,53,54,79],blue:75,bmm:66,bodi:[75,76],bold:75,bool:[0,1,2,3,4,24,27,30,31,42,44,45,46,49,54,60,61,66,67,68,70,71,73,84],border:75,both:[64,73,75,81,84],bottom:73,box:75,bracket:75,branch:64,bread:75,brief:55,briefli:81,brontosaurus:75,browser:75,bsd:[42,43,44,45],buffer:[3,4,82],bug:64,bui:76,build:[29,30,34,49,52,53,56,58,60,61,70,74,79,82,84],build_fil:64,build_model:82,buildcommandarg:63,builder:82,builderconfig:45,buildroot:63,built:[33,52,57,58,64,71],builtin:82,button:[73,75],bytearrai:82,c10:[0,1,45,46,48,49,61,84],c:[42,43,44,45,52,58,62,66,76,85,86,88],c_api:59,c_str:[60,61],cach:[3,4,29,30,44,52,61,69,82,84],cache_:44,cache_fil:[44,69,84],cache_file_path:[3,4,29,30,44],cache_file_path_:44,cache_size_:44,cachecalibr:[69,84],cackl:76,calcul:[48,53,55,61],calibr:[3,4,29,30,44,49,52,61,69,71,84],calibration_cache_fil:[29,30,84],calibration_dataload:[30,84],calibration_dataset:84,calibrationalgo:[69,84],call:[29,30,32,49,54,57,60,61,71,75,81,82,83,87],call_funct:82,callmethod:81,can:[0,1,4,29,30,37,46,47,48,49,52,53,54,55,56,57,58,60,61,62,64,70,71,73,75,81,82,83,84,85,86,87],canada:76,cannot:[48,54,55,64,70,71,74,81,82],canon:73,canonical_url:73,capabl:[17,45,49,52,57,70,71,87],capit:75,caption:[75,78],cast:[3,4,54],cat:[64,66],caught:54,caus:[60,64,73],cd:[64,86],cdll
:61,ceil:66,ceil_mod:66,cell:76,centercrop:86,cerr:61,certain:[64,82],cfg:55,chain:60,challeng:86,chanc:60,chang:[29,54,58,71,73,82,84,86],changelog:79,channel:[2,70,74],channel_last:[70,71,83],channels_last:70,charact:75,check:[0,1,31,46,52,54,60,61,64,71,82,85,86],check_method_op_support:71,check_method_operator_support:[21,41,45,50],checkmethodoperatorsupport:61,child:76,children:82,choic:[64,69],choos:[81,82],cifar10:84,cifar:84,clamp:66,clamp_max:66,clamp_min:66,class_count:86,classif:[61,81,83],classifi:[76,83],classification_index:86,clean:75,clear:44,cli:[52,62],click:63,clickabl:75,clone:66,close:61,closer:54,closet:75,cmake:63,cmake_build_typ:63,cmake_module_path:63,cmakecommandarg:63,cmakeset:63,cnn:83,co:[66,76,83],code:[55,58,61,63,65,74,76,81,82,84],coeffici:83,collapse_navig:73,collat:76,collect:[61,71],colon:75,color:[24,27,68,75],colored_output_on:[27,42,68],column:76,com:[59,61,64,84,85,86],combin:82,come:[64,74,82,86],command:[52,61,64,75,76,81,86],comment:[64,75],commodo:78,common:[53,54,67,75,82],common_subexpression_elimin:54,commonli:76,commun:[49,52,61,63,71],compar:[62,82],comparis:[0,2],comparison:[1,46],compat:[0,1,46,54,57,64,71,82],compil:[21,31,37,41,45,49,50,52,54,55,57,60,62,68,70,71,73,81,82,84,85,86,87,88],compile_spec:[84,88],compilegraph:[61,84],compilesepc:33,compilespec:[3,4,21,32,37,41,45,50,55,61,84,88],compilespecstruct:50,complet:[55,61,81],complex:[47,49,64,81],complianc:52,compliat:84,complic:64,compon:[56,58,81,85],compos:[81,82,84,86],composit:61,compound:83,comput:[49,75,82,83,84],conceiv:75,concorr:86,condimentum:78,condit:75,conf:[73,80],confidence_scor:86,config:[64,86],configur:[32,37,48,61,64,65,70,71,79,84,86],configurationtyp:63,congu:78,connect:[54,71,75,86,88],consectetur:[76,78],consecut:55,consid:[61,71],consider:86,consist:[54,75,82],consol:52,consolid:81,constant:[53,54,61],constant_pad_nd:66,constexpr:[0,1,2,45,46],construct:[0,1,2,3,4,46,48,49,53,54,56,58,60,61,69,70,75,76,82,84],constructor:[0,2,46,48,49,57,81],consult:74,consum:[4,53,81],contact:76,contain:[30,31,52,53,54,60,61,64,67,70,75,76,81,82,84,85,86],content:[79,84,86],context:[53,56,57,58,68],contextnet:83,contigu:[2,48,49,52,70,71],continu:[75,82,85],contributor:61,control:[81,82],conv1:[61,81],conv2:[61,81],conv2d:81,conv:[49,52,61],conval:78,convect:48,conveni:[83,84],converison:82,convers:[54,55,57,61,70,71,82],conversionctx:[60,61],convert:[3,4,31,32,37,52,54,55,56,58,62,65,70,71,83,85,87],convert_method_to_trt_engin:[21,41,45,50,70,71,87],convertgraphtotrtengin:61,convien:49,convienc:[3,4,49],convolut:[71,84,88],convtert:82,coordin:58,copi:[44,60,66,69,76,82,86],copy_:66,copyright:[42,43,44,45,61,76],core:[45,52,54,55,58,61,70,88],core_id:70,corpor:[42,43,44,45],corpu:83,correct:[57,64,73],correctli:64,correspond:[60,64,82],cosh:66,could:82,count_include_pad:66,coupl:[53,58,82,85],cout:61,cover:83,cp:64,cpp:[14,15,42,43,44,45,51,54,58,61,64,84],cpp_frontend:84,cppdirectori:50,cppdoc:61,cpu:67,cra:78,creat:[29,30,33,52,53,57,60,61,65,71,75,82,86],credit:61,criteria:[55,56,58],cross:75,cs:84,csrc:[54,59],cstddef:84,ctestcommandarg:63,ctrl:63,ctx:[60,61],ctype:61,cuda113:64,cuda:[49,57,61,62,63,64,67,70,84,86,87],cuda_graph_batch_s:67,cuda_runtim:[21,45],cudafloattyp:61,cudasetdevic:35,cudnn:63,cudnn_en:66,cumsum:66,curabitur:78,curl:[64,75],current:[23,57,60,64,71,73,82],cursu:78,custom:[52,64,82],custom_class:[21,45],custom_mapp:82,customclasshold:[45,48],cut:75,cxx11:85,d:[52,75,76,88],dapibu:78,data:[0,2,3,4,29,30,44,46,48,49,52,53,55,56,58,60,66,69,70,71,75,79,
83,84],data_dir:84,data_item_1:74,data_typ:86,dataclass:82,dataflow:[60,61],dataload:[4,29,30,44,49,69,84],dataloader_:44,dataloadercalibr:[69,84],dataloaderopt:84,dataloaderuniqueptr:[4,44],dataset:[29,69,83,84],datatyp:[1,21,38,45,46,48,49,50,62,70,71,86],datatypeclass:50,date:76,david:76,dbg:64,dcmake_build_typ:64,dcmake_module_path:64,dead_code_elimin:54,deal:60,debug:[16,27,45,49,52,60,63,68,71,87],debugg:[52,71],decid:70,declar:64,deconvolut:88,decor:82,dedic:[54,76],deep:[60,65,73,84,88],deeplearn:[59,82],def:[75,81,82,86],defin:[0,1,2,3,4,16,17,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,43,46,47,48,49,51,52,61,62,70,73,81,82,83,84],definit:[51,60,75],deiti:75,delet:[0,1,2,45,46,54],delimit:54,demo:[75,84],demonstr:[75,76,77,83,84,86],demostr:83,denot:75,dep:64,depend:[29,34,53,55,58,61,62,82,85,86],depickl:57,deploi:[56,58,61,65,84,86],deploy:[52,61,62,83,84,85,86,88],deprec:[66,82],depth:[73,83],descclassnam:75,descnam:75,describ:[49,60,81,86,87],descript:[55,76],deseri:[61,70,71],design:[82,83,88],desir:[76,84],destini:76,destroi:[60,76],destructor:60,detail:[61,81,82,85,86],detect:[48,57],determin:[54,82],determinist:66,develop:[61,64,65,75,76,82],deviat:52,devic:[21,33,35,38,45,49,50,52,57,62,66,67,69,70,71,83,84,87,88],device_typ:[45,46,70,84,87,88],deviceclass:50,devicetyp:[21,38,45,46,50,70,84,87,88],devicetypestruct:50,diam:78,dict:[70,71],dictionari:[70,71,87],dictum:78,dictumst:78,did:75,didn:75,differ:[29,54,58,64,65,73,81,82],dignissim:78,dilat:66,dim0:66,dim1:66,dim:[66,86],dim_int:66,dim_intlist:66,dimens:[48,54,67,82,83],direct:[79,85],directli:[60,63,64,65,69,84],directori:[18,19,20,21,42,43,44,45,50,63,64,84],disabl:[52,68,73,74],disable_tf32:[45,49,71,84],disclos:64,disconnect:75,discret:75,discuss:86,displai:[52,68,73],display_github:73,display_gitlab:73,display_vers:73,dist:64,distdir:64,distribut:[61,70,84,85],div:66,div_:66,divisor_overrid:66,django:74,dl:75,dl_open:85,dla:[1,45,46,49,52,65,70,71],dla_cor:[45,46,52,70,84,87,88],dla_global_dram_s:[45,49,52,71],dla_local_dram_s:[45,49,52,71],dla_sram_s:[45,49,52,71],dla_standalon:52,dlacor:52,dll:52,doc:[58,59,64,73,74,75,80],docker:86,docsrc:58,docstr:[75,76],document:[42,43,44,45,50,58,61,73,75,76,80,81,84,85,86,87],docutil:[75,76],doe:[43,44,54,55,60,75,82,84],doesn:[61,64,75,81],dolor:[76,78],domain:[76,84],don:[60,73,75,76,82,84,86],done:[53,55,58,86],donec:[76,78],dont:42,dothismethod:75,dotpai:74,dotpayprovid:74,doubl:[49,52,71,75],down:[64,73,82],download:[64,79,84,86],downstream:83,doxygen_should_skip_thi:[44,45],dpython:[70,71],dram:52,dream:76,driver:64,drop:[64,73],dt:75,dtensorrt_root:64,dtorch_dir:64,dtyep:67,dtype:[45,48,49,52,62,66,67,70,71,82,83],dual:75,due:[3,4,64,74,75],dui:[76,78],dump:[36,52,64],dump_build_info:[21,38,45,50,70],durat:75,dure:[49,52,60,69,83,84,85],dynam:[48,49,67,70,71,82],dynamic_batch:82,e:[29,30,52,54,60,61,64,67,70,81,82,84],each:[3,4,49,53,54,55,57,60,61,64,67,73,75,82],ear:75,earli:82,eas:43,easi:[52,53,54,61,84],easier:[56,58,60,61,82,84],easiest:64,easili:[3,4],echo:75,edg:75,edit:[63,73],edu:84,effect:[54,61,73,82,83,84],effici:60,efficientnet:83,efficitur:78,eg:86,egesta:78,eget:78,either:[47,48,52,60,61,62,64,70,71,73,75,81],el:66,eleifend:76,element:[57,75,76,79,82],element_typ:44,elementum:78,elit:[76,78],elk:75,els:[43,44,48,71,75,76],elu:66,emb:[33,52,71,76],embed:[52,57,66,71,75,88],embed_engine_in_new_modul:[21,41,45,50,71],emit:53,emphasi:75,empti:[49,67,71,76,81],emum:[16,17],en:73,enabl:[3,4,24,49,52,55,56,58,68,69,71,73,82],enable_precis:61,enabled_
precis:[45,49,61,62,70,71,84,86,87,88],enalbed_precis:88,encod:[57,83],encount:64,encourag:86,end:[44,52,60,61,66,71,75,84],end_dim:[61,66],endif:[43,44,45],energi:75,enforc:61,engin:[0,1,17,32,33,37,45,46,48,49,52,53,55,56,58,61,62,65,67,70,71,73,84,85,87,88],engine_converted_from_jit:61,enginecap:[21,38,45,49,50,70,71,87],english:83,enhanc:75,enim:78,ensur:[29,54,55],enter:53,entir:75,entiti:75,entri:[49,60],entropi:[29,30,84],entropy_calibr:69,entropy_calibration_2:[69,84],enumer:[0,1,2,16,17,46,69],environ:[82,86],ep:66,eq:[66,75],equat:75,equival:[32,56,58,60,61,71,81,84],equivil:37,erat:78,erf:66,eric:75,ero:78,error:[16,49,52,53,54,58,61,64,68,71,75,82],essenc:75,essenti:82,est:78,et:78,etc:[73,75,82,88],etiam:78,eu:78,euismod:78,eval:[61,62,86],evalu:[56,57,58],evaluated_value_map:[53,60],even:61,event:48,everi:[55,61,67],everyth:16,ex:[0,1,2,33,46,71,76,78],exact:86,examin:82,exampl:[48,55,57,58,60,61,63,65,68,70,71,73,74,76,79,81,82,84,85,86],exceedingli:75,except:82,exception_elimin:54,excerpt:76,excit:83,execpt:54,execut:[33,49,52,54,56,57,58,61,64,70,71,81,82,84,86],execute_engin:[57,61],exert:75,exeuct:57,exhaust:61,exist:[4,31,32,37,64,69,70,71,82,83,84],exit:86,exp:66,expand:[54,66],expand_a:66,expect:[48,54,60,61,70,83],experiment:82,explain:82,explan:82,explic:44,explicit:[0,1,2,3,4,45,46,54,65,75,82,84],explicit_batch_dimens:[67,82],explicit_precis:67,explicitli:[55,56,58,62,84,87],explict:44,explictli:0,expon:66,expos:84,express:75,ext:[75,76],extend:[56,58,60,61,66,83],extent:[61,65],extern:[73,75],extra:61,extract:[61,83],extrem:75,ey:75,f16:[52,61,88],f32:52,f:[64,75,81,82],facilisi:78,fact:64,facto:75,factori:[4,29,30,84],fail:[61,88],fake_quantize_per_channel_affin:66,fake_quantize_per_tensor_affin:66,fall:70,fallback:[52,56,58,60,88],fals:[0,1,2,3,4,44,45,46,49,61,66,67,70,71,73,74,75,76,82,84,87],fame:78,familiar:86,far:[75,82],fashion:[61,83],fast:[49,52,71],faster:83,faucibu:78,fc1:[61,81],fc2:[61,81],fc3:[61,81],fc:[49,52,54],feat:[61,81],featur:[52,55,61,82,83,84,87],fed:[3,4,48],feed:[29,30,61],feedforward:83,feel:65,feli:78,feugiat:[76,78],few:[64,70,82],field:[3,4,67,84],fifth:76,figur:[55,76,78],file:[0,1,2,3,4,5,6,7,8,9,10,11,12,16,17,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,46,47,48,49,52,55,57,58,61,63,64,69,70,71,73,74,76,80,82,84,86],file_path:52,filepath:63,find:[4,61,64,82],finder:64,finibu:78,finish:82,first:[48,53,54,61,62,75,76,82,84,86],firstli:86,fit:75,fix:[49,75,82,88],fixed_s:[45,49],flag:[52,55,56,58,62,64,69,85],flaten:49,flatten:[45,47,61,66,81],flatten_convert:61,flesh:86,float16:[52,70],float32:[48,49,52,70,71,82],float64:71,float_int:66,floor:66,floor_divid:66,floordiv:66,flow:[60,75,81,82,83],flox:75,flush:75,fly:81,fold:76,folder:[63,82],follow:[33,52,55,57,61,63,64,71,73,75,76,80,81,82,83,84,85,86],foo:[75,76,82],foo_kwarg:82,foo_nod:82,forc:[52,71,73,82],force_fp32_output:82,forced_fallback_op:55,form:[53,70,75,86],format:[33,45,48,49,52,62,66,70,71,75,76,83,86],forth:76,forum:64,forward:[29,30,32,33,55,57,60,61,70,71,81,84,87],found:[42,43,44,45,61,64,75,84,85],four:[75,76],fp16:[0,48,49,52,61,62,65,88],fp32:[0,48,49,52,65,71,82,83,84,86],frac:75,freed:60,freeze_modul:54,friend:45,fringilla:78,from:[0,1,2,3,4,29,30,44,46,48,49,52,53,54,55,56,57,58,60,61,65,67,71,73,74,75,76,81,82,83,84,86],from_tensor:82,frontend:[62,65],fssl:64,fstream:[20,44],full:[45,49,52,60,61,68,84,85,86,88],fulli:[31,52,54,61,71,84,88],further:82,fusc:78,fuse_addmm_branch:54,fuse_flatten_linear:54,fuse_linear:54,fusion:[60,82],futur:[71,82],fx2trt_ex
ampl:82,fx:[62,65,70],g:[29,30,52,54,64,67,70,75,82,84],g_:75,gamma:66,gatewai:74,gaurd:43,gcc:[58,61],ge:66,gear:84,gener:[3,4,29,52,54,57,58,60,61,63,64,67,73,75,76,79,81,82,84],get:[0,1,2,3,4,23,34,44,46,54,55,60,61,64,68,70,82,83,84,86],get_batch:69,get_batch_impl:44,get_batch_s:69,get_build_info:[21,38,45,50,70],get_cache_mode_batch:69,get_is_colored_output_on:[18,39,42,50,68],get_logging_prefix:[18,39,42,50,68],get_output:82,get_reportable_log_level:[18,39,42,50,68],getattr:[54,57,61,81],getbatch:[3,4,44],getbatchs:[3,4,44],getdimens:[60,61],getoutput:[60,61],git:79,github:[59,61,64,73,84,85,86],github_url:73,gitlab:73,gitlab_url:73,give:[73,75,82],given:[48,49,52,54,61,62,67,69,70,71,81,82,87],global:[26,52,61],gnu:64,go:[44,54,55,61,65,81,82,83,86],goal:60,goe:[75,82],good:[44,60,75,82],goodger:76,googl:73,got:[61,75],gpu:[1,32,35,37,45,46,52,61,70,71,82,84,86,87,88],gpu_id:[35,45,46,52,70,84,87,88],graph:[16,31,32,37,45,52,53,55,56,58,60,61,65,68,71,81,82,83],graph_input:[45,49],graph_modul:67,graphinput:[21,38,45,49,50],graphinputsstruct:50,graphmodul:[62,67],gravida:78,great:[61,75],greater:68,group:[66,75,76],grpc:86,gru_cel:66,gt:66,gtc:65,guangzhou:76,guard:54,guard_elimin:54,gui:75,guid:[65,74],gulf:86,gz:[75,76,84],h:[0,1,2,3,4,5,6,7,8,9,10,11,12,15,16,17,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,46,47,48,49,50,51,52,54,61,84],ha:[49,53,54,55,56,58,60,61,63,67,75,76,81,82,83,84],habit:78,habitass:78,hac:78,hack:44,hakaimagazin:86,half:[52,61,62,70,75,84,86,87,88],hand:86,handl:[54,57,82],happen:[81,82],hardtanh:[60,66],hardtanh_:66,hardwar:88,has_batch_dim:67,hash:64,have:[29,33,44,52,53,54,60,61,64,65,67,70,71,75,81,82,83,84,86],haven:61,header:[61,73,75,76,86],heart:76,heaven:75,heck:75,heh:76,hehe:76,height:75,help:[27,52,53,60,61,82,83,85],helper:60,hendrerit:78,here:[44,53,55,57,61,64,73,75,76,81,82,84,85,86],hermet:64,hexagram:75,hfile:50,hi:[66,75,76],hidden:[43,73],high:[54,55,73],higher:[54,73,75,81],highli:[83,86],highlight:75,hinton:84,hold:[46,47,48,53,60,84],holder:[57,77],holi:75,home:64,hood:58,hope:76,host:[49,52,64,71,86],how:[3,4,64,75,77,79,81,83,85,86,87],howev:[29,64,73,74,86],html:[59,64,75,81,84],html_theme:80,html_theme_opt:73,html_theme_path:80,http:[59,61,64,73,75,81,83,84,85,86],http_archiv:64,httpclient:86,hub:86,huggingfac:83,human:75,humankind:76,hx:66,hybrid:71,hyperlink:75,hyphen:75,i8:52,i:[52,54,60,61,75,76,81,84],iaculi:78,icon:[73,75],id:[35,45,52,70,73,74,78,88],idea:[54,75],ident:52,idx:66,ifndef:[44,45],ifstream:44,ignor:70,iii:76,iint8calibr:[3,4,29,30,44,45,49,71,84],iint8entropycalibrator2:[3,4,29,30,44,84],iint8minmaxcalibr:[29,30,84],ilay:60,illustr:[82,83],imag:[84,86],imagenet:83,imagenett:83,images_:84,img1:86,img:86,img_path:86,imperdiet:78,implement:[3,4,54,55,57,61,74,82,84,85],implic:54,implicit:[66,75,82],implicitli:70,implictli:70,improv:76,in_shap:61,in_tensor:81,incas:44,includ:[13,15,16,34,36,42,43,44,45,51,52,55,56,57,58,61,64,67,73,75,81,82,84],includedirectori:50,includehidden:73,incompat:64,incorpor:76,indent:75,index:[33,59,65,66,71,73,79,84],indic:[66,73,75],indirect:75,inetworkdefinit:53,infer:[54,61,70,71,82,83,84],inference_output:86,inferenceservercli:86,inferinput:86,inferrequestedoutput:86,info:[16,32,37,45,52,60,61,68,70],inform:[25,33,34,36,48,52,53,55,57,61,64,65,67,68,70,75,81,82,84,87],infrastructur:[84,86],ingest:58,inherit:[50,82,84],inheritenviron:63,initi:75,injuri:75,inlin:[0,1,2,3,4,29,30,44,46,48,54,61,76,79],inner:[49,76,83],input0:61,input1:61,input2:61,input:[3,4,21,29,33,38,44,45,47,49,
50,52,53,54,55,57,60,61,62,66,67,68,70,71,76,81,82,83,84,86,87,88],input_0:[57,61],input__0:86,input_data:[62,81],input_file_path:[52,88],input_is_dynam:45,input_nam:[67,82],input_s:[55,61],input_scal:66,input_shap:[84,88],input_signatur:[45,47,49,71],input_spec:[52,67,82],input_tensor_spec:[67,82],input_v:82,inputclass:50,inputrang:[55,61],inputtensorspec:[67,82],insert:[61,84],inserting_befor:82,insid:[75,86],inspect:[60,61,81],instal:[61,65,79,85,86],installroot:63,instanc:[54,61,69,81,83],instance_norm:66,instanti:[56,57,58,60,61],instatin:[0,1,2,46],instead:[49,52,53,54,61,64,85],instnanti:57,instruct:[55,56,58,61,64,82,86],insur:64,int32:[70,71,83],int64:71,int64_t:[45,46,48,49,84,88],int8:[0,44,48,49,52,65,70,71,84,88],int8_t:[17,45],int8cachecalibr:[20,29,40,44,50],int8cachecalibratortempl:50,int8calibr:[3,20,30,40,44,50],int8calibratornamespac:50,int_float:66,integ:[70,78],integr:65,intend:64,intent:[54,75],interact:75,interdum:78,interest:[54,75],interfac:[0,1,2,46,57,58,60,84],interfer:75,intermedi:[16,49,52,68,71,81],intern:[1,16,46,60,61,68,75],internal_error:68,internalerror:68,interpol:75,interpret:[57,75,82],intro_to_torchscript_tutori:81,introduc:[82,83],invok:[61,81,82],io:[44,86],iostream:[20,21,44,45,61],ipso:75,ipsum:[76,78],ir:[56,58,60,62,70,81],is_floating_point:66,is_train:84,iscustomclass:60,isinst:82,isn:[73,75],issu:[3,4,61,64],istensor:60,istream_iter:44,it_:44,ital:75,item:[74,76],itensor:[53,60,61,82],iter:[20,44,49,52,53,69,71],its:[29,53,57,60,64,75],itself:[0,1,2,46,52,54,64,86,87],iv:76,ivalu:[45,47,49,53,57,60,61],jan:76,jetpack:64,jetpack_5:64,jetpack_x:64,jetson:[70,83],jit:[31,32,33,37,45,47,49,52,53,54,55,56,57,58,59,60,61,62,70,71,81,86,87],jp_workspac:64,jpg:86,json:63,jump:86,just:[44,45,54,61,62,65,68,75,77,81,82,83,85,87],justo:[76,78],k:[66,84],kbool:[0,45],kchannelslast:[2,45],kchar:[0,45],kclip:60,kcontigu:[2,45,48],kcpu:[1,46],kcuda:[1,46,55,61],kdebug:[16,42,44],kdla:[1,45,46,88],kdla_standalon:[17,45],keepdim:66,kei:[75,81,86],kept:76,kernel:[48,49,52,60,70,71,82],kernel_s:66,kerror:[16,42],keyboard:75,keyword:[70,71],kf16:[84,88],kfloat:[0,45,49],kgpu:[1,45,46],kgraph:[16,42,54],khalf:[0,45,61],ki8:84,kind:[53,70,82],kinfo:[16,42,44],kint:[0,45],kinternal_error:[16,42],know:[42,60,73,75],knowledg:75,kriz:84,krizhevski:84,ksafeti:[17,45],kstandard:[17,45,49],ktest:84,ktrain:84,kunknown:[0,2,45],kwarg:[69,70,82,83],kwarn:[16,42],l:66,label:[75,83,84,86],lacinia:78,lack:[55,56,58,82],lacu:78,laid:61,lambda:[60,61,75,86],lang:74,languag:[74,75,76,81,86],laoreet:78,larg:[56,58,61,73,75,83,84],larger:[73,83],largest:66,last:[2,54,70,82],lastli:86,later:[29,61],latest:[64,73],launch:86,layer:[46,49,52,53,54,60,61,71,82,83,84,86,88],layer_norm:66,layout:[2,48,66,70,71],ld_library_path:64,ld_preload:85,ldd:64,le:66,lead:75,leader:75,leaky_relu:66,leaky_relu_:66,learn:[61,65,84,86,88],leas:76,least:[75,76],leav:54,lectu:[76,78],left:[73,75],legacy_calibr:69,legend:75,len:66,lenet:[61,81],lenet_script:[61,81],lenetclassifi:81,lenetfeatextractor:81,length:[3,4,44,66,76,82],leo:78,let:[46,52,54,60,70,71,73,75,82,83,86],letter:[76,83],level:[18,23,25,26,39,42,44,50,54,55,58,68,71,79,81,82,86],levelnamespac:50,leverag:[82,84],lib:[54,61,64],libero:[76,78],librari:[34,42,43,44,45,52,56,57,58,60,61],libtorch:[4,36,60,61,63,64,84],libtorch_pre_cxx11_abi:64,libtorchtrt:[52,61,64],libtorchtrt_plugin:85,libtorchtrt_runtim:85,licens:[42,43,44,45,61],light:75,ligula:78,like:[52,53,54,57,60,61,62,64,74,75,81,82,84,85,86],limit:[54,68,74,84],line:[52,61,76],linear
:[2,66,70,81],link:[52,53,61,65,73,74,79,85],linux:[58,61,64],list:[18,19,20,21,31,49,51,53,55,57,60,61,62,64,66,67,70,71,79,82,86],listconstruct:[53,57,61],listunpack:[57,61],liter:76,literal:76,literal_block:75,live:[60,75],ll:82,lo:66,load:[52,55,57,61,62,69,71,82,83,84,85,86,87],load_librari:85,loading_data_recip:84,loborti:[76,78],local:[52,54,61,73],localhost:86,locat:[64,84],lock:74,log:[15,16,19,20,38,44,50,51,54,60,65,66,70,82],log_debug:60,logger:68,logger_level:67,loggingenum:50,logic:82,login:86,logist:82,loglevel:68,logo_onli:73,lone:76,longer:[73,85],look:[53,54,81,84,86,87],loop:82,loop_unrol:54,lorem:[76,78],lose:73,loss:[83,84],lot:60,low:82,lower:[16,68,70,76,83],lower_exampl:82,lower_graph:54,lower_precis:82,lower_tupl:54,loweralltupl:54,lowersimpletupl:54,lstm_cell:66,lt:66,ltorchtrt:85,luctu:78,lvl:[25,26,42],m:76,machin:[57,84,86],macro:[5,6,7,8,9,10,11,12,15,18,20,21,42,44,45,50,51],mad:75,made:[54,56,58,75],maecena:78,magna:78,mai:[53,57,58,61,62,71,75,76,81,82,84,86],main:[54,55,56,57,58,60,61,73,75,77,82],mainli:82,maintain:[55,57,60],major:[58,82],make:[53,61,62,64,75,77,82,83,84,86,88],make_data_load:[4,84],make_int8_cache_calibr:[20,40,44,50,84],make_int8_calibr:[20,29,40,44,50,84],malesuada:78,man:[75,76],manag:[49,52,53,56,58,60,61,63,68,70,71],mangag:54,mani:[73,75,76,82],mantissa:[49,71],manual:[74,75,82],map:[1,46,53,54,56,58,60,61,82,83,84,86,87],mapper:82,mark:[54,73],marknodesforfallback:54,markup:[76,79],markup_process:75,mask:66,masked_fil:66,massa:78,master:[59,64,75,84,85],mat2:66,match:[54,64],math:79,matmul:[54,61,66],matrix:59,matter:82,matti:76,matur:58,mauri:[76,78],max:[48,52,60,66,70,73],max_batch_s:[82,86],max_c:52,max_h:52,max_input_shap:67,max_n:52,max_pool1d:66,max_pool2d:[61,66,81],max_pool3d:66,max_shap:[45,48,62,70,71,82,83],max_val:[60,66],max_w:52,max_workspace_s:82,maximu:78,maximum:[48,49,52,71,82,86],mayb:75,mb:52,md:59,me:[75,76],mean:[55,60,65,66,82,86],mechan:[60,82,83],medium:75,meet:70,member:[46,47,48,49,70],memeori:2,memori:[20,21,44,45,54,60,61,62,70,71],memory_format:[66,70],memoryformat:[2,45],men:75,mental:75,menu:[52,73,75],menuselect:75,messag:[16,25,26,52,68],meta:[79,82],metadata:[49,52,57,60,71,73],meth:75,method:[31,32,33,37,48,52,54,60,61,64,70,71,75,81,83,87],method_nam:[31,37,45,52,61,70,71],metu:78,mi:78,microsoft:63,middl:75,might:[54,64,73],min:[48,52,60,66,70],min_block_s:[45,49,55,71],min_c:52,min_h:52,min_input_shap:67,min_n:52,min_shap:[45,48,62,70,71,82,83],min_val:[60,66],min_w:52,mind:75,mine:75,minim:84,minimum:[48,49,52,55,68,71],minmax:[29,30,84],minmax_calibr:69,misbuild:73,miss:[61,75],mkdir:64,mm:86,mmb:75,mobilenet_v2:87,mobilenetv2:83,mod:[52,55,61,79,82,84],mode:[62,82,84],mode_:84,model:[52,55,57,61,62,65,67,68,81,84,87],model_nam:86,model_repositori:86,model_torchtrt:68,model_trt:68,modifi:[76,82],modul:[31,32,33,37,45,49,52,55,56,57,58,60,62,63,64,65,67,70,71,74,75,76,82,83,84,87,88],modular:61,module_fallback:54,module_nam:52,molesti:78,momentum:66,morbi:78,more:[53,61,64,65,70,73,76,81,82,84,85,86,87],most:[58,64,67,82,85,86],mother:75,motion:75,mous:75,move:[30,44,54,57,61,71,84],msg:[26,42,68],msvc_x64_x64:63,mu:75,much:[60,73,75,84],mul:66,mul_:66,multi:52,multipl:[57,75,76,84,86],multipli:[49,71],must:[33,48,49,52,54,55,60,61,64,70,71,75,76,82,85],mutil:76,my:75,my_pytorch_model:82,myclass:75,mymodel:[55,62],myself:76,n:[52,60,61,84],nabla:75,nam:[76,78],name:[3,4,31,33,37,44,55,57,60,61,63,64,69,71,75,76,81,82,86,87],namedtupl:82,namespac:[42,43,44,45,51,54,65,84],narrow:66,nativ:[58
,59,61],native_funct:59,natur:75,nav:[73,79],navig:73,navigation_depth:73,nbbind:[3,4,44],nchw:[2,70,71],ne:[54,66],nec:78,necessari:[42,85],need:[0,1,2,25,29,43,46,53,54,60,61,62,64,67,75,82,83,84,85,86],neg:66,negative_slop:66,nequ:[76,78],nest:[45,49,50,75,76],net:[60,61,75,76],netu:78,network:[29,30,60,61,82,83,84,86,88],neural:88,new_lay:60,new_local_repositori:64,new_siz:84,newer:64,next:[3,4,53,57,73,75,76,84,86],ngc:[64,86],nhwc:[2,52,70],nibh:[76,78],nice:64,nickel:75,night:76,nightli:82,ninja:[63,64],nisi:78,nisl:78,nlp:[29,30,84],nn:[54,59,61,62,70,71,81,82],node:[54,55,56,58,60,61,82,83],node_info:[60,61],noexcept:[3,4,44,84],non:[76,78],non_block:66,none:[60,66,67,70,71,73,75,82],nonetheless:75,nonexist:75,norm:66,normal:[0,1,2,46,61,75,81,82,84,86,88],normalized_shap:66,noskipw:44,notatemoduleforfallback:54,note:[1,46,48,60,61,63,64,70,73,75,82,88],notebook:[58,65],now:[54,58,60,61,64,75,82,87],np:86,nu:75,nulla:78,nullptr:[44,45,49],num:52,num_avg_timing_it:[45,49,71,87],num_it:52,num_op:52,num_work:84,number:[3,4,49,52,54,55,60,61,62,70,71,73,82,83],numel:66,numer:[52,76,82],numpi:86,nunc:78,nvcr:86,nvidia:[32,37,42,43,44,45,52,59,61,64,70,71,82,86,88],nvinfer1:[3,4,29,30,44,45,49,60,84],nvinfer:[20,44],o:[64,75,86],obj:66,object:[0,1,2,3,4,46,48,49,52,57,60,68,69,71,84,87],obtain:83,obvious:81,odio:[76,78],off:[55,57],offici:64,ofstream:[44,61],often:75,oh:76,ok:[61,75],okai:49,older:58,onc:[42,43,44,45,53,54,57,82,84,85,86],one:[47,54,60,61,62,68,70,75,81,82,86],ones:[42,55,56,58,61,64,75],onli:[1,3,4,16,29,44,46,48,52,54,55,58,60,64,67,68,70,75,82,84,85,88],onnx:54,onto:[52,57],op:[52,53,54,56,58,60,61,70,85],op_and_target:82,op_nam:52,open:[63,83,86],oper:[0,1,2,3,4,31,44,45,46,49,52,53,54,55,56,57,58,60,62,65,70,71,82,84,88],oppos:71,opset:[56,58],opt:[48,64,70],opt_c:52,opt_h:52,opt_n:52,opt_shap:[45,48,62,70,71,83],opt_w:52,optim:[48,52,54,61,62,65,67,81,82,83],optimin:48,optimiz:81,optimize_target_shap:82,optimized_input_shap:67,optimz:86,option:[44,48,52,55,56,58,64,70,75,79,82,84,85,88],orchestra:75,orci:78,order:[49,55,60,61,62,64,67,71,82],org:[59,61,64,73,75,81,84],organ:76,origin:82,ornar:[76,78],os:45,ostream:45,other:[0,1,2,45,46,52,53,54,57,61,62,64,65,66,74,75,82,85],otherwis:[64,85],our:[55,58,61,81,86],out:[31,44,53,54,55,56,58,60,61,63,64,68,71,75,86],out_shap:61,out_tensor:[60,61],output0:54,output:[24,27,33,49,52,53,54,55,57,60,61,64,68,71,73,75,76,82,83,86],output__0:86,output_file_path:[52,88],output_nam:[67,82],output_pad:66,output_s:66,outself:61,outsid:75,over:[56,58,75,82,86],overal:83,overrid:[3,4,29,30,44,70,82,84],overview:[59,65],own:[60,61,64,75,86],p:[52,61,66,86,88],packag:[52,54,61],pad:66,padding_idx:66,page:[65,77,79,86],pair:[54,60,64,75,83,84],pane:75,paragraph:[76,79],param:[69,74],paramet:[0,1,2,3,4,25,26,27,29,30,31,32,33,35,37,46,48,49,53,54,60,61,68,70,71,79,81,82],parent:[14,15,18,19,20,21],pars:[61,75],parser:75,part:[52,55,58,73,74,75,82],partial:[52,75],partit:54,partitioninfo:55,pass:[53,55,56,57,58,60,61,64,68,69,81,82,84],past:75,path:[4,13,14,15,29,30,52,61,63,64,69,70,81,82,84,86],path_to_torchtrt_root:64,pathwai:81,pattern:[60,61,70],payment:74,pbtxt:86,peephole_optimz:54,pellentesqu:78,peopl:75,pep:75,perforamnc:82,perform:[29,30,83,84,86],permit:75,permut:[66,82],persist:75,pharetra:78,phase:[16,60,61],phasellu:78,phi:75,philosoph:75,phrase:75,pi:75,pick:81,pickler:57,piec:83,pil:86,pin:74,pin_memori:66,pip3:64,pip:[64,86],pipelin:[52,88],piplein:61,pixel_shuffl:66,pl:74,place:[48,54,64,75,76,77,82,84],placerat:78,pla
n:[52,58],platea:78,platform:[45,52,58,63,64,86,88],pleas:[61,64,75,82,86],plugin:82,point:[61,70,73,74,75,86],pointer:[3,4,84],polish:74,pool:88,pop:57,popul:67,popular:[64,74,83],portabl:[57,71],portion:75,porttitor:[76,78],posit:[52,70,73,82],possibl:[64,75,83,86],post:[29,30,49,52,61,65],posuer:[76,78],potenti:[49,78],pow:66,power:[61,75,82,83],pr:61,praesent:78,pragma:[42,43,44,45,84],pre:[33,54,69,71,84,85],pre_cxx11_abi:64,preced:75,precis:[49,52,61,62,65,70,82,84,88],prefer:61,prefix:[27,28,42,68,75],preinstal:64,prelu:66,prepar:[82,86],preprint:84,preproc:69,preprocess:[84,86],prerequisit:63,present:63,preserv:[75,81,84],prespect:81,press:[63,75],pretium:78,pretrain:[83,86,87],pretti:61,prev_next_buttons_loc:73,prevent:[49,52],previou:73,previous:[29,33,61],prim:[53,54,57,61,66,81],prim_devic:66,primal:75,primarili:[58,61],print:[16,31,44,61,68,70,71,75,86,87],priorit:64,privat:[3,4,44,45,84],problem:75,problemat:75,proce:86,proceed:86,process:[52,55,74,75,81,83,84,86,87],prod:66,produc:[48,53,57,60,61,75,83],product:49,profil:[48,67],profiling_verbos:82,program:[18,19,20,21,29,51,52,56,57,58,65,81],programm:75,progress:76,proin:78,project:[64,74,79],projectdir:63,promis:82,prop:67,properli:64,properti:73,propog:54,prose:75,provid:[3,4,49,52,55,57,60,61,62,64,67,70,71,75,82,84,85,86,87],providi:[56,58],provok:75,pt:[52,61,82,86],ptq:[3,4,15,18,19,38,50,51,52,65,70,71],ptq_calibr:[3,4,45,49,84],ptqtemplat:50,publish:86,pull:[64,86],purchas:74,pure:31,purpos:[64,82,83,86],puru:78,push:57,push_back:[44,55],put:[75,83],pwd:[63,86],py3:86,py:[54,58,61,64,73,75,80,81,82,84],pyindex:86,pypi:64,python3:[54,61,64],python:[52,55,58,61,70,71,75,76,82,83,85,86,87,88],python_api:59,pytorch:[48,49,52,54,55,56,57,58,60,61,62,64,69,70,71,81,84,85,86],pytorch_libtorch:86,pytorch_sphinx_them:[73,80],qat:83,quant_max:66,quant_min:66,quantiz:[29,30,52,61,65],quantizatiom:49,quartznet:83,question:61,qui:[76,78],quickli:[52,61,84],quisqu:78,quit:[60,61,83],quot:76,r:75,rais:[54,82],raiseexcept:54,ram:[49,52,71],rand:[61,82],randn:[55,61,70,71,87],rang:[48,49,52,70,82,83],rank:73,rather:54,raw:73,re:[75,82],read:[3,4,29,30,44,73,75,84],read_calibration_cach:69,readcalibrationcach:[3,4,44],reader:75,realiz:57,realli:60,reason:[0,81,82],reattribut:76,recalibr:29,receiv:82,recip:84,reciproc:66,recognit:[83,84],recomend:[29,30],recommend:[29,30,61,64,75,82,86],record:[53,81],recurs:53,redistribut:76,reduc:[54,56,58,82,83,84],redund:82,ref:75,refer:[48,56,58,61,74,79,82,84,86],referenc:64,refit:[45,49,71,87],reflect:45,reflection_pad1d:66,reflection_pad2d:66,regard:[64,75],regardless:76,region:82,regist:[33,57,60,71,82],register_acc_op:82,register_acc_op_map:82,register_custom_acc_mapper_fn:82,registernodeconversionpattern:[60,61],registr:82,registri:[53,61],reinterpret_cast:44,rel:52,relat:[46,75],relationship:50,releas:[63,75],reload_model_output:82,reload_trt_mod:82,relu:[55,61,66,81],relu_:66,remain:[54,84],rememb:82,remov:73,remove_contigu:54,remove_dropout:54,remove_to:54,render:73,rent:76,repack:57,repeat:[52,66],replac:[54,64],replication_pad1d:66,replication_pad2d:66,replication_pad3d:66,report:[23,44],reportable_log_level:68,repositori:[58,73,80,86],repres:[48,49,60,68,75,82],represent:[54,60,81,82,83],request:[61,70,86],requir:[29,49,52,53,54,61,68,70,71,73,82,84,85,86],require_full_compil:[45,49,71],requires_grad:66,research:82,reserv:[42,43,44,45],reset:44,reshap:[66,86],resiz:86,resnet50:86,resnet:[57,83,86],resnet_trt:57,resolut:83,resolv:[53,54,56,58],resourc:[53,84],respons:[29,57,75],rest:[7
5,76,82],restrict:[49,71],restructuredtext:[75,76],result:[53,54,62,68,71,73,81,86],ret:54,reus:[54,82,84],revert:73,revis:[75,76],revisit:75,rfc:75,rho_:75,rhoncu:78,right:[42,43,44,45,54,58,60,75],risu:78,rm:86,rn50_preprocess:86,role:75,roll:66,roman:76,room:75,root:[42,43,44,45,64,73,84],roughli:55,round:[49,71],rounding_mod:66,row:76,rst:[73,75],rsub:66,rtol:52,rule:[64,71,82],ruler:75,run:[1,37,46,49,52,53,54,55,56,57,58,60,61,62,64,65,67,70,71,75,81,82,83,84,85,86,87,88],running_mean:66,running_var:66,runtim:[61,65],runtimeerror:82,rutrum:[76,78],s:[48,49,55,57,60,61,62,63,64,65,67,70,73,75,76,81,82,83,84,86],safe:[60,71],safe_dla:70,safe_gpu:70,safeti:[49,52,70],sage:75,sagitti:[76,78],sai:[76,83],said:75,same:[57,61,64,73,75,81,82,86,87],sampl:[75,82,84,86],sample_input:82,sapien:78,satisfi:[55,82],save:[29,44,52,57,61,62,70,71,82,83,85,86],saw:61,scalar:[60,66],scalaropt_dim:66,scalartyp:[0,45,66],scale:[66,83,84],scale_factor:66,scale_grad_by_freq:66,scales_d:66,scales_h:66,scales_w:66,scelerisqu:78,schedul:[70,86],schema:[60,61],scheme:82,scientist:75,scope:54,scratch:29,scratch_spac:86,screen:73,script:[31,54,55,61,62,70,71,81,87],script_model:[81,87],scriptclass:71,scripted_model:88,scriptmodul:[61,62,70,71],scroll:[73,77],sdk:59,se:83,seamlessli:65,search:[65,73],second:[54,75,82],secondli:86,section:[61,73,75,76,77,79,82,84,86],sed:[76,78],see:[31,54,57,61,64,70,71,75,81,82],seen:[75,76],segment:[55,83],select:[17,29,30,37,49,52,57,62,64,66,70,71,74,77,82,84],self:[54,57,60,61,66,69,81,83,88],self_1:[57,61],self_int:66,sell:76,seller:74,seller_id:74,sem:78,semant:75,semper:78,send:86,senectu:78,sens:[61,75],sentenc:[75,83],sentinel:[0,2],separ:[55,56,58],sequenc:[67,75,82,83],serial:[33,37,52,56,58,61,70,71],seriali:71,serializ:[57,81],serialized_cach:[67,82],serialized_engin:71,seril:57,serv:[52,57,65,82],servic:75,session:75,session_nam:75,set:[3,4,16,21,25,27,29,32,35,37,45,46,48,49,52,53,54,55,56,57,58,61,62,64,65,67,68,70,71,73,77,80,81,82,83,84,88],set_data_from_numpi:86,set_devic:[21,38,45,50,70],set_is_colored_output_on:[18,39,42,50,68],set_logging_prefix:[18,39,42,50,68],set_reportable_log_level:[18,39,42,50,68],setalpha:60,setbeta:60,setnam:[60,61],setreshapedimens:61,setup:[43,84,86],sever:[16,26,68],sh:64,sha256:64,shape:[45,47,48,49,52,55,60,62,66,67,70,71,82,86,88],shape_mod:70,shape_rang:[67,82],share:[49,52,63,64,71],shell_command:75,shift:[63,64,66,75],ship:[61,85],shorthand:75,should:[0,3,4,29,45,49,52,53,54,55,56,58,60,65,68,70,71,73,75,78,82,84,86],show:[73,75,83],shown:[61,73,75],shuffl:[61,84],side:[54,61,73],sidebar:[73,79],sigmoid:[66,82],sigmoid_:66,sign:86,signatur:71,signifi:[48,54],signific:75,significantli:[54,73],similar:[60,61,82,85,87],simonyan:84,simpil:84,simpl:[75,76,81,82,83,86],simplest:86,simpli:[54,83],simplifi:53,simul:83,sin:[66,75],sinc:[54,61,75,81,82,84],sing:75,singl:[48,52,54,61,70,75,81,82,84],singular:60,sinh:66,sink:75,sit:[76,78],site:[54,61,64,75],six:75,sixth:76,size:[3,4,44,48,49,52,54,55,61,66,70,71,73,82,83,84],size_t:[3,4,44,84],skip:52,slash:73,slice:66,slightli:82,sm:57,small:[54,86],smaller:83,so:[0,44,52,53,54,57,58,60,61,64,65,74,75,76,82,84],sodal:78,softmax:[54,66,82],softwar:[49,52,71,75],sole:[62,84],sollicitudin:78,solv:86,some:[53,54,56,57,58,60,61,74,75,82,84],some_funct:75,someth:[43,54,75,86],someurl:75,sort:[60,66,87],sourc:[42,43,44,45,58,63,67,68,69,70,71,82],sourceforg:[75,76],space:[75,76,84],spaces_and_linebreak:75,span:76,spars:[52,66],sparse_weight:[45,49,71,82],sparsiti:[49,52,71,82],spec:[45,48,4
9,52,68,70,71,87],specif:[32,49,54,56,58,70,71,75,83],specifi:[3,4,52,60,62,64,65,68,70,71,73,75,82,86,87],specifii:70,speech:83,speedup:83,sphinx:[73,74,75,76,80],sphinx_rtd_them:[75,76],spin:86,spirit:75,split:[66,82],split_siz:66,split_with_s:66,sqrt:66,squeez:[66,83],sram:52,src:[57,59,66],ss:44,ssd300_trt:57,ssd:57,ssd_trace:52,ssd_trt:52,sstream:[20,44],stabl:[59,71,73],stack:[57,66,84],stage:[53,82],stand:[57,75],standalon:75,standard:[52,57,65,75,83,85,87],stapl:76,start:[53,55,61,64,66,76,82,83,87],start_dim:[61,66],start_step:66,state:[53,60,61],statement:[54,75],static_cast:44,statu:[44,76],std:[3,4,22,26,28,29,30,31,33,34,37,42,44,45,47,48,49,55,61,84,86,88],stdout:[36,68,70],steamlin:84,step:[65,66,82,83,84],stick:73,sticki:[73,79],sticky_navig:[73,77],still:[44,55,82,84],stitch:[55,61],stop:61,storag:84,store:[2,4,49,52,53,57,60,61,71,81,82],str:[19,43,44,50,66,68,70,71,82],straight:60,strang:75,strategi:70,street:76,strict:85,strict_type_constraint:82,stride:66,string:[3,4,18,20,21,22,26,28,29,30,31,33,34,37,42,44,45,49,55,57,60,61,63,70,73,84],stringstream:44,strip_prefix:64,strong:75,strongli:75,struct:[1,21,38,41,45,84],structur:[29,46,49,55,58,60,73,75,79,81,86],structuredtext:75,stub:76,studio:63,stuff:75,style:[42,43,44,45,73,75,76],style_external_link:73,sub:[66,75,81],sub_:66,subdirectori:51,subexpress:54,subgraph:[49,52,53,54,60,61],subject:58,submenu:79,submodul:81,subscript:75,subsect:75,subset:[83,84],substitut:75,subtitl:75,subtre:80,subword:83,success:63,sudo:64,suffic:54,suggest:86,suit:65,suitabl:82,sum:[49,66,71,82],superscript:75,supervis:83,suppli:75,support:[0,1,2,27,31,46,48,49,52,55,59,61,62,63,64,65,70,71,73,74,81,82,86,88],sure:[61,62,64,86,88],suscipit:[76,78],suspendiss:78,symbol:[33,64,71,75,82,85],symlink:80,system:[53,60,64,65,71],t1:66,t2:66,t:[0,1,2,45,46,54,60,61,64,66,73,75,76,81,82,84,86],t_:75,tabl:[64,79],tag:[75,86],take:[31,32,33,37,53,56,57,58,60,61,70,71,73,75,82,83,84,87],taken:75,talk:65,tan:66,tanh:66,tanh_:66,tar:[64,75,84],tarbal:[61,84],target:[1,33,45,46,48,49,52,57,58,62,65,70,71,82,84,87,88],targets_:84,task:[29,30,82,83,84],techinqu:61,techniqu:84,tell:[54,55,56,57,58,60,75],tellu:78,tem:52,templat:[20,40,44,45,50,61,73],temporari:82,tempu:78,tensor:[2,33,44,45,48,49,52,53,54,55,57,60,61,66,67,70,71,81,82,83,84],tensor_mod:66,tensor_scalar:66,tensor_tensor:66,tensorcontain:60,tensorformat:[21,38,45,48,50,70],tensorformatenum:50,tensorlist:[55,60],tensorrt:[0,1,3,4,29,30,31,32,33,36,37,44,45,46,48,49,52,53,54,55,56,58,60,67,69,70,71,81,84],tensorrt_convert:82,tensorrt_root:63,tensorrtcompilespec:[71,87],tensort:82,teo:52,term:[70,75,76,83,84],termin:[27,52,61],test:[52,58,64,75,76,82,83,84,86],test_acc_trac:82,test_ptq_dataloader_calibr:84,test_ptq_trt_calibr:84,test_py_modul:[75,79],testing_dataload:84,testing_dataset:84,text:[68,76,78,83],tf32:[49,52],than:[54,65,74,75,83,85],thats:[53,84],the_model_repositori:86,thei:[46,52,53,54,57,60,62,64,70,73,75,82],them:[54,55,57,61,64,73,82,83],theori:[53,75],therebi:[57,83],therefor:[29,57,61,75,82,83],theres:85,therfor:85,theta:75,thi:[0,1,2,29,30,42,43,44,45,46,47,48,49,52,53,54,55,56,57,58,60,61,64,67,70,71,73,74,75,77,78,81,82,83,84,85,86,87],thicker:75,thin:75,thing1:75,thing2:75,thing3:75,thing:[64,75,82],think:[60,75],third:[76,82],third_parti:[58,64],this_arg_is_opt:82,those:[53,75],though:[52,58,60,61,81],thought:75,three:[48,56,58,67,70,75,76,82,83,86],threshold:52,through:[48,53,54,55,57,61,62,65,68,69,75,82,83],throught:82,thrown:[49,71],thu:75,time:[49,52,53,54,56,57,58,
60,61,71,73,75,82,84],timing_cach:82,tincidunt:78,tini:84,titles_onli:73,tmp:61,toctre:73,tocustomclass:60,todim:61,todo:[73,82],togeth:[53,60,61],token:83,toler:52,too:[64,73,75,76],tool:[60,61,82,83],toolchain:[58,64],top:[58,73,77],topk:66,torch:[0,1,2,4,20,21,29,30,31,32,33,36,37,44,45,46,47,48,49,52,53,54,55,56,57,58,60,64,67,70,71,81,84,88],torch_dir:63,torch_executed_modul:[45,49,55,71],torch_executed_op:[45,49,55,71],torch_scirpt_modul:81,torch_script_modul:61,torch_tensorrt:[0,1,2,3,4,14,16,17,42,43,44,46,47,48,49,50,51,52,55,61,62,65,82,83,84,85,86,87,88],torch_tensorrt_export:43,torch_tensorrt_major_vers:[19,43,50],torch_tensorrt_minor_vers:[19,43,50],torch_tensorrt_patch_vers:[19,43,50],torch_tensorrt_vers:[19,43,50],torch_tensorrtfil:50,torch_tensorrtnamespac:50,torchbind:57,torchhub:86,torchscript:[19,21,38,43,45,49,50,52,56,57,58,62,70,71,83,87,88],torchscriptstruct:50,torchtrt:[43,55],torchtrt_api:[0,2,19,22,23,24,25,26,27,28,31,32,33,34,35,36,37,42,43,44,45,48,49,50],torchtrt_check:60,torchtrt_hidden:[19,43,50],torchtrt_runtime_exampl:85,torchtrt_unus:60,torchtrtc:[64,65,88],torchvis:[57,84,86,87],toronto:84,tortor:78,totensor:[84,86],tovec:61,toward:84,trace:[55,61,71,81,82],traced_model:81,track:[60,84],tradit:[48,71,84],traget:32,trail:73,train:[29,30,49,52,61,62,65,66],trainabl:54,transcrib:83,transfer:74,transform:[61,84,86],transformed_img:86,translat:61,transmit:75,transpos:[66,82],trash:75,travers:[56,58],treat:52,tree:[42,43,44,45,73,84,85],trigger:[61,82],trim:84,tristiqu:78,triton:65,triton_to_np_dtyp:86,tritoncli:86,tritonserv:86,trt:[0,1,3,4,46,48,53,54,57,60,61,66,82],trt_interpreter_result:82,trt_lenet_script:61,trt_mod:[55,61,84,88],trt_model:[55,86,87],trt_ts_modul:[55,62],trtinterpret:[67,82],trtinterpreterresult:[67,82],trtmodul:[67,82],truncat:[49,52,71],truncate_long_and_doubl:[45,49,71],ts:[43,52,55,61,62,65,70,81,87],ts_model:[55,61],tt:75,tue:76,tupl:[57,67,70,71,82],tupleconstruct:[54,57],tupleunpack:54,turpi:78,tutori:[81,84],two:[52,54,60,62,64,75,76,80,81,82,84,86],type:[0,1,2,30,49,50,52,53,57,60,61,62,63,67,68,70,71,75,82,83,84],type_fp32:86,typenam:[3,4,29,30,44],typic:[53,60,86],ugli:75,ui:74,uint64_t:[45,49],ultric:78,un:84,unabl:[60,61],unbind:66,unbroken:75,uncas:83,uncom:64,under:[42,43,44,45,58,75],underli:[0,1,2,46,60],uniformli:83,union:[60,61,70,71],uniqu:[4,62],unique_ptr:[4,30],unit:82,univers:75,unknown:70,unless:82,unlik:[64,65,87],unlimit:73,unpack_addmm:54,unpack_log_softmax:54,unqiue_ptr:4,unreferenc:75,unrestrict:75,unsqueez:66,unstabl:58,unsupport:[31,49],unsur:60,untest:58,until:[53,58,60,64],unwrap:60,unwraptodoubl:60,unwraptoint:61,unzip:64,up:[53,54,56,57,58,75,81,82,83],updat:82,upload:86,upon:73,upper:76,upsample_bilinear2d:66,upsample_linear1d:66,upsample_nearest1d:66,upsample_nearest2d:66,upsample_nearest3d:66,upsample_trilinear3d:66,upscale_factor:66,upstream:61,uri:75,url:[64,73,86],urna:78,us:[0,1,2,3,4,29,30,32,35,37,43,44,45,46,48,49,52,53,55,57,58,60,63,65,67,68,69,70,71,73,74,75,76,81,82,84,85,86,88],usag:[61,69,75],use_cach:[3,4,30,44,69,84],use_cache_:44,use_cmake_generated_export_head:43,use_input_stat:66,use_subset:84,usecas:[62,64],user:[42,48,55,56,57,58,61,62,64,65,75,76,84,86],using_int:[61,66],usr:64,usual:[73,82],ut:78,utf:[75,76],util:[60,61,71,83,84,86],v0:[72,86],v2:[29,30],v:[52,76,86],valid:[1,46,60],valu:[0,1,2,16,17,45,46,48,53,57,60,61,63,66,68,69,70,73,83],value_tensor_map:[53,60],vanilla:82,vari:67,variabl:[48,63,70,82],variant:85,varient:54,varieti:86,variou:[82,88],variu:78,vcs_pageview_mo
d:73,vec:66,vector:[20,21,44,45,47,48,49,55,57,61,84,88],vehicula:78,vel:78,velit:78,venenati:78,verbios:52,verbos:[52,76],veri:[76,77,82,84,86,87],verifi:55,verison:64,version:[34,36,58,64,73,76,82,83,86],vertic:[73,75],vestibulum:[76,78],vgg16:84,vgg:84,vi:75,via:[65,70,71,73,79,82,83,84,85],view:[66,73],virtual:84,vision:[82,86],visitor:73,visual:63,vita:[76,78],vivamu:78,viverra:78,vm:76,volutpat:78,vs:[0,1,2,46,54,71,87],vulput:78,w:52,w_hh:66,w_ih:66,wa:[54,57,61,75,82],wai:[52,61,64,81,82,83,84],walkthrough:83,want:[42,55,61,67,81,82,84,86,87],warn:[16,44,52,60,68],wash:75,we:[42,44,53,54,56,57,58,60,61,67,73,75,81,82,83,84,86],weak:75,web:75,websit:64,weight:[48,49,52,53,61,66,71,75,82,83],welcom:[61,82],well:[61,64,68,75,84],were:61,wget:86,what:[4,54,61,62,75,81,82],whatev:[57,82],wheel:64,when:[27,44,45,46,52,53,54,56,57,58,60,61,64,68,70,71,73,75,77,81,82,83,84],where:[53,54,60,61,71,76,82,84],wherev:82,whether:[4,52,67,70,74,82,84],which:[1,2,29,32,37,46,49,53,54,55,56,57,58,60,61,62,64,69,71,73,75,76,81,82,83,84,85,86,87],white:75,whitespac:75,whl:64,who:75,whole:82,whose:[54,82],why:75,wide:79,width:[75,83],window:75,window_nam:75,wish:76,within:[49,52,56,58,71,73,75],without:[60,61,73,75,84],wl:85,wooden:75,word:[75,83],work:[44,54,58,60,75,76,82,84],worker:84,workflow:[82,83,87],workspac:[49,52,64,71],workspace_s:[45,49,52,71],world:75,would:[52,60,61,62,64,82,85,86,87],wp:86,wrap:[56,57,58,61,75,78,82,87],wrapper:[60,82],write:[3,4,29,30,44,53,61,65,75,82,84,86],write_calibration_cach:69,writecalibrationcach:[3,4,44],wrote:75,www:[61,64,73,75,84,86],x64:63,x86:85,x86_64:[58,64],x9:54,x:[5,10,33,43,54,61,64,71,76,81],x_0:75,x_1:75,x_2:75,x_3:75,x_4:75,x_:75,xavier:[45,88],xstr:[19,43,50],xx:86,xxx:82,y:[33,71,76],yahoo:76,yaml:59,yet:[82,83],you:[0,1,2,29,30,46,48,49,52,53,54,55,57,58,60,61,62,64,65,70,71,73,75,76,77,81,82,83,84,85,86,87],your:[60,61,62,64,65,73,75,76,80,81,85,87],yourself:61,yy:86,z:76,zero_point:66,zip:[57,64],zisserman:84},titles:["Class DataType","Class Device::DeviceType","Class TensorFormat","Template Class Int8CacheCalibrator","Template Class Int8Calibrator","Define STR","Define TORCH_TENSORRT_PATCH_VERSION","Define TORCH_TENSORRT_MAJOR_VERSION","Define TORCH_TENSORRT_MINOR_VERSION","Define TORCHTRT_API","Define XSTR","Define TORCHTRT_HIDDEN","Define TORCH_TENSORRT_VERSION","Directory cpp","Directory include","Directory torch_tensorrt","Enum Level","Enum EngineCapability","File logging.h","File macros.h","File ptq.h","File torch_tensorrt.h","Function torch_tensorrt::logging::get_logging_prefix","Function torch_tensorrt::logging::get_reportable_log_level","Function torch_tensorrt::logging::get_is_colored_output_on","Function torch_tensorrt::logging::set_reportable_log_level","Function torch_tensorrt::logging::log","Function torch_tensorrt::logging::set_is_colored_output_on","Function torch_tensorrt::logging::set_logging_prefix","Template Function torch_tensorrt::ptq::make_int8_cache_calibrator","Template Function torch_tensorrt::ptq::make_int8_calibrator","Function torch_tensorrt::torchscript::check_method_operator_support","Function torch_tensorrt::torchscript::compile","Function torch_tensorrt::torchscript::embed_engine_in_new_module","Function torch_tensorrt::get_build_info","Function torch_tensorrt::set_device","Function torch_tensorrt::dump_build_info","Function torch_tensorrt::torchscript::convert_method_to_trt_engine","Namespace torch_tensorrt","Namespace torch_tensorrt::logging","Namespace torch_tensorrt::ptq","Namespace 
torch_tensorrt::torchscript","Program Listing for File logging.h","Program Listing for File macros.h","Program Listing for File ptq.h","Program Listing for File torch_tensorrt.h","Struct Device","Struct GraphInputs","Struct Input","Struct CompileSpec","Torch-TensorRT C++ API","Full API","torchtrtc","Conversion Phase","Lowering Phase","Partitioning Phase","Compiler Phases","Runtime Phase","System Overview","Useful Links for Torch-TensorRT Development","Writing Converters","Using Torch-TensorRT in C++","Using Torch-TensorRT in Python","Building Torch-TensorRT on Windows","Installation","Torch-TensorRT","Operators Supported","torch_tensorrt.fx","torch_tensorrt.logging","torch_tensorrt.ptq","torch_tensorrt","torch_tensorrt.ts","Changelog","Configuration","5. :mod:`test_py_module`","3. Paragraph Level Markup","4. Lists & Tables","1. Long Sticky Nav","1. Structural Elements","<no title>","Installation","Creating a TorchScript Module","Torch-TensorRT (FX Frontend) User Guide","Example notebooks","Post Training Quantization (PTQ)","Deploying Torch-TensorRT Programs","Serving a Torch-TensorRT model with Triton","Using Torch-TensorRT Directly From PyTorch","DLA"],titleterms:{"1":[77,86],"10":77,"11":77,"12":77,"13":77,"14":77,"15":77,"16":77,"17":77,"18":77,"19":77,"2":[77,78,86],"20":77,"3":[77,86],"4":77,"5":77,"6":77,"7":77,"8":77,"9":77,"class":[0,1,2,3,4,20,21,38,40,41,50,67,69,70],"enum":[16,17,18,21,38,39,50,69,70],"function":[18,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,50,59,67,70,71],"long":[77,79],A:75,And:75,But:76,By:[18,19],Or:54,The:[61,75],To:54,aarch64:64,abi:[57,64],acc:82,acceler:83,add:82,addmm:54,admonit:75,advic:60,ahead:65,an:79,api:[50,51,59,64,65],applic:84,arg:[60,74],automat:55,avail:59,awar:83,background:[57,60],base:[3,4,48,73],bert:83,binari:64,block:75,branch:54,build:[63,64,73,86],bullet:76,c:[50,59,61,64,65,83,84],can:76,caption:[76,79],center:75,ch:75,changelog:72,check_method_operator_support:31,choos:64,citat:[75,84],citrinet:83,cli:[64,65],client:86,cmake:64,code:[54,75],compil:[32,56,58,61,63,64,65,83],compilespec:49,compound:75,configur:[63,73],construct:57,content:[18,19,20,21,38,39,40,41,73,74,75,76,77,78],context:[60,73],contigu:54,contract:60,contributor:65,convers:[53,56,58,60],convert:[53,60,61,66,82],convert_method_to_trt_engin:37,cpp:[13,18,19,20,21,55],creat:[81,84],creativ:75,cudnn:64,current:66,custom:61,cxx11:64,data:74,datatyp:0,dead:54,debug:64,deep:83,deeper:76,defin:[5,6,7,8,9,10,11,12,19,50],definit:[18,19,20,21,76],demo:79,depend:64,deploi:[83,85],deseri:57,detect:83,develop:59,devic:[1,46],devicetyp:1,dimens:59,direct:75,directli:87,directori:[13,14,15,51],disk:81,distribut:64,dla:88,doctest:75,documen:65,document:[0,1,2,3,4,5,6,7,8,9,10,11,12,16,17,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,46,47,48,49,59,65,78,79],down:76,download:[75,80],dropout:54,dump_build_info:36,dynam:83,easier:59,efficentnet:83,element:78,elimin:54,eliminatecommonsubexpress:54,embed_engine_in_new_modul:33,emphas:75,engin:[57,82],enginecap:17,enumer:76,envior:64,evalu:[53,66],exampl:[75,77,83],execept:54,executor:57,expect:59,face:83,fallback:[54,55],field:76,figur:75,file:[15,18,19,20,21,42,43,44,45,50,51],flatten:54,footnot:75,format:57,freez:54,from:[64,87],frontend:[82,83],full:[50,51],fuse:54,fx2trt:82,fx:[67,82,83],gaurd:54,gener:74,get:65,get_build_info:34,get_is_colored_output_on:24,get_logging_prefix:22,get_reportable_log_level:23,giant:76,git:80,glossari:75,gpu:65,graph:[54,57],graphinput:47,grid:76,guarante:60,guid:82,h:[18,19,20
,21,42,43,44,45,55],have:76,hierarchi:50,hlist:76,hole:76,hood:61,how:[73,82,84],html:73,hug:83,ien:75,imag:[75,76],includ:[14,18,19,20,21],incred:79,index:74,indic:65,infer:86,inherit:[3,4,48],inlin:75,input:48,instal:[63,64,80],int8:83,int8cachecalibr:3,int8calibr:4,ir:59,jetson:64,jit:65,languag:83,layer:59,learn:83,lenet:83,level:[16,73,75,76],librari:[64,85],libtorchtrt:85,like:76,line:75,linear:54,link:[59,75],list:[42,43,44,45,76],liter:75,local:64,log:[18,22,23,24,25,26,27,28,39,42,68],logsoftmax:54,loop:54,lower:[54,56,58],macro:[19,43],make_int8_cache_calibr:29,make_int8_calibr:30,markup:75,mask:83,math:75,menu:[77,79],meta:75,miss:82,mlm:83,mod:74,model:[82,83,86],modul:[54,61,81],namespac:[18,19,20,21,38,39,40,41,50],nativ:64,native_op:59,nav:77,nest:[1,46],node:53,notebook:83,number:[75,76],nvidia:65,object:83,one:76,op:[57,82],oper:[61,66],optim:86,optimz:54,option:[73,74,76],other:60,overview:58,own:84,packag:[64,85],page:73,paragraph:[75,78],paramet:74,partit:[55,56,58],partitoninfo:55,pass:54,pattern:54,peephol:54,phase:[53,54,55,56,57,58],plugin:85,post:84,pre:64,precompil:64,prerequisit:64,program:[42,43,44,45,85],project:73,ptq:[20,29,30,40,44,69,84],python:[59,62,64,65,81,84],pytorch:[59,65,82,83,87],quantiz:[83,84],queri:86,quickstart:61,quot:75,rabbit:76,read:59,redund:54,refer:75,regist:61,relationship:[1,3,4,46,48],releas:64,remov:54,replac:75,resnet50:83,respons:60,result:57,right:64,rubric:75,runtim:[56,57,58,85],save:81,second:76,section:78,segmentedblock:55,serial:57,serv:[83,86],server:86,set:86,set_devic:35,set_is_colored_output_on:27,set_logging_prefix:28,set_reportable_log_level:25,setup:64,shape:83,shape_analysi:55,sidebar:75,so:85,sometim:59,sourc:64,ssd:83,start:65,step:86,sticki:77,str:5,struct:[46,47,48,49,50],structur:78,subdirectori:[13,14],submenu:77,submodul:70,subsect:78,subsubmenu:77,subsubsect:78,support:66,system:58,tabl:[73,74,75,76,77,78],tarbal:64,target:75,templat:[3,4,29,30],tensorformat:2,tensorrt:[50,57,59,61,62,63,64,65,82,83,85,86,87],test_py_modul:74,text:75,theme:[73,79],thi:[76,79],through:66,time:65,titl:75,toc:73,topic:75,torch:[50,59,61,62,63,65,82,83,85,86,87],torch_tensorrt:[15,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,45,67,68,69,70,71],torch_tensorrt_major_vers:7,torch_tensorrt_minor_vers:8,torch_tensorrt_patch_vers:6,torch_tensorrt_vers:12,torchscript:[31,32,33,37,41,61,65,81],torchtrt_api:9,torchtrt_hidden:11,torchtrtc:[52,61],tracer:82,train:[83,84],transform:83,triton:86,ts:71,tupl:54,tutori:65,type:[3,4,46,48],under:61,unpack:54,unrol:54,unsupport:61,up:86,us:[54,59,61,62,64,83,87],user:82,version:57,via:80,wai:75,weight:60,what:60,wide:73,window:63,work:[61,81],write:60,xstr:10,your:[84,86]}}) \ No newline at end of file diff --git a/docs/src/pytorch-sphinx-theme/docs/changelog.html b/docs/src/pytorch-sphinx-theme/docs/changelog.html index 1c06b101c5..46c8448cfb 100644 --- a/docs/src/pytorch-sphinx-theme/docs/changelog.html +++ b/docs/src/pytorch-sphinx-theme/docs/changelog.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/src/pytorch-sphinx-theme/docs/configuring.html b/docs/src/pytorch-sphinx-theme/docs/configuring.html index f1c5a3d2e0..28b617e03b 100644 --- a/docs/src/pytorch-sphinx-theme/docs/configuring.html +++ b/docs/src/pytorch-sphinx-theme/docs/configuring.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/src/pytorch-sphinx-theme/docs/demo/api.html b/docs/src/pytorch-sphinx-theme/docs/demo/api.html index af2ed1dfe0..552258fef0 100644 --- a/docs/src/pytorch-sphinx-theme/docs/demo/api.html +++ b/docs/src/pytorch-sphinx-theme/docs/demo/api.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/src/pytorch-sphinx-theme/docs/demo/demo.html b/docs/src/pytorch-sphinx-theme/docs/demo/demo.html index 9d344d63bc..d467f285b8 100644 --- a/docs/src/pytorch-sphinx-theme/docs/demo/demo.html +++ b/docs/src/pytorch-sphinx-theme/docs/demo/demo.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
@@ -544,7 +544,7 @@

3.4.4.

3.4.5. Code Blocks

# parsed-literal test
-curl -O http://someurl/release-master (1.2.0a0+51a991e).tar-gz
+curl -O http://someurl/release-master (1.2.0a0+096fd41).tar-gz

Code Blocks can have captions.
{
diff --git a/docs/src/pytorch-sphinx-theme/docs/demo/lists_tables.html b/docs/src/pytorch-sphinx-theme/docs/demo/lists_tables.html
index 5695156066..df35196c0b 100644
--- a/docs/src/pytorch-sphinx-theme/docs/demo/lists_tables.html
+++ b/docs/src/pytorch-sphinx-theme/docs/demo/lists_tables.html
@@ -197,7 +197,7 @@
               
               
                 
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/src/pytorch-sphinx-theme/docs/demo/long.html b/docs/src/pytorch-sphinx-theme/docs/demo/long.html index 6371569524..62c792ecf6 100644 --- a/docs/src/pytorch-sphinx-theme/docs/demo/long.html +++ b/docs/src/pytorch-sphinx-theme/docs/demo/long.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/src/pytorch-sphinx-theme/docs/demo/structure.html b/docs/src/pytorch-sphinx-theme/docs/demo/structure.html index 40aff14ed5..9c551adf3a 100644 --- a/docs/src/pytorch-sphinx-theme/docs/demo/structure.html +++ b/docs/src/pytorch-sphinx-theme/docs/demo/structure.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/src/pytorch-sphinx-theme/docs/index.html b/docs/src/pytorch-sphinx-theme/docs/index.html index b03537a762..7af568e2a1 100644 --- a/docs/src/pytorch-sphinx-theme/docs/index.html +++ b/docs/src/pytorch-sphinx-theme/docs/index.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/src/pytorch-sphinx-theme/docs/installing.html b/docs/src/pytorch-sphinx-theme/docs/installing.html index 7ce3043701..816954b554 100644 --- a/docs/src/pytorch-sphinx-theme/docs/installing.html +++ b/docs/src/pytorch-sphinx-theme/docs/installing.html @@ -197,7 +197,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/creating_torchscript_module_in_python.html b/docs/tutorials/creating_torchscript_module_in_python.html index bdb3cf0426..f21fe1598d 100644 --- a/docs/tutorials/creating_torchscript_module_in_python.html +++ b/docs/tutorials/creating_torchscript_module_in_python.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/getting_started_with_fx_path.html b/docs/tutorials/getting_started_with_fx_path.html index 936f983a04..651f96f772 100644 --- a/docs/tutorials/getting_started_with_fx_path.html +++ b/docs/tutorials/getting_started_with_fx_path.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/notebooks.html b/docs/tutorials/notebooks.html index f4c8dbda7b..0152930ccc 100644 --- a/docs/tutorials/notebooks.html +++ b/docs/tutorials/notebooks.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/ptq.html b/docs/tutorials/ptq.html index 6c48abaf2b..ea05af3f48 100644 --- a/docs/tutorials/ptq.html +++ b/docs/tutorials/ptq.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/runtime.html b/docs/tutorials/runtime.html index 274b80dce3..837b96e43d 100644 --- a/docs/tutorials/runtime.html +++ b/docs/tutorials/runtime.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/serving_torch_tensorrt_with_triton.html b/docs/tutorials/serving_torch_tensorrt_with_triton.html index 732b18ef42..a93d70c666 100644 --- a/docs/tutorials/serving_torch_tensorrt_with_triton.html +++ b/docs/tutorials/serving_torch_tensorrt_with_triton.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/use_from_pytorch.html b/docs/tutorials/use_from_pytorch.html index e9e8e75342..4ee09bfc1f 100644 --- a/docs/tutorials/use_from_pytorch.html +++ b/docs/tutorials/use_from_pytorch.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docs/tutorials/using_dla.html b/docs/tutorials/using_dla.html index 6252d51262..c3ca7e9e93 100644 --- a/docs/tutorials/using_dla.html +++ b/docs/tutorials/using_dla.html @@ -199,7 +199,7 @@
- master (1.2.0a0+51a991e) + master (1.2.0a0+096fd41)
diff --git a/docsrc/getting_started/getting_started_with_windows.rst b/docsrc/getting_started/getting_started_with_windows.rst
new file mode 100644
index 0000000000..d5d3394855
--- /dev/null
+++ b/docsrc/getting_started/getting_started_with_windows.rst
@@ -0,0 +1,78 @@
+.. _getting_started_windows:
+
+Building Torch-TensorRT on Windows
+====================================
+
+Torch-TensorRT has community support for the Windows platform using CMake.
+
+Prerequisites:
+
+* Microsoft Visual Studio
+* LibTorch
+* TensorRT
+* CUDA
+* cuDNN
+
+
+Build configuration
+-------------------
+
+* Open Microsoft Visual Studio
+* Open the Torch-TensorRT source code folder
+* Open Manage configurations -> Edit JSON to open the CMakeSettings.json file.
+* Configure the CMake build configurations. The following is an example configuration:
+
+.. code-block:: none
+
+    {
+      "configurations": [
+        {
+          "name": "x64-Debug",
+          "generator": "Ninja",
+          "configurationType": "Debug",
+          "inheritEnvironments": [ "msvc_x64_x64" ],
+          "buildRoot": "${projectDir}\\out\\build\\${name}",
+          "installRoot": "${projectDir}\\out\\install\\${name}",
+          "cmakeCommandArgs": "-S . -B out",
+          "buildCommandArgs": "cmake --build out",
+          "ctestCommandArgs": "",
+          "variables": [
+            {
+              "name": "CMAKE_MODULE_PATH",
+              "value": "$PWD\cmake\Modules",
+              "type": "FILEPATH"
+            },
+            {
+              "name": "Torch_DIR",
+              "value": "\share\cmake\Torch",
+              "type": "FILEPATH"
+            },
+            {
+              "name": "TensorRT_ROOT",
+              "value": "",
+              "type": "FILEPATH"
+            },
+            {
+              "name": "CMAKE_BUILD_TYPE",
+              "value": "Release",
+              "type": "STRING"
+            }
+          ]
+        }
+      ]
+    }
+
+
+Compilation
+-----------
+
+* Click Build -> Build All or directly press Ctrl + Shift + B
+
+Note: After successful compilation, the build artifacts will be present at the configured buildRoot path.
+
+Installation
+------------
+
+* Build -> Install Torch-TensorRT
+
+Note: After successful installation, the artifacts will be present at the configured installRoot path.
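For reference, a rough command-line equivalent of the CMakeSettings.json configuration above (an untested sketch, not part of the patch: it assumes the same Ninja generator and "out" build directory, and the repository, LibTorch, and TensorRT locations are placeholders you must substitute):

.. code-block:: none

    :: configure (mirrors cmakeCommandArgs and the variables block above)
    cmake -S . -B out -G Ninja ^
      -DCMAKE_MODULE_PATH=<repo root>\cmake\Modules ^
      -DTorch_DIR=<path to LibTorch>\share\cmake\Torch ^
      -DTensorRT_ROOT=<path to TensorRT> ^
      -DCMAKE_BUILD_TYPE=Release
    :: build (mirrors buildCommandArgs)
    cmake --build out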
\ No newline at end of file diff --git a/examples/int8/training/vgg16/export_qat.py b/examples/int8/training/vgg16/export_qat.py index af881c5642..72974d1f4d 100644 --- a/examples/int8/training/vgg16/export_qat.py +++ b/examples/int8/training/vgg16/export_qat.py @@ -81,7 +81,7 @@ def test(model, dataloader, crit): quant_nn.TensorQuantizer.use_fb_fake_quant = True with torch.no_grad(): data = iter(testing_dataloader) - images, _ = data.next() + images, _ = next(data) jit_model = torch.jit.trace(model, images.to("cuda")) torch.jit.save(jit_model, "trained_vgg16_qat.jit.pt") diff --git a/noxfile.py b/noxfile.py index 41926b5ee1..eff8136fbb 100644 --- a/noxfile.py +++ b/noxfile.py @@ -30,6 +30,9 @@ if USE_HOST_DEPS: print("Using dependencies from host python") +# Set epochs to train VGG model for accuracy tests +EPOCHS = 25 + SUPPORTED_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] nox.options.sessions = [ @@ -63,31 +66,6 @@ def install_torch_trt(session): session.run("python", "setup.py", "develop") -def download_datasets(session): - print( - "Downloading dataset to path", - os.path.join(TOP_DIR, "examples/int8/training/vgg16"), - ) - session.chdir(os.path.join(TOP_DIR, "examples/int8/training/vgg16")) - session.run_always( - "wget", "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz", external=True - ) - session.run_always("tar", "-xvzf", "cifar-10-binary.tar.gz", external=True) - session.run_always( - "mkdir", - "-p", - os.path.join(TOP_DIR, "tests/accuracy/datasets/data"), - external=True, - ) - session.run_always( - "cp", - "-rpf", - os.path.join(TOP_DIR, "examples/int8/training/vgg16/cifar-10-batches-bin"), - os.path.join(TOP_DIR, "tests/accuracy/datasets/data/cidar-10-batches-bin"), - external=True, - ) - - def train_model(session): session.chdir(os.path.join(TOP_DIR, "examples/int8/training/vgg16")) session.install("-r", "requirements.txt") @@ -107,14 +85,14 @@ def train_model(session): "--ckpt-dir", "vgg16_ckpts", "--epochs", - "25", + str(EPOCHS), env={"PYTHONPATH": PYT_PATH}, ) session.run_always( "python", "export_ckpt.py", - "vgg16_ckpts/ckpt_epoch25.pth", + "vgg16_ckpts/ckpt_epoch" + str(EPOCHS) + ".pth", env={"PYTHONPATH": PYT_PATH}, ) else: @@ -130,10 +108,12 @@ def train_model(session): "--ckpt-dir", "vgg16_ckpts", "--epochs", - "25", + str(EPOCHS), ) - session.run_always("python", "export_ckpt.py", "vgg16_ckpts/ckpt_epoch25.pth") + session.run_always( + "python", "export_ckpt.py", "vgg16_ckpts/ckpt_epoch" + str(EPOCHS) + ".pth" + ) def finetune_model(session): @@ -156,9 +136,9 @@ def finetune_model(session): "--ckpt-dir", "vgg16_ckpts", "--start-from", - "25", + str(EPOCHS), "--epochs", - "26", + str(EPOCHS + 1), env={"PYTHONPATH": PYT_PATH}, ) @@ -166,7 +146,7 @@ def finetune_model(session): session.run_always( "python", "export_qat.py", - "vgg16_ckpts/ckpt_epoch26.pth", + "vgg16_ckpts/ckpt_epoch" + str(EPOCHS + 1) + ".pth", env={"PYTHONPATH": PYT_PATH}, ) else: @@ -182,13 +162,17 @@ def finetune_model(session): "--ckpt-dir", "vgg16_ckpts", "--start-from", - "25", + str(EPOCHS), "--epochs", - "26", + str(EPOCHS + 1), ) # Export model - session.run_always("python", "export_qat.py", "vgg16_ckpts/ckpt_epoch26.pth") + session.run_always( + "python", + "export_qat.py", + "vgg16_ckpts/ckpt_epoch" + str(EPOCHS + 1) + ".pth", + ) def cleanup(session): @@ -219,6 +203,19 @@ def run_base_tests(session): session.run_always("pytest", test) +def run_model_tests(session): + print("Running model tests") + session.chdir(os.path.join(TOP_DIR, "tests/py")) + tests = [ + "models", + ] + 
for test in tests: + if USE_HOST_DEPS: + session.run_always("pytest", test, env={"PYTHONPATH": PYT_PATH}) + else: + session.run_always("pytest", test) + + def run_accuracy_tests(session): print("Running accuracy tests") session.chdir(os.path.join(TOP_DIR, "tests/py")) @@ -268,8 +265,8 @@ def run_trt_compatibility_tests(session): copy_model(session) session.chdir(os.path.join(TOP_DIR, "tests/py")) tests = [ - "test_trt_intercompatibility.py", - "test_ptq_trt_calibrator.py", + "integrations/test_trt_intercompatibility.py", + # "ptq/test_ptq_trt_calibrator.py", ] for test in tests: if USE_HOST_DEPS: @@ -282,7 +279,7 @@ def run_dla_tests(session): print("Running DLA tests") session.chdir(os.path.join(TOP_DIR, "tests/py")) tests = [ - "test_api_dla.py", + "hw/test_api_dla.py", ] for test in tests: if USE_HOST_DEPS: @@ -295,7 +292,7 @@ def run_multi_gpu_tests(session): print("Running multi GPU tests") session.chdir(os.path.join(TOP_DIR, "tests/py")) tests = [ - "test_multi_gpu.py", + "hw/test_multi_gpu.py", ] for test in tests: if USE_HOST_DEPS: @@ -322,13 +319,12 @@ def run_l0_dla_tests(session): cleanup(session) -def run_l1_accuracy_tests(session): +def run_l1_model_tests(session): if not USE_HOST_DEPS: install_deps(session) install_torch_trt(session) - download_datasets(session) - train_model(session) - run_accuracy_tests(session) + download_models(session) + run_model_tests(session) cleanup(session) @@ -336,7 +332,6 @@ def run_l1_int8_accuracy_tests(session): if not USE_HOST_DEPS: install_deps(session) install_torch_trt(session) - download_datasets(session) train_model(session) finetune_model(session) run_int8_accuracy_tests(session) @@ -347,9 +342,6 @@ def run_l2_trt_compatibility_tests(session): if not USE_HOST_DEPS: install_deps(session) install_torch_trt(session) - download_models(session) - download_datasets(session) - train_model(session) run_trt_compatibility_tests(session) cleanup(session) @@ -376,9 +368,9 @@ def l0_dla_tests(session): @nox.session(python=SUPPORTED_PYTHON_VERSIONS, reuse_venv=True) -def l1_accuracy_tests(session): - """Checking accuracy performance on various usecases""" - run_l1_accuracy_tests(session) +def l1_model_tests(session): + """When a user needs to test the functionality of standard models compilation and results""" + run_l1_model_tests(session) @nox.session(python=SUPPORTED_PYTHON_VERSIONS, reuse_venv=True) @@ -397,13 +389,3 @@ def l2_trt_compatibility_tests(session): def l2_multi_gpu_tests(session): """Makes sure that Torch-TensorRT can operate on multi-gpu systems""" run_l2_multi_gpu_tests(session) - - -@nox.session(python=SUPPORTED_PYTHON_VERSIONS, reuse_venv=True) -def download_test_models(session): - """Grab all the models needed for testing""" - try: - import torch - except ModuleNotFoundError: - install_deps(session) - download_models(session) diff --git a/py/torch_tensorrt/fx/lower.py b/py/torch_tensorrt/fx/lower.py index 59b59d580f..9a641e000e 100644 --- a/py/torch_tensorrt/fx/lower.py +++ b/py/torch_tensorrt/fx/lower.py @@ -86,7 +86,9 @@ def create(cls, lower_setting): def __call__(self, mod, input, split_name) -> TRTInterpreterResult: assert self.lower_setting.input_specs, "Can't find input specs for lowering!" 
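# Editorial aside, not part of the patch: the logging change immediately below replaces the
# f-string "=" debug specifier with explicit formatting. The "=" form is only available on
# Python 3.8+, while the explicit form also renders on the Python 3.7 interpreter still listed
# in SUPPORTED_PYTHON_VERSIONS above. A minimal illustration, assuming a plain string value:
#   split_name = "split_0"
#   f"{split_name=}"             # -> "split_name='split_0'"  (Python 3.8+ only)
#   f"split_name={split_name}"   # -> "split_name=split_0"    (any supported version)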
- logger.info(f"{split_name=} {self.lower_setting.input_specs=}") + logger.info( + f"split_name={split_name}, input_specs={self.lower_setting.input_specs}" + ) # Prepare algorithm selector and timing_cache for TRTInterpreter algo_selector = None diff --git a/py/torch_tensorrt/ptq.py b/py/torch_tensorrt/ptq.py index 326f35f942..f60dd74b52 100644 --- a/py/torch_tensorrt/ptq.py +++ b/py/torch_tensorrt/ptq.py @@ -28,7 +28,7 @@ def get_batch(self, names): if self.current_batch_idx + self.batch_size > len(self.data_loader.dataset): return None - batch = self.dataset_iterator.next() + batch = next(self.dataset_iterator) self.current_batch_idx += self.batch_size inputs_gpu = [] if isinstance(batch, list): @@ -56,6 +56,13 @@ def write_calibration_cache(self, cache): return b"" +# deepcopy (which involves pickling) is performed on the compile_spec internally during compilation. +# We register this __reduce__ function for the pickler to identify the calibrator object returned by DataLoaderCalibrator during deepcopy. +# This should be the object's local name relative to the module https://docs.python.org/3/library/pickle.html#object.__reduce__ +def __reduce__(self): + return self.__class__.__name__ + + class DataLoaderCalibrator(object): """ Constructs a calibrator class in TensorRT and uses pytorch dataloader to load/preproces @@ -114,24 +121,27 @@ def __new__(cls, *args, **kwargs): "get_batch": get_cache_mode_batch if use_cache else get_batch, "read_calibration_cache": read_calibration_cache, "write_calibration_cache": write_calibration_cache, + "__reduce__": __reduce__, # used when you deepcopy the DataLoaderCalibrator object } # Using type metaclass to construct calibrator class based on algorithm type if algo_type == CalibrationAlgo.ENTROPY_CALIBRATION: return type( - "DataLoaderCalibrator", (_C.IInt8EntropyCalibrator,), attribute_mapping + "Int8EntropyCalibrator", (_C.IInt8EntropyCalibrator,), attribute_mapping )() elif algo_type == CalibrationAlgo.ENTROPY_CALIBRATION_2: return type( - "DataLoaderCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping + "Int8EntropyCalibrator2", + (_C.IInt8EntropyCalibrator2,), + attribute_mapping, )() elif algo_type == CalibrationAlgo.LEGACY_CALIBRATION: return type( - "DataLoaderCalibrator", (_C.IInt8LegacyCalibrator,), attribute_mapping + "Int8LegacyCalibrator", (_C.IInt8LegacyCalibrator,), attribute_mapping )() elif algo_type == CalibrationAlgo.MINMAX_CALIBRATION: return type( - "DataLoaderCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping + "Int8MinMaxCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping )() else: log( diff --git a/py/torch_tensorrt/ts/_compile_spec.py b/py/torch_tensorrt/ts/_compile_spec.py index 154b29dd7b..9616111caa 100644 --- a/py/torch_tensorrt/ts/_compile_spec.py +++ b/py/torch_tensorrt/ts/_compile_spec.py @@ -225,7 +225,7 @@ def _parse_input_signature(input_signature: Any): def _parse_compile_spec(compile_spec_: Dict[str, Any]) -> _ts_C.CompileSpec: - # TODO: Remove deep copy once collections does not need partial compilation + # TODO: Use deepcopy to support partial compilation of collections compile_spec = deepcopy(compile_spec_) info = _ts_C.CompileSpec() @@ -301,7 +301,7 @@ def _parse_compile_spec(compile_spec_: Dict[str, Any]) -> _ts_C.CompileSpec: compile_spec["enabled_precisions"] ) - if "calibrator" in compile_spec: + if "calibrator" in compile_spec and compile_spec["calibrator"]: info.ptq_calibrator = compile_spec["calibrator"] if "sparse_weights" in compile_spec: diff --git 
a/tests/core/conversion/converters/BUILD b/tests/core/conversion/converters/BUILD index 82bc2f7033..5246de4cf1 100644 --- a/tests/core/conversion/converters/BUILD +++ b/tests/core/conversion/converters/BUILD @@ -71,6 +71,10 @@ converter_test( name = "test_matrix_multiply", ) +converter_test( + name = "test_max", +) + converter_test( name = "test_normalize", ) @@ -156,6 +160,7 @@ test_suite( ":test_linear", ":test_lstm_cell", ":test_matrix_multiply", + ":test_max", ":test_normalize", ":test_pooling", ":test_reduce", diff --git a/tests/core/conversion/converters/test_max.cpp b/tests/core/conversion/converters/test_max.cpp new file mode 100644 index 0000000000..dfc2432c24 --- /dev/null +++ b/tests/core/conversion/converters/test_max.cpp @@ -0,0 +1,147 @@ +#include +#include "core/compiler.h" +#include "gtest/gtest.h" +#include "tests/util/util.h" +#include "torch/csrc/jit/ir/irparser.h" + +TEST(Converters, ATenMaxDimConvertsCorrectly) { + const auto graph = R"IR( + graph(%x.1 : Tensor): + %2 : int = prim::Constant[value=0]() + %3 : bool = prim::Constant[value=0]() + %4 : Tensor, %5 : Tensor = aten::max(%x.1, %2, %3) + return (%4, %5))IR"; + + auto g = std::make_shared(); + torch::jit::parseIR(graph, g.get()); + + auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); + + auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); + + params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); + + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6)); + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6)); +} + +TEST(Converters, ATenMinDimConvertsCorrectly) { + const auto graph = R"IR( + graph(%x.1 : Tensor): + %2 : int = prim::Constant[value=0]() + %3 : bool = prim::Constant[value=0]() + %4 : Tensor, %5 : Tensor = aten::min(%x.1, %2, %3) + return (%4, %5))IR"; + + auto g = std::make_shared(); + torch::jit::parseIR(graph, g.get()); + + auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); + + auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); + + params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); + + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6)); + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6)); +} + +TEST(Converters, ATenArgMaxConvertsCorrectly) { + const auto graph = R"IR( + graph(%x.1 : Tensor): + %2 : int = prim::Constant[value=0]() + %3 : bool = prim::Constant[value=0]() + %4 : Tensor = aten::argmax(%x.1, %2, %3) + return (%4))IR"; + + auto g = std::make_shared(); + torch::jit::parseIR(graph, g.get()); + + auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); + + auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); + + params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); + + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[0], 
trt_results[0].reshape_as(jit_results[0]), 2e-6)); +} + +TEST(Converters, ATenArgMaxKeepdimConvertsCorrectly) { + const auto graph = R"IR( + graph(%x.1 : Tensor): + %2 : int = prim::Constant[value=1]() + %3 : bool = prim::Constant[value=1]() + %4 : Tensor = aten::argmax(%x.1, %2, %3) + return (%4))IR"; + + auto g = std::make_shared(); + torch::jit::parseIR(graph, g.get()); + + auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); + + auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); + + params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); + + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6)); +} + +TEST(Converters, ATenArgMinConvertsCorrectly) { + const auto graph = R"IR( + graph(%x.1 : Tensor): + %2 : int = prim::Constant[value=0]() + %3 : bool = prim::Constant[value=0]() + %4 : Tensor = aten::argmin(%x.1, %2, %3) + return (%4))IR"; + + auto g = std::make_shared(); + torch::jit::parseIR(graph, g.get()); + + auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); + + auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); + + params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); + + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6)); +} + +TEST(Converters, ATenArgMinKeepdimConvertsCorrectly) { + const auto graph = R"IR( + graph(%x.1 : Tensor): + %2 : int = prim::Constant[value=1]() + %3 : bool = prim::Constant[value=1]() + %4 : Tensor = aten::argmin(%x.1, %2, %3) + return (%4))IR"; + + auto g = std::make_shared(); + torch::jit::parseIR(graph, g.get()); + + auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); + + auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); + + params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); + + ASSERT_TRUE( + torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6)); +} diff --git a/tests/core/conversion/converters/test_topk.cpp b/tests/core/conversion/converters/test_topk.cpp index 1885493737..c53d209c1f 100644 --- a/tests/core/conversion/converters/test_topk.cpp +++ b/tests/core/conversion/converters/test_topk.cpp @@ -30,28 +30,3 @@ TEST(Converters, ATenTopKConvertsCorrectly) { ASSERT_TRUE( torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6)); } - -TEST(Converters, ATenMaxDimConvertsCorrectly) { - const auto graph = R"IR( - graph(%x.1 : Tensor): - %2 : int = prim::Constant[value=0]() - %3 : bool = prim::Constant[value=0]() - %4 : Tensor, %5 : Tensor = aten::max(%x.1, %2, %3) - return (%4, %5))IR"; - - auto g = std::make_shared(); - torch::jit::parseIR(graph, g.get()); - - auto in = at::rand({2, 3, 5, 5}, {at::kCUDA}); - - auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); - auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); - - params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); - 
auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); - - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), 2e-6)); - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit_results[1], trt_results[1].reshape_as(jit_results[1]), 2e-6)); -} diff --git a/tests/core/conversion/converters/test_unary.cpp b/tests/core/conversion/converters/test_unary.cpp index 1d40c3c94b..06f092ff36 100644 --- a/tests/core/conversion/converters/test_unary.cpp +++ b/tests/core/conversion/converters/test_unary.cpp @@ -31,6 +31,22 @@ TEST(Converters, ATenAbsIntConvertsCorrectly) { ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0])); } +TEST(Converters, ATenReciprocalIntConvertsCorrectly) { + const auto graph = gen_test_graph("reciprocal"); + auto g = std::make_shared(); + torch::jit::parseIR(graph, g.get()); + + auto in = at::tensor({-1, 1, -2, 2, -3, 3}, {at::kCUDA}).to(torch::kInt32); + auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in}); + + in = at::clone(in); + params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {}); + auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in}); + + ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0])); +} + #define test_unary(unary, name) \ TEST(Converters, ATen##name##ConvertsCorrectly) { \ const auto graph = gen_test_graph(#unary); \ diff --git a/tests/core/lowering/test_module_fallback_passes.cpp b/tests/core/lowering/test_module_fallback_passes.cpp index f11882df8b..e6eb098079 100644 --- a/tests/core/lowering/test_module_fallback_passes.cpp +++ b/tests/core/lowering/test_module_fallback_passes.cpp @@ -124,5 +124,5 @@ TEST(Lowering, LowerAndPartitionSimpleModuleFallbackCorrectly) { } auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor(); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99)); } diff --git a/tests/core/partitioning/test_fallback_graph_output.cpp b/tests/core/partitioning/test_fallback_graph_output.cpp index 98fc4e6128..3da717074a 100644 --- a/tests/core/partitioning/test_fallback_graph_output.cpp +++ b/tests/core/partitioning/test_fallback_graph_output.cpp @@ -34,7 +34,7 @@ TEST(Partitioning, ComputeResNet50FallbackGraphCorrectly) { auto jit_results = mod.forward(jit_inputs_ivalues).toTensor(); auto trt_mod = torch_tensorrt::core::CompileGraph(mod, cfg); auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor(); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99)); } TEST(Partitioning, ComputeMobileNetFallbackGraphCorrectly) { @@ -64,6 +64,6 @@ TEST(Partitioning, ComputeMobileNetFallbackGraphCorrectly) { auto jit_results = mod.forward(jit_inputs_ivalues).toTensor(); auto trt_mod = torch_tensorrt::core::CompileGraph(mod, cfg); auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor(); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99)); } #endif diff --git a/tests/cpp/test_collections.cpp b/tests/cpp/test_collections.cpp index d01665adcd..e3f0d91dfe 100644 --- 
a/tests/cpp/test_collections.cpp +++ b/tests/cpp/test_collections.cpp @@ -42,7 +42,7 @@ TEST(CppAPITests, TestCollectionStandardTensorInput) { auto trt_mod = torch_tensorrt::torchscript::compile(mod, compile_settings); auto trt_out = trt_mod.forward(inputs_); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(out.toTensor(), trt_out.toTensor(), 1e-5)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(out.toTensor(), trt_out.toTensor(), 0.99)); } TEST(CppAPITests, TestCollectionTupleInput) { @@ -85,7 +85,7 @@ TEST(CppAPITests, TestCollectionTupleInput) { auto trt_mod = torch_tensorrt::torchscript::compile(mod, compile_settings); auto trt_out = trt_mod.forward(complex_inputs); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(out.toTensor(), trt_out.toTensor(), 1e-5)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(out.toTensor(), trt_out.toTensor(), 0.99)); } TEST(CppAPITests, TestCollectionListInput) { @@ -144,7 +144,7 @@ TEST(CppAPITests, TestCollectionListInput) { LOG_DEBUG("Finish compile"); auto trt_out = trt_mod.forward(complex_inputs); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(out.toTensor(), trt_out.toTensor(), 1e-5)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(out.toTensor(), trt_out.toTensor(), 0.99)); } TEST(CppAPITests, TestCollectionTupleInputOutput) { @@ -317,4 +317,4 @@ TEST(CppAPITests, TestCollectionComplexModel) { out.toTuple()->elements()[0].toTensor(), trt_out.toTuple()->elements()[0].toTensor(), 1e-5)); ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual( out.toTuple()->elements()[1].toTensor(), trt_out.toTuple()->elements()[1].toTensor(), 1e-5)); -} \ No newline at end of file +} diff --git a/tests/cpp/test_compiled_modules.cpp b/tests/cpp/test_compiled_modules.cpp index 595dd7044f..3a81f0a531 100644 --- a/tests/cpp/test_compiled_modules.cpp +++ b/tests/cpp/test_compiled_modules.cpp @@ -42,7 +42,7 @@ TEST_P(CppAPITests, CompiledModuleIsClose) { for (size_t i = 0; i < trt_results.size(); i++) { ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit_results[i], trt_results[i].reshape_as(jit_results[i]), threshold)); + torch_tensorrt::tests::util::cosineSimEqual(jit_results[i], trt_results[i].reshape_as(jit_results[i]), 0.99)); } } @@ -52,11 +52,7 @@ INSTANTIATE_TEST_SUITE_P( CompiledModuleForwardIsCloseSuite, CppAPITests, testing::Values( - PathAndInput({"tests/modules/resnet18_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/resnet50_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/mobilenet_v2_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), PathAndInput({"tests/modules/resnet18_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/resnet50_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), PathAndInput({"tests/modules/mobilenet_v2_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), PathAndInput({"tests/modules/efficientnet_b0_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 8e-3}), PathAndInput({"tests/modules/bert_base_uncased_traced.jit.pt", {{1, 14}, {1, 14}}, {at::kInt, at::kInt}, 8e-2}), diff --git a/tests/cpp/test_module_fallback.cpp b/tests/cpp/test_module_fallback.cpp index d1221cde4d..bfdfc46b04 100644 --- a/tests/cpp/test_module_fallback.cpp +++ b/tests/cpp/test_module_fallback.cpp @@ -30,7 +30,7 @@ TEST(CppAPITest, ResNetModuleFallbacksCorrectly) { auto jit_results = mod.forward(jit_inputs_ivalues).toTensor(); auto trt_mod = 
torch_tensorrt::ts::compile(mod, cfg); auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor(); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99)); } TEST(CppAPITest, MobileNetModuleFallbacksCorrectlyWithOneEngine) { @@ -69,6 +69,6 @@ TEST(CppAPITest, MobileNetModuleFallbacksCorrectlyWithOneEngine) { ASSERT_TRUE(trt_count == 1); auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor(); - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99)); } #endif diff --git a/tests/cpp/test_modules_as_engines.cpp b/tests/cpp/test_modules_as_engines.cpp index 4437b1218c..11b7a54fb0 100644 --- a/tests/cpp/test_modules_as_engines.cpp +++ b/tests/cpp/test_modules_as_engines.cpp @@ -14,41 +14,8 @@ TEST_P(CppAPITests, ModuleAsEngineIsClose) { jit_results.push_back(jit_results_ivalues.toTensor()); auto trt_results = torch_tensorrt::tests::util::RunModuleForwardAsEngine(mod, inputs); - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), threshold)); -} - -TEST_P(CppAPITests, ModuleToEngineToModuleIsClose) { - std::vector inputs; - std::vector inputs_ivalues; - for (uint64_t i = 0; i < input_shapes.size(); i++) { - inputs.push_back(at::randint(5, input_shapes[i], {at::kCUDA}).to(input_types[i])); - inputs_ivalues.push_back(inputs[inputs.size() - 1].clone()); - } - - torch::jit::IValue jit_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(mod, inputs_ivalues); - std::vector jit_results; - jit_results.push_back(jit_results_ivalues.toTensor()); - - std::vector> input_ranges; - for (auto in : inputs) { - input_ranges.push_back(in.sizes()); - } - - auto compile_spec = torch_tensorrt::ts::CompileSpec({input_ranges}); - int device_id = 0; - cudaGetDevice(&device_id); - compile_spec.device.device_type = torch_tensorrt::Device::DeviceType::kGPU; - compile_spec.device.gpu_id = device_id; - auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", input_ranges); - auto trt_mod = torch_tensorrt::ts::embed_engine_in_new_module(engine, compile_spec.device); - - torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, inputs_ivalues); - std::vector trt_results; - trt_results.push_back(trt_results_ivalues.toTensor()); - - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]), threshold)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual( + jit_results[0], trt_results[0].reshape_as(jit_results[0]), threshold)); } #ifndef DISABLE_TEST_IN_CI @@ -57,12 +24,8 @@ INSTANTIATE_TEST_SUITE_P( ModuleAsEngineForwardIsCloseSuite, CppAPITests, testing::Values( - PathAndInput({"tests/modules/resnet18_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/resnet50_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/mobilenet_v2_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/resnet18_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/resnet50_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - PathAndInput({"tests/modules/mobilenet_v2_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}), - 
PathAndInput({"tests/modules/efficientnet_b0_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 1e-4}), - PathAndInput({"tests/modules/vit_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 8e-2}))); + PathAndInput({"tests/modules/resnet18_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 0.99}), + PathAndInput({"tests/modules/mobilenet_v2_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 0.99}), + PathAndInput({"tests/modules/efficientnet_b0_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 0.99}), + PathAndInput({"tests/modules/vit_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 0.99}))); #endif diff --git a/tests/cpp/test_multi_gpu_serde.cpp b/tests/cpp/test_multi_gpu_serde.cpp index 8672ae9517..0b3944125b 100644 --- a/tests/cpp/test_multi_gpu_serde.cpp +++ b/tests/cpp/test_multi_gpu_serde.cpp @@ -23,12 +23,12 @@ TEST_P(CppAPITests, CompiledModuleIsClose) { trt_results.push_back(trt_results_ivalues.toTensor()); for (size_t i = 0; i < trt_results.size(); i++) { - ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual( - jit_results[i], trt_results[i].reshape_as(jit_results[i]).to(torch::Device("cuda:0")), 2e-5)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual( + jit_results[i], trt_results[i].reshape_as(jit_results[i]).to(torch::Device("cuda:0")), threshold)); } } INSTANTIATE_TEST_SUITE_P( CompiledModuleForwardIsCloseSuite, CppAPITests, - testing::Values(PathAndInput({"tests/modules/resnet18_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}))); + testing::Values(PathAndInput({"tests/modules/resnet18_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 0.99}))); diff --git a/tests/cpp/test_multiple_registered_engines.cpp b/tests/cpp/test_multiple_registered_engines.cpp index 2746687f68..658f59ca74 100644 --- a/tests/cpp/test_multiple_registered_engines.cpp +++ b/tests/cpp/test_multiple_registered_engines.cpp @@ -10,7 +10,7 @@ TEST(CppAPITest, CanRunMultipleEngines) { torch::jit::script::Module mod1; torch::jit::script::Module mod2; try { - mod1 = torch::jit::load("tests/modules/resnet50_traced.jit.pt"); + mod1 = torch::jit::load("tests/modules/resnet18_traced.jit.pt"); mod2 = torch::jit::load("tests/modules/resnet18_traced.jit.pt"); } catch (const c10::Error& e) { std::cerr << "error loading the model\n"; @@ -56,13 +56,13 @@ TEST(CppAPITest, CanRunMultipleEngines) { trt2_results.push_back(trt2_results_ivalues.toTensor()); for (size_t i = 0; i < trt1_results.size(); i++) { - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit1_results[i], trt1_results[i].reshape_as(jit1_results[i]), 2e-5)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual( + jit1_results[i], trt1_results[i].reshape_as(jit1_results[i]), 0.99)); } for (size_t i = 0; i < trt2_results.size(); i++) { - ASSERT_TRUE( - torch_tensorrt::tests::util::almostEqual(jit2_results[i], trt2_results[i].reshape_as(jit2_results[i]), 2e-5)); + ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual( + jit2_results[i], trt2_results[i].reshape_as(jit2_results[i]), 0.99)); } } #endif diff --git a/tests/py/api/test_collections.py b/tests/py/api/test_collections.py index dfae3f18c9..936a4d5c73 100644 --- a/tests/py/api/test_collections.py +++ b/tests/py/api/test_collections.py @@ -3,6 +3,7 @@ import torch import torchvision.models as models import os +from utils import cosine_similarity, COSINE_THRESHOLD def find_repo_root(max_depth=10): @@ -40,12 +41,13 @@ def test_compile(self): } trt_mod = torchtrt.ts.compile(self.model, **compile_spec) - same = ( - (trt_mod(self.input, self.input) - 
self.model(self.input, self.input)) - .abs() - .max() + cos_sim = cosine_similarity( + self.model(self.input, self.input), trt_mod(self.input, self.input) + ) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"standard_tensor_input_scripted TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) - self.assertTrue(same < 2e-2) class TestTupleInput(unittest.TestCase): @@ -68,12 +70,13 @@ def test_compile(self): } trt_mod = torchtrt.ts.compile(self.model, **compile_spec) - same = ( - (trt_mod((self.input, self.input)) - self.model((self.input, self.input))) - .abs() - .max() + cos_sim = cosine_similarity( + self.model((self.input, self.input)), trt_mod((self.input, self.input)) + ) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"tuple_input_scripted TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) - self.assertTrue(same < 2e-2) class TestListInput(unittest.TestCase): @@ -94,12 +97,13 @@ def test_compile(self): } trt_mod = torchtrt.ts.compile(self.model, **compile_spec) - same = ( - (trt_mod([self.input, self.input]) - self.model([self.input, self.input])) - .abs() - .max() + cos_sim = cosine_similarity( + self.model([self.input, self.input]), trt_mod([self.input, self.input]) + ) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"list_input_scripted TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) - self.assertTrue(same < 2e-2) class TestTupleInputOutput(unittest.TestCase): @@ -124,8 +128,12 @@ def test_compile(self): trt_mod = torchtrt.ts.compile(self.model, **compile_spec) trt_out = trt_mod((self.input, self.input)) pyt_out = self.model((self.input, self.input)) - results = [(t - p).abs().max() < 2e-2 for (t, p) in zip(trt_out, pyt_out)] - self.assertTrue(all(results)) + for (t, p) in zip(trt_out, pyt_out): + cos_sim = cosine_similarity(t, p) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"tuple_input_output_scripted TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) class TestListInputOutput(unittest.TestCase): @@ -150,8 +158,13 @@ def test_compile(self): trt_mod = torchtrt.ts.compile(self.model, **compile_spec) trt_out = trt_mod((self.input, self.input)) pyt_out = self.model((self.input, self.input)) - results = [(t - p).abs().max() < 2e-2 for (t, p) in zip(trt_out, pyt_out)] - self.assertTrue(all(results)) + + for (t, p) in zip(trt_out, pyt_out): + cos_sim = cosine_similarity(t, p) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"list_input_output_scripted TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) class TestListInputTupleOutput(unittest.TestCase): @@ -176,8 +189,12 @@ def test_compile(self): trt_mod = torchtrt.ts.compile(self.model, **compile_spec) trt_out = trt_mod((self.input, self.input)) pyt_out = self.model((self.input, self.input)) - results = [(t - p).abs().max() < 2e-2 for (t, p) in zip(trt_out, pyt_out)] - self.assertTrue(all(results)) + for (t, p) in zip(trt_out, pyt_out): + cos_sim = cosine_similarity(t, p) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"list_input_tuple_output_scripted TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) if __name__ == "__main__": diff --git a/tests/py/api/test_e2e_behavior.py b/tests/py/api/test_e2e_behavior.py index d1da3e0465..385fe916f4 100644 --- a/tests/py/api/test_e2e_behavior.py +++ b/tests/py/api/test_e2e_behavior.py @@ -6,102 +6,6 @@ from typing import Dict -class TestCompileHalf(unittest.TestCase): - def test_compile_script_half(self): - self.model = models.resnet18(pretrained=True).eval().to("cuda") - self.input = torch.randn((1, 3, 224, 224)).to("cuda") - self.scripted_model = torch.jit.script(self.model) - self.scripted_model.half() - - compile_spec = { - "inputs": [torchtrt.Input(shape=self.input.shape, dtype=torch.half)], - "device": { - "device_type": torchtrt.DeviceType.GPU, - "gpu_id": 0, - }, - "enabled_precisions": {torch.half}, - } - - trt_mod = torchtrt.ts.compile(self.scripted_model, **compile_spec) - same = ( - (trt_mod(self.input.half()) - self.scripted_model(self.input.half())) - .abs() - .max() - ) - torchtrt.logging.log(torchtrt.logging.Level.Debug, "Max diff: " + str(same)) - self.assertTrue(same < 3e-2) - - def test_compile_script_half_by_default(self): - self.model = models.resnet18(pretrained=True).eval().to("cuda") - self.input = torch.randn((1, 3, 224, 224)).to("cuda") - self.scripted_model = torch.jit.script(self.model) - self.scripted_model.half() - - compile_spec = { - "inputs": [torchtrt.Input(shape=self.input.shape)], - "device": { - "device_type": torchtrt.DeviceType.GPU, - "gpu_id": 0, - }, - "enabled_precisions": {torch.float, torch.half}, - } - - trt_mod = torchtrt.ts.compile(self.scripted_model, **compile_spec) - same = ( - (trt_mod(self.input.half()) - self.scripted_model(self.input.half())) - .abs() - .max() - ) - torchtrt.logging.log(torchtrt.logging.Level.Debug, "Max diff: " + str(same)) - self.assertTrue(same < 3e-2) - - -class TestFallbackToTorch(unittest.TestCase): - def test_fallback(self): - self.model = models.resnet18(pretrained=True).eval().to("cuda") - self.input = torch.randn((1, 3, 224, 224)).to("cuda") - self.scripted_model = torch.jit.script(self.model) - - compile_spec = { - "inputs": [torchtrt.Input(self.input.shape)], - "device": { - "device_type": torchtrt.DeviceType.GPU, - "gpu_id": 0, - "allow_gpu_fallback": False, - "disable_tf32": False, - }, - "require_full_compilation": False, - "torch_executed_ops": ["aten::max_pool2d"], - "min_block_size": 1, - } - - trt_mod = torchtrt.ts.compile(self.scripted_model, **compile_spec) - same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max() - self.assertTrue(same < 2e-3) - - def test_module_fallback(self): - self.model = models.resnet18(pretrained=True).eval().to("cuda") - self.input = torch.randn((1, 3, 224, 224)).to("cuda") - self.scripted_model = torch.jit.script(self.model) - - compile_spec = { - "inputs": [torchtrt.Input(self.input.shape)], - "device": { - "device_type": torchtrt.DeviceType.GPU, - "gpu_id": 0, - "allow_gpu_fallback": False, - "disable_tf32": False, - }, - "require_full_compilation": False, - "torch_executed_modules": ["torchvision.models.resnet.BasicBlock"], - "min_block_size": 1, - } - - trt_mod = torchtrt.ts.compile(self.scripted_model, **compile_spec) - same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max() - self.assertTrue(same < 2e-3) - - class TestInputTypeDefaultsFP32Model(unittest.TestCase): def test_input_use_default_fp32(self): self.model = models.resnet18(pretrained=True).eval().to("cuda") diff --git a/tests/py/api/test_embed_engines.py 
b/tests/py/api/test_embed_engines.py new file mode 100644 index 0000000000..d21e139eca --- /dev/null +++ b/tests/py/api/test_embed_engines.py @@ -0,0 +1,73 @@ +import unittest +import torch_tensorrt as torchtrt +import torch +import torchvision.models as models +import copy +import timm +from typing import Dict +from utils import cosine_similarity, COSINE_THRESHOLD + + +class TestModelToEngineToModel(unittest.TestCase): + def test_resnet50(self): + self.model = models.resnet50(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + } + + self.scripted_model = torch.jit.script(self.model) + trt_engine = torchtrt.ts.convert_method_to_trt_engine( + self.scripted_model, "forward", **compile_spec + ) + trt_mod = torchtrt.ts.embed_engine_in_new_module(trt_engine) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Resnet50 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + def test_efficientnet_b0(self): + self.model = ( + timm.create_model("efficientnet_b0", pretrained=True).eval().to("cuda") + ) + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + } + + self.scripted_model = torch.jit.script(self.model) + trt_engine = torchtrt.ts.convert_method_to_trt_engine( + self.scripted_model, "forward", **compile_spec + ) + trt_mod = torchtrt.ts.embed_engine_in_new_module(trt_engine) + + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"EfficientNet-B0 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/py/api/test_module_fallback.py b/tests/py/api/test_module_fallback.py new file mode 100644 index 0000000000..5eda2cdbfc --- /dev/null +++ b/tests/py/api/test_module_fallback.py @@ -0,0 +1,62 @@ +import unittest +import torch_tensorrt as torchtrt +import torch +import torchvision.models as models +import copy +from typing import Dict +from utils import cosine_similarity, COSINE_THRESHOLD + + +class TestModuleFallback(unittest.TestCase): + def test_fallback_resnet18(self): + self.model = models.resnet18(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + "torch_executed_modules": ["torchvision.models.resnet.BasicBlock"], + } + trt_mod = torchtrt.compile(self.model, **compile_spec) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Resnet18 TRT outputs don't match with the original model. 
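The new test_embed_engines.py cases above exercise the two-step path where a TorchScript method is first serialized to a TensorRT engine and that engine is then wrapped back into a TorchScript module. A minimal standalone sketch of the same flow, with ResNet18 and the 1x3x224x224 input chosen purely for illustration rather than taken from the test suite:

```python
import torch
import torch_tensorrt as torchtrt
import torchvision.models as models

# Serialize the forward method to a TensorRT engine, then embed the engine in a
# fresh TorchScript module that is callable like the original model.
model = models.resnet18(pretrained=True).eval().to("cuda")
scripted = torch.jit.script(model)
inp = torch.randn((1, 3, 224, 224)).to("cuda")

trt_engine = torchtrt.ts.convert_method_to_trt_engine(
    scripted, "forward", inputs=[torchtrt.Input(inp.shape)]
)
trt_mod = torchtrt.ts.embed_engine_in_new_module(trt_engine)
print(trt_mod(inp).shape)  # expected: torch.Size([1, 1000])
```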
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + def test_fallback_mobilenet_v2(self): + self.model = models.mobilenet_v2(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + "torch_executed_modules": [ + "torchvision.models.mobilenetv2.ConvBNActivation" + ], + "min_block_size": 5, + } + trt_mod = torchtrt.compile(self.model, **compile_spec) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Mobilenet V2 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/py/api/test_operator_fallback.py b/tests/py/api/test_operator_fallback.py new file mode 100644 index 0000000000..302a663e24 --- /dev/null +++ b/tests/py/api/test_operator_fallback.py @@ -0,0 +1,59 @@ +import unittest +import torch_tensorrt as torchtrt +import torch +import torchvision.models as models +import copy +from typing import Dict +from utils import cosine_similarity, COSINE_THRESHOLD + + +class TestFallbackModels(unittest.TestCase): + def test_fallback_resnet18(self): + self.model = models.resnet18(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + "torch_executed_ops": ["aten::add"], + } + trt_mod = torchtrt.compile(self.model, **compile_spec) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Resnet18 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + def test_fallback_mobilenet_v2(self): + self.model = models.mobilenet_v2(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + "torch_executed_ops": ["aten::hardtanh"], + } + trt_mod = torchtrt.compile(self.model, **compile_spec) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Mobilenet V2 TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/py/api/test_ts_backend.py b/tests/py/api/test_ts_backend.py index d0654a8f75..e56ab4f902 100644 --- a/tests/py/api/test_ts_backend.py +++ b/tests/py/api/test_ts_backend.py @@ -4,6 +4,7 @@ import torchvision.models as models import copy from typing import Dict +from utils import cosine_similarity, COSINE_THRESHOLD class TestCompile(unittest.TestCase): @@ -26,8 +27,11 @@ def test_compile_traced(self): } trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"VGG16 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_compile_script(self): self.model = models.vgg16(pretrained=True).eval().to("cuda") @@ -40,8 +44,11 @@ def test_compile_script(self): device=torchtrt.Device(gpu_id=0), enabled_precisions={torch.float}, ) - same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"VGG16 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_compile_global(self): self.model = models.vgg16(pretrained=True).eval().to("cuda") @@ -53,21 +60,11 @@ def test_compile_global(self): device=torchtrt.Device(gpu_id=0), enabled_precisions={torch.float}, ) - same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) - - def test_compile_global_nn_mod(self): - self.model = models.vgg16(pretrained=True).eval().to("cuda") - self.input = torch.randn((1, 3, 224, 224)).to("cuda") - with torch.no_grad(): - trt_mod = torchtrt.compile( - self.model, - inputs=[self.input], - device=torchtrt.Device(gpu_id=0), - enabled_precisions={torch.float}, - ) - same = (trt_mod(self.input) - self.model(self.input)).abs().max() - self.assertTrue(same < 2e-2) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"VGG16 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_from_torch_tensor(self): self.model = models.vgg16(pretrained=True).eval().to("cuda") @@ -83,8 +80,11 @@ def test_from_torch_tensor(self): } trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"VGG16 TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_device(self): self.model = models.vgg16(pretrained=True).eval().to("cuda") @@ -97,8 +97,11 @@ def test_device(self): } trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"VGG16 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_default_device(self): self.model = models.vgg16(pretrained=True).eval().to("cuda") @@ -107,51 +110,11 @@ def test_default_device(self): compile_spec = {"inputs": [self.input], "enabled_precisions": {torch.float}} trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) - - def test_compile_script_from_dict(self): - self.model = models.vgg16(pretrained=True).eval().to("cuda") - self.input = torch.randn((1, 3, 224, 224)).to("cuda") - self.traced_model = torch.jit.trace(self.model, [self.input]) - compile_spec = { - "inputs": [torchtrt.Input(shape=self.input.shape)], - "device": { - "device_type": torchtrt.DeviceType.GPU, - "gpu_id": 0, - }, - "enabled_precisions": {torch.float}, - } - - trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) - - -class TestPTtoTRTtoPT(unittest.TestCase): - def test_pt_to_trt_to_pt(self): - self.model = models.vgg16(pretrained=True).eval().to("cuda") - self.input = torch.randn((1, 3, 224, 224)).to("cuda") - self.ts_model = torch.jit.trace(self.model, [self.input]) - - compile_spec = { - "inputs": [torchtrt.Input(self.input.shape)], - "device": { - "device_type": torchtrt.DeviceType.GPU, - "gpu_id": 0, - "allow_gpu_fallback": False, - "disable_tf32": False, - }, - } - - trt_engine = torchtrt.ts.convert_method_to_trt_engine( - self.ts_model, "forward", **compile_spec - ) - trt_mod = torchtrt.ts.embed_engine_in_new_module( - trt_engine, torchtrt.Device("cuda:0") + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"VGG16 TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) - same = (trt_mod(self.input) - self.ts_model(self.input)).abs().max() - self.assertTrue(same < 2e-3) class TestCheckMethodOpSupport(unittest.TestCase): diff --git a/tests/py/api/utils.py b/tests/py/api/utils.py new file mode 100644 index 0000000000..b1e6632ec3 --- /dev/null +++ b/tests/py/api/utils.py @@ -0,0 +1,15 @@ +import torch + +COSINE_THRESHOLD = 0.99 + + +def cosine_similarity(gt_tensor, pred_tensor): + gt_tensor = gt_tensor.flatten().to(torch.float32) + pred_tensor = pred_tensor.flatten().to(torch.float32) + if torch.sum(gt_tensor) == 0.0 or torch.sum(pred_tensor) == 0.0: + if torch.allclose(gt_tensor, pred_tensor, atol=1e-4, rtol=1e-4, equal_nan=True): + return 1.0 + res = torch.nn.functional.cosine_similarity(gt_tensor, pred_tensor, dim=0, eps=1e-6) + res = res.cpu().detach().item() + + return res diff --git a/tests/py/hw/test_api_dla.py b/tests/py/hw/test_api_dla.py index 57b149faa7..5328b92233 100644 --- a/tests/py/hw/test_api_dla.py +++ b/tests/py/hw/test_api_dla.py @@ -2,6 +2,7 @@ import torch_tensorrt as torchtrt import torch import torchvision.models as models +from utils import cosine_similarity, COSINE_THRESHOLD class ModelTestCaseOnDLA(unittest.TestCase): @@ -39,8 +40,11 @@ def test_compile_traced(self): } trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"ModelTestCaseOnDLA traced TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_compile_script(self): compile_spec = { @@ -55,8 +59,11 @@ def test_compile_script(self): } trt_mod = torchtrt.ts.compile(self.scripted_model, **compile_spec) - same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max() - self.assertTrue(same < 2e-2) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"ModelTestCaseOnDLA scripted TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_suite(): diff --git a/tests/py/hw/test_multi_gpu.py b/tests/py/hw/test_multi_gpu.py index c068cc71b0..b6fa3f220b 100644 --- a/tests/py/hw/test_multi_gpu.py +++ b/tests/py/hw/test_multi_gpu.py @@ -35,9 +35,12 @@ def test_compile_traced(self): trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) torchtrt.set_device(self.target_gpu) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) torchtrt.set_device(0) - self.assertTrue(same < 2e-3) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"TestMultiGpuSwitching traced TRT outputs don't match with the original model. 
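The utils.py helper introduced above flattens both tensors to one dimension and compares their direction, falling back to an allclose check when either tensor sums to zero (the all-zeros case, where cosine similarity is undefined). A quick sketch on made-up tensors, assuming it is run from one of the test directories that ship the helper, shows why this is more tolerant of small element-wise noise than a max-absolute-difference check:

```python
import torch
from utils import cosine_similarity, COSINE_THRESHOLD

# Made-up tensors, not taken from any test: a lightly perturbed copy keeps a
# cosine similarity near 1.0 even though its max absolute difference would fail
# a 2e-6 style tolerance, while an unrelated tensor falls far below 0.99.
gt = torch.randn(1, 1000)
noisy = gt + 1e-3 * torch.randn_like(gt)
unrelated = torch.randn(1, 1000)

assert cosine_similarity(gt, noisy) > COSINE_THRESHOLD
assert cosine_similarity(gt, unrelated) < COSINE_THRESHOLD
```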
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_compile_script(self): torchtrt.set_device(0) @@ -54,9 +57,12 @@ def test_compile_script(self): trt_mod = torchtrt.ts.compile(self.scripted_model, **compile_spec) torchtrt.set_device(self.target_gpu) - same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max() + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) torchtrt.set_device(0) - self.assertTrue(same < 2e-3) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"TestMultiGpuSwitching scripted TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) class TestMultiGpuSerializeDeserializeSwitching(ModelTestCase): @@ -89,8 +95,11 @@ def test_compile_traced(self): trt_mod = torchtrt.ts.compile(self.traced_model, **compile_spec) # Changing the device ID deliberately. It should still run on correct device ID by context switching torchtrt.set_device(1) - same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max() - self.assertTrue(same < 2e-3) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"TestMultiGpuSerializeDeserializeSwitching traced TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_compile_script(self): torchtrt.set_device(0) @@ -108,8 +117,11 @@ def test_compile_script(self): trt_mod = torchtrt.ts.compile(self.scripted_model, **compile_spec) # Changing the device ID deliberately. It should still run on correct device ID by context switching torchtrt.set_device(1) - same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max() - self.assertTrue(same < 2e-3) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"TestMultiGpuSerializeDeserializeSwitching scripted TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) def test_suite(): diff --git a/tests/py/hw/utils.py b/tests/py/hw/utils.py new file mode 100644 index 0000000000..b1e6632ec3 --- /dev/null +++ b/tests/py/hw/utils.py @@ -0,0 +1,15 @@ +import torch + +COSINE_THRESHOLD = 0.99 + + +def cosine_similarity(gt_tensor, pred_tensor): + gt_tensor = gt_tensor.flatten().to(torch.float32) + pred_tensor = pred_tensor.flatten().to(torch.float32) + if torch.sum(gt_tensor) == 0.0 or torch.sum(pred_tensor) == 0.0: + if torch.allclose(gt_tensor, pred_tensor, atol=1e-4, rtol=1e-4, equal_nan=True): + return 1.0 + res = torch.nn.functional.cosine_similarity(gt_tensor, pred_tensor, dim=0, eps=1e-6) + res = res.cpu().detach().item() + + return res diff --git a/tests/py/integrations/test_to_backend_api.py b/tests/py/integrations/test_to_backend_api.py index 16d839b1b0..0f74a3af15 100644 --- a/tests/py/integrations/test_to_backend_api.py +++ b/tests/py/integrations/test_to_backend_api.py @@ -2,6 +2,7 @@ import torch_tensorrt as torchtrt import torch import torchvision.models as models +from utils import cosine_similarity, COSINE_THRESHOLD class TestToBackendLowering(unittest.TestCase): @@ -31,10 +32,11 @@ def setUp(self): def test_to_backend_lowering(self): trt_mod = torch._C._jit_to_backend("tensorrt", self.scripted_model, self.spec) - same = ( - (trt_mod.forward(self.input) - self.scripted_model(self.input)).abs().max() + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"TestToBackendLowering TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) - self.assertTrue(same < 2e-3) if __name__ == "__main__": diff --git a/tests/py/integrations/test_trt_intercompatibility.py b/tests/py/integrations/test_trt_intercompatibility.py index 96b47b7ccc..b938e4a1ac 100644 --- a/tests/py/integrations/test_trt_intercompatibility.py +++ b/tests/py/integrations/test_trt_intercompatibility.py @@ -3,6 +3,7 @@ import torch import torchvision.models as models import tensorrt as trt +from utils import cosine_similarity, COSINE_THRESHOLD class TestPyTorchToTRTEngine(unittest.TestCase): @@ -42,8 +43,11 @@ def test_pt_to_trt(self): device="cuda:0" ).cuda_stream, ) - same = (out - self.ts_model(self.input)).abs().max() - self.assertTrue(same < 2e-3) + cos_sim = cosine_similarity(self.model(self.input), out) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"TestPyTorchToTRTEngine TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) if __name__ == "__main__": diff --git a/tests/py/integrations/utils.py b/tests/py/integrations/utils.py new file mode 100644 index 0000000000..b1e6632ec3 --- /dev/null +++ b/tests/py/integrations/utils.py @@ -0,0 +1,15 @@ +import torch + +COSINE_THRESHOLD = 0.99 + + +def cosine_similarity(gt_tensor, pred_tensor): + gt_tensor = gt_tensor.flatten().to(torch.float32) + pred_tensor = pred_tensor.flatten().to(torch.float32) + if torch.sum(gt_tensor) == 0.0 or torch.sum(pred_tensor) == 0.0: + if torch.allclose(gt_tensor, pred_tensor, atol=1e-4, rtol=1e-4, equal_nan=True): + return 1.0 + res = torch.nn.functional.cosine_similarity(gt_tensor, pred_tensor, dim=0, eps=1e-6) + res = res.cpu().detach().item() + + return res diff --git a/tests/py/models/custom_models.py b/tests/py/models/custom_models.py new file mode 100644 index 0000000000..a19b9ca81c --- /dev/null +++ b/tests/py/models/custom_models.py @@ -0,0 +1,28 @@ +import torch +from transformers import BertModel, BertTokenizer, BertConfig + + +def BertModule(): + model_name = "bert-base-uncased" + enc = BertTokenizer.from_pretrained(model_name) + text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]" + tokenized_text = enc.tokenize(text) + masked_index = 8 + tokenized_text[masked_index] = "[MASK]" + indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) + segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] + tokens_tensor = torch.tensor([indexed_tokens]) + segments_tensors = torch.tensor([segments_ids]) + config = BertConfig( + vocab_size_or_config_json_file=32000, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + torchscript=True, + ) + model = BertModel(config) + model.eval() + model = BertModel.from_pretrained(model_name, torchscript=True) + traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) + return traced_model diff --git a/tests/py/models/test_models.py b/tests/py/models/test_models.py new file mode 100644 index 0000000000..6cc9759626 --- /dev/null +++ b/tests/py/models/test_models.py @@ -0,0 +1,153 @@ +import unittest +import torch_tensorrt as torchtrt +import torch +import torchvision.models as models +import copy +import timm +import custom_models as cm +from typing import Dict +from utils import cosine_similarity, COSINE_THRESHOLD + + +class TestModels(unittest.TestCase): + def test_resnet18(self): + self.model = models.resnet18(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + } + + trt_mod = torchtrt.compile(self.model, **compile_spec) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Resnet50 TRT outputs don't match with the original model. 
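custom_models.BertModule() above returns a torch.jit traced BertModel that expects two integer tensors, token ids and segment ids, and returns a tuple of the sequence output and the pooled output; that is why the BERT tests call the module as model(self.input, self.input) and then iterate over the paired outputs. A small usage sketch, assuming the Hugging Face weights can be downloaded in the environment:

```python
import torch
import custom_models as cm

# Mirrors how the tests drive the traced module; the random int32 ids are only
# placeholders for real token and segment ids.
bert = cm.BertModule().cuda()
tokens = torch.randint(0, 5, (1, 14), dtype=torch.int32).to("cuda")
outputs = bert(tokens, tokens)
print([tuple(o.shape) for o in outputs])  # roughly [(1, 14, 768), (1, 768)]
```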
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + def test_mobilenet_v2(self): + self.model = models.mobilenet_v2(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + } + + trt_mod = torchtrt.compile(self.model, **compile_spec) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Mobilenet v2 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + def test_efficientnet_b0(self): + self.model = ( + timm.create_model("efficientnet_b0", pretrained=True).eval().to("cuda") + ) + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + } + + trt_mod = torchtrt.compile(self.model, **compile_spec) + cos_sim = cosine_similarity(self.model(self.input), trt_mod(self.input)) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"EfficientNet-B0 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + def test_bert_base_uncased(self): + self.model = cm.BertModule().cuda() + self.input = torch.randint(0, 5, (1, 14), dtype=torch.int32).to("cuda") + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, + dtype=self.input.dtype, + format=torch.contiguous_format, + ), + torchtrt.Input( + self.input.shape, + dtype=self.input.dtype, + format=torch.contiguous_format, + ), + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + "truncate_long_and_double": True, + } + with torchtrt.logging.errors(): + trt_mod = torchtrt.ts.compile(self.model, **compile_spec) + + model_outputs = self.model(self.input, self.input) + trt_model_outputs = trt_mod(self.input, self.input) + for out, trt_out in zip(model_outputs, trt_model_outputs): + cos_sim = cosine_similarity(out, trt_out) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"HF BERT base-uncased TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + def test_resnet18_half(self): + self.model = models.resnet18(pretrained=True).eval().to("cuda") + self.input = torch.randn((1, 3, 224, 224)).to("cuda") + self.scripted_model = torch.jit.script(self.model) + self.scripted_model.half() + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input.shape, dtype=torch.half, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.half}, + } + + trt_mod = torchtrt.compile(self.scripted_model, **compile_spec) + cos_sim = cosine_similarity( + self.model.half()(self.input.half()), trt_mod(self.input.half()) + ) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Resnet50 Half TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/py/models/test_multiple_registered_engines.py b/tests/py/models/test_multiple_registered_engines.py new file mode 100644 index 0000000000..98f012597b --- /dev/null +++ b/tests/py/models/test_multiple_registered_engines.py @@ -0,0 +1,52 @@ +import unittest +import torch_tensorrt as torchtrt +import torch +import torchvision.models as models +import copy +import timm +import custom_models as cm +from typing import Dict +from utils import cosine_similarity, COSINE_THRESHOLD + + +class TestModelToEngineToModel(unittest.TestCase): + def test_multiple_engines(self): + self.resnet18 = models.resnet18(pretrained=True).eval().to("cuda") + self.resnet50 = models.resnet50(pretrained=True).eval().to("cuda") + self.input1 = torch.randn((1, 3, 224, 224)).to("cuda") + self.input2 = torch.randn((1, 3, 224, 224)).to("cuda") + + compile_spec = { + "inputs": [ + torchtrt.Input( + self.input1.shape, dtype=torch.float, format=torch.contiguous_format + ) + ], + "device": { + "device_type": torchtrt.DeviceType.GPU, + "gpu_id": 0, + }, + "enabled_precisions": {torch.float}, + } + rn18_trt_mod = torchtrt.compile(self.resnet18, **compile_spec) + rn50_trt_mod = torchtrt.compile(self.resnet50, **compile_spec) + + cos_sim = cosine_similarity( + self.resnet18(self.input1), rn18_trt_mod(self.input1) + ) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Resnet18 TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + cos_sim = cosine_similarity( + self.resnet50(self.input1), rn50_trt_mod(self.input1) + ) + self.assertTrue( + cos_sim > COSINE_THRESHOLD, + msg=f"Resnet50 TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/py/models/utils.py b/tests/py/models/utils.py new file mode 100644 index 0000000000..b1e6632ec3 --- /dev/null +++ b/tests/py/models/utils.py @@ -0,0 +1,15 @@ +import torch + +COSINE_THRESHOLD = 0.99 + + +def cosine_similarity(gt_tensor, pred_tensor): + gt_tensor = gt_tensor.flatten().to(torch.float32) + pred_tensor = pred_tensor.flatten().to(torch.float32) + if torch.sum(gt_tensor) == 0.0 or torch.sum(pred_tensor) == 0.0: + if torch.allclose(gt_tensor, pred_tensor, atol=1e-4, rtol=1e-4, equal_nan=True): + return 1.0 + res = torch.nn.functional.cosine_similarity(gt_tensor, pred_tensor, dim=0, eps=1e-6) + res = res.cpu().detach().item() + + return res diff --git a/tests/py/ptq/test_ptq_dataloader_calibrator.py b/tests/py/ptq/test_ptq_dataloader_calibrator.py index 2ee1fa5b08..79c19dadbf 100644 --- a/tests/py/ptq/test_ptq_dataloader_calibrator.py +++ b/tests/py/ptq/test_ptq_dataloader_calibrator.py @@ -81,9 +81,6 @@ def test_compile_script(self): device=torch.device("cuda:0"), ) - fp32_test_acc = compute_accuracy(self.testing_dataloader, self.model) - log(Level.Info, "[Pyt FP32] Test Acc: {:.2f}%".format(100 * fp32_test_acc)) - compile_spec = { "inputs": [torchtrt.Input([1, 3, 32, 32])], "enabled_precisions": {torch.float, torch.int8}, @@ -96,8 +93,11 @@ def test_compile_script(self): "allow_gpu_fallback": False, }, } - trt_mod = torchtrt.ts.compile(self.model, **compile_spec) + + fp32_test_acc = compute_accuracy(self.testing_dataloader, self.model) + log(Level.Info, "[Pyt FP32] Test Acc: {:.2f}%".format(100 * fp32_test_acc)) + int8_test_acc = compute_accuracy(self.testing_dataloader, trt_mod) log(Level.Info, "[TRT INT8] Test Acc: {:.2f}%".format(100 * int8_test_acc)) acc_diff = fp32_test_acc - int8_test_acc diff --git a/tests/py/ptq/test_ptq_trt_calibrator.py b/tests/py/ptq/test_ptq_trt_calibrator.py index bda117d3a5..93596c895d 100644 --- a/tests/py/ptq/test_ptq_trt_calibrator.py +++ b/tests/py/ptq/test_ptq_trt_calibrator.py @@ -75,7 +75,7 @@ def get_batch(self, names): ): return None - batch = self.dataset_iterator.next() + batch = next(self.dataset_iterator) self.current_batch_idx += self.batch_size # Treat the first element as input and others as targets. 
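The calibrator fix above replaces the Python 2 style self.dataset_iterator.next() call with the built-in next(). A two-line reminder of the difference:

```python
# Python 3 iterators implement __next__ rather than a .next() method, so the
# built-in next() is the portable way to advance a DataLoader iterator.
it = iter([1, 2, 3])
assert next(it) == 1  # works on any iterator; it.next() raises AttributeError on Python 3
```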
if isinstance(batch, list): diff --git a/tests/py/utils.py b/tests/py/utils.py new file mode 100644 index 0000000000..b1e6632ec3 --- /dev/null +++ b/tests/py/utils.py @@ -0,0 +1,15 @@ +import torch + +COSINE_THRESHOLD = 0.99 + + +def cosine_similarity(gt_tensor, pred_tensor): + gt_tensor = gt_tensor.flatten().to(torch.float32) + pred_tensor = pred_tensor.flatten().to(torch.float32) + if torch.sum(gt_tensor) == 0.0 or torch.sum(pred_tensor) == 0.0: + if torch.allclose(gt_tensor, pred_tensor, atol=1e-4, rtol=1e-4, equal_nan=True): + return 1.0 + res = torch.nn.functional.cosine_similarity(gt_tensor, pred_tensor, dim=0, eps=1e-6) + res = res.cpu().detach().item() + + return res diff --git a/tests/util/util.cpp b/tests/util/util.cpp index 13d0d18566..8359d31576 100644 --- a/tests/util/util.cpp +++ b/tests/util/util.cpp @@ -1,10 +1,23 @@ #include "core/util/prelude.h" #include "torch/script.h" +#include "torch/torch.h" namespace torch_tensorrt { namespace tests { namespace util { +bool cosineSimEqual(const at::Tensor& computed_tensor, const at::Tensor& gt_tensor, float threshold = 0.99f) { + torch::Tensor cosine_sim = torch::nn::functional::cosine_similarity( + computed_tensor.flatten(), gt_tensor.flatten(), torch::nn::functional::CosineSimilarityFuncOptions().dim(0)); + std::ostringstream ss; + ss << computed_tensor << std::endl << gt_tensor << std::endl; + LOG_GRAPH(ss.str()); + LOG_GRAPH(std::string("Cosine Similarity score: ") + std::to_string(cosine_sim.item())); + LOG_GRAPH(std::string("Acceptable Threshold: ") + std::to_string(threshold)); + + return cosine_sim.item() >= threshold; +} + bool almostEqual(const at::Tensor& computed_tensor, const at::Tensor& gt_tensor, float atol = 1e-8, float rtol = 1e-5) { std::ostringstream ss; ss << computed_tensor << std::endl << gt_tensor << std::endl; diff --git a/tests/util/util.h b/tests/util/util.h index f39e2a5766..1ea62a16e0 100644 --- a/tests/util/util.h +++ b/tests/util/util.h @@ -11,6 +11,8 @@ namespace torch_tensorrt { namespace tests { namespace util { +bool cosineSimEqual(const at::Tensor& computed_tensor, const at::Tensor& gt_tensor, float threshold); + bool almostEqual(const at::Tensor& computed_tensor, const at::Tensor& gt_tensor, float atol = 1e-8, float rtol = 1e-5); bool exactlyEqual(const at::Tensor& a, const at::Tensor& b); diff --git a/tools/perf/README.md b/tools/perf/README.md index 4c4a58bfd0..45630b4f29 100644 --- a/tools/perf/README.md +++ b/tools/perf/README.md @@ -4,7 +4,9 @@ This is a comprehensive Python benchmark suite to run perf runs using different 1. Torch 2. Torch-TensorRT -3. TensorRT +3. FX-TRT +4. TensorRT + Note: Please note that for ONNX models, user can convert the ONNX model to TensorRT serialized engine and then use this package. @@ -25,21 +27,35 @@ Benchmark scripts depends on following Python packages in addition to requiremen │ └── vgg16.yml ├── models ├── perf_run.py +├── hub.py +├── custom_models.py +├── requirements.txt +├── benchmark.sh └── README.md ``` -Please save your configuration files at config directory. Similarly, place your model files at models path. + + +* `config` - Directory which contains sample yaml configuration files for VGG network. 
+* `models` - Model directory +* `perf_run.py` - Performance benchmarking script which supports torch, torch_tensorrt, fx2trt, tensorrt backends +* `hub.py` - Script to download torchscript models for VGG16, Resnet50, EfficientNet-B0, VIT, HF-BERT +* `custom_models.py` - Script which includes custom models other than torchvision and timm (eg: HF BERT) +* `utils.py` - utility functions script +* `benchmark.sh` - This is used for internal performance testing of VGG16, Resnet50, EfficientNet-B0, VIT, HF-BERT. ## Usage +There are two ways you can run a performance benchmark. + +### Using YAML config files + To run the benchmark for a given configuration file: -``` +```python python perf_run.py --config=config/vgg16.yml ``` -## Configuration - There are two sample configuration files added. * vgg16.yml demonstrates a configuration with all the supported backends (Torch, Torch-TensorRT, TensorRT) @@ -48,23 +64,17 @@ There are two sample configuration files added. ### Supported fields -| Name | Supported Values | Description | -| --- | --- | --- | -| backend | all, torch, torch_tensorrt, tensorrt | Supported backends for inference. | -| input | - | Input binding names. Expected to list shapes of each input bindings | -| model | - | Configure the model filename and name | -| filename | - | Model file name to load from disk. | -| name | - | Model name | -| runtime | - | Runtime configurations | -| device | 0 | Target device ID to run inference. Range depends on available GPUs | -| precision | fp32, fp16 or half, int8 | Target precision to run inference. int8 cannot be used with 'all' backend | -| calibration_cache | - | Calibration cache file expected for torch_tensorrt runtime in int8 precision | - -Note: -1. Please note that torch runtime perf is not supported for int8 yet. -2. Torchscript module filename should end with .jit.pt otherwise it will be treated as a TensorRT engine. - - +| Name | Supported Values | Description | +| ----------------- | ------------------------------------ | ------------------------------------------------------------ | +| backend | all, torch, torch_tensorrt, tensorrt | Supported backends for inference. | +| input | - | Input binding names. Expected to list shapes of each input bindings | +| model | - | Configure the model filename and name | +| filename | - | Model file name to load from disk. | +| name | - | Model name | +| runtime | - | Runtime configurations | +| device | 0 | Target device ID to run inference. Range depends on available GPUs | +| precision | fp32, fp16 or half, int8 | Target precision to run inference. int8 cannot be used with 'all' backend | +| calibration_cache | - | Calibration cache file expected for torch_tensorrt runtime in int8 precision | Additional sample use case: @@ -88,3 +98,41 @@ runtime: - fp32 - fp16 ``` + +Note: + +1. Please note that measuring INT8 performance is only supported via a `calibration cache` file or QAT mode for `torch_tensorrt` backend. +2. TensorRT engine filename should end with `.plan` otherwise it will be treated as Torchscript module. + +### Using CompileSpec options via CLI + +Here are the list of `CompileSpec` options that can be provided directly to compile the pytorch module + +* `--backends` : Comma separated string of backends. Eg: torch,torch_tensorrt, tensorrt or fx2trt +* `--model` : Name of the model file (Can be a torchscript module or a tensorrt engine (ending in `.plan` extension)). 
If the backend is `fx2trt`, the input should be a Pytorch module (instead of a torchscript module) and the options for model are (`vgg16` | `resnet50` | `efficientnet_b0`) +* `--inputs` : List of input shapes & dtypes. Eg: (1, 3, 224, 224)@fp32 for Resnet or (1, 128)@int32;(1, 128)@int32 for BERT +* `--batch_size` : Batch size +* `--precision` : Comma separated list of precisions to build TensorRT engine Eg: fp32,fp16 +* `--device` : Device ID +* `--truncate` : Truncate long and double weights in the network in Torch-TensorRT +* `--is_trt_engine` : Boolean flag to be enabled if the model file provided is a TensorRT engine. +* `--report` : Path of the output file where performance summary is written. + +Eg: + +``` + python perf_run.py --model ${MODELS_DIR}/vgg16_scripted.jit.pt \ + --precision fp32,fp16 --inputs="(1, 3, 224, 224)@fp32" \ + --batch_size 1 \ + --backends torch,torch_tensorrt,tensorrt \ + --report "vgg_perf_bs1.txt" +``` + +### Example models + +This tool benchmarks any pytorch model or torchscript module. As an example, we provide VGG16, Resnet50, EfficientNet-B0, VIT, HF-BERT models in `hub.py` that we internally test for performance. +The torchscript modules for these models can be generated by running +``` +python hub.py +``` +You can refer to `benchmark.sh` on how we run/benchmark these models. diff --git a/tools/perf/benchmark.sh b/tools/perf/benchmark.sh new file mode 100644 index 0000000000..b84061025d --- /dev/null +++ b/tools/perf/benchmark.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +MODELS_DIR="models" + +# Download the Torchscript models +python hub.py + +batch_sizes=(1 2 4 8 16 32 64 128 256) + +#Benchmark VGG16 model +echo "Benchmarking VGG16 model" +for bs in ${batch_sizes[@]} +do + python perf_run.py --model ${MODELS_DIR}/vgg16_scripted.jit.pt \ + --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \ + --batch_size ${bs} \ + --backends torch,torch_tensorrt,tensorrt \ + --report "vgg_perf_bs${bs}.txt" +done + +# Benchmark Resnet50 model +echo "Benchmarking Resnet50 model" +for bs in ${batch_sizes[@]} +do + python perf_run.py --model ${MODELS_DIR}/resnet50_scripted.jit.pt \ + --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \ + --batch_size ${bs} \ + --backends torch,torch_tensorrt,tensorrt \ + --report "rn50_perf_bs${bs}.txt" +done + +# Benchmark VIT model +echo "Benchmarking VIT model" +for bs in ${batch_sizes[@]} +do + python perf_run.py --model ${MODELS_DIR}/vit_scripted.jit.pt \ + --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \ + --batch_size ${bs} \ + --backends torch,torch_tensorrt,tensorrt \ + --report "vit_perf_bs${bs}.txt" +done + +# Benchmark EfficientNet-B0 model +echo "Benchmarking EfficientNet-B0 model" +for bs in ${batch_sizes[@]} +do + python perf_run.py --model ${MODELS_DIR}/efficientnet_b0_scripted.jit.pt \ + --precision fp32,fp16 --inputs="(${bs}, 3, 224, 224)" \ + --batch_size ${bs} \ + --backends torch,torch_tensorrt,tensorrt \ + --report "eff_b0_perf_bs${bs}.txt" +done + +# Benchmark BERT model +echo "Benchmarking Huggingface BERT base model" +for bs in ${batch_sizes[@]} +do + python perf_run.py --model ${MODELS_DIR}/bert_base_uncased_traced.jit.pt \ + --precision fp32 --inputs="(${bs}, 128)@int32;(${bs}, 128)@int32" \ + --batch_size ${bs} \ + --backends torch,torch_tensorrt \ + --truncate \ + --report "bert_base_perf_bs${bs}.txt" +done diff --git a/tools/perf/config/vgg16.yml b/tools/perf/config/vgg16.yml index 458dc1b1f6..d88d489458 100755 --- a/tools/perf/config/vgg16.yml +++ b/tools/perf/config/vgg16.yml @@ -8,8 +8,9 @@ input: - 
224 - 224 num_inputs: 1 + batch_size: 1 model: - filename: models/vgg16_traced.jit.pt + filename: models/vgg16_scripted.jit.pt name: vgg16 runtime: device: 0 diff --git a/tools/perf/custom_models.py b/tools/perf/custom_models.py new file mode 100644 index 0000000000..a8b8a5dae0 --- /dev/null +++ b/tools/perf/custom_models.py @@ -0,0 +1,30 @@ +import torch +import torch.nn as nn +from transformers import BertModel, BertTokenizer, BertConfig +import torch.nn.functional as F + + +def BertModule(): + model_name = "bert-base-uncased" + enc = BertTokenizer.from_pretrained(model_name) + text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]" + tokenized_text = enc.tokenize(text) + masked_index = 8 + tokenized_text[masked_index] = "[MASK]" + indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) + segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] + tokens_tensor = torch.tensor([indexed_tokens]) + segments_tensors = torch.tensor([segments_ids]) + config = BertConfig( + vocab_size_or_config_json_file=32000, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + torchscript=True, + ) + model = BertModel(config) + model.eval() + model = BertModel.from_pretrained(model_name, torchscript=True) + traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) + return traced_model diff --git a/tools/perf/hub.py b/tools/perf/hub.py new file mode 100644 index 0000000000..e54734f8a1 --- /dev/null +++ b/tools/perf/hub.py @@ -0,0 +1,132 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision.models as models +import timm +from transformers import BertModel, BertTokenizer, BertConfig +import os +import json +import custom_models as cm + +torch.hub._validate_not_a_forked_repo = lambda a, b, c: True + +torch_version = torch.__version__ + +# Detect case of no GPU before deserialization of models on GPU +if not torch.cuda.is_available(): + raise Exception( + "No GPU found. 
Please check if installed torch version is compatible with CUDA version" + ) + +# Downloads all model files again if manifest file is not present +MANIFEST_FILE = "model_manifest.json" + +BENCHMARK_MODELS = { + "vgg16": {"model": models.vgg16(weights=None), "path": "script"}, + "resnet50": {"model": models.resnet50(weights=None), "path": "script"}, + "efficientnet_b0": { + "model": timm.create_model("efficientnet_b0", pretrained=True), + "path": "script", + }, + "vit": { + "model": timm.create_model("vit_base_patch16_224", pretrained=True), + "path": "script", + }, + "bert_base_uncased": {"model": cm.BertModule(), "path": "trace"}, +} + + +def get(n, m, manifest): + print("Downloading {}".format(n)) + traced_filename = "models/" + n + "_traced.jit.pt" + script_filename = "models/" + n + "_scripted.jit.pt" + x = torch.ones((1, 3, 300, 300)).cuda() + if n == "bert-base-uncased": + traced_model = m["model"] + torch.jit.save(traced_model, traced_filename) + manifest.update({n: [traced_filename]}) + else: + m["model"] = m["model"].eval().cuda() + if m["path"] == "both" or m["path"] == "trace": + trace_model = torch.jit.trace(m["model"], [x]) + torch.jit.save(trace_model, traced_filename) + manifest.update({n: [traced_filename]}) + if m["path"] == "both" or m["path"] == "script": + script_model = torch.jit.script(m["model"]) + torch.jit.save(script_model, script_filename) + if n in manifest.keys(): + files = list(manifest[n]) if type(manifest[n]) != list else manifest[n] + files.append(script_filename) + manifest.update({n: files}) + else: + manifest.update({n: [script_filename]}) + return manifest + + +def download_models(version_matches, manifest): + # Download all models if torch version is different than model version + if not version_matches: + for n, m in BENCHMARK_MODELS.items(): + manifest = get(n, m, manifest) + else: + for n, m in BENCHMARK_MODELS.items(): + scripted_filename = "models/" + n + "_scripted.jit.pt" + traced_filename = "models/" + n + "_traced.jit.pt" + # Check if model file exists on disk + if ( + ( + m["path"] == "both" + and os.path.exists(scripted_filename) + and os.path.exists(traced_filename) + ) + or (m["path"] == "script" and os.path.exists(scripted_filename)) + or (m["path"] == "trace" and os.path.exists(traced_filename)) + ): + print("Skipping {} ".format(n)) + continue + manifest = get(n, m, manifest) + + +def main(): + manifest = None + version_matches = False + manifest_exists = False + + # Check if Manifest file exists or is empty + if not os.path.exists(MANIFEST_FILE) or os.stat(MANIFEST_FILE).st_size == 0: + manifest = {"version": torch_version} + + # Creating an empty manifest file for overwriting post setup + os.system("touch {}".format(MANIFEST_FILE)) + else: + manifest_exists = True + + # Load manifest if already exists + with open(MANIFEST_FILE, "r") as f: + manifest = json.load(f) + if manifest["version"] == torch_version: + version_matches = True + else: + print( + "Torch version: {} mismatches \ + with manifest's version: {}. 
Re-downloading \ + all models".format( + torch_version, manifest["version"] + ) + ) + + # Overwrite the manifest version as current torch version + manifest["version"] = torch_version + + download_models(version_matches, manifest) + + # Write updated manifest file to disk + with open(MANIFEST_FILE, "r+") as f: + data = f.read() + f.seek(0) + record = json.dumps(manifest) + f.write(record) + f.truncate() + + +main() diff --git a/tools/perf/perf_run.py b/tools/perf/perf_run.py index f0386f4e5a..fbdf3b6c40 100644 --- a/tools/perf/perf_run.py +++ b/tools/perf/perf_run.py @@ -15,7 +15,17 @@ # Importing supported Backends import torch import torch_tensorrt as torchtrt +from torch_tensorrt.fx.lower import compile +from torch_tensorrt.fx.utils import LowerPrecision + import tensorrt as trt +from utils import ( + parse_inputs, + parse_backends, + precision_to_dtype, + parse_precisions, + BENCHMARK_MODELS, +) WARMUP_ITER = 10 results = [] @@ -49,8 +59,8 @@ def get(self, key, default_value=None): # Runs inference using Torch backend -def run_torch(model, input_tensors, params, precision): - print("Running Torch for precision: ", precision) +def run_torch(model, input_tensors, params, precision, batch_size): + print("Running Torch for precision: ", precision, " batch_size : ", batch_size) iters = params.get("iterations", 20) # Warm up @@ -69,19 +79,25 @@ def run_torch(model, input_tensors, params, precision): end_time = timeit.default_timer() meas_time = end_time - start_time timings.append(meas_time) - print("Iteration {}: {:.6f} s".format(i, end_time - start_time)) - printStats("Torch", timings, precision) + recordStats("Torch", timings, precision, batch_size) # Runs inference using Torch-TensorRT backend -def run_torch_tensorrt(model, input_tensors, params, precision): - print("Running Torch-TensorRT") - +def run_torch_tensorrt( + model, input_tensors, params, precision, truncate_long_and_double, batch_size +): + print( + "Running Torch-TensorRT for precision: ", + precision, + " batch_size : ", + batch_size, + ) # Compiling Torch-TensorRT model compile_settings = { "inputs": input_tensors, "enabled_precisions": {precision_to_dtype(precision)}, + "truncate_long_and_double": truncate_long_and_double, } if precision == "int8": @@ -106,9 +122,47 @@ def run_torch_tensorrt(model, input_tensors, params, precision): end_time = timeit.default_timer() meas_time = end_time - start_time timings.append(meas_time) - print("Iteration {}: {:.6f} s".format(i, end_time - start_time)) - printStats("Torch-TensorRT", timings, precision) + recordStats("Torch-TensorRT", timings, precision, batch_size) + + +# Runs inference using FX2TRT backend +def run_fx2trt(model, input_tensors, params, precision, batch_size): + print("Running FX2TRT for precision: ", precision, " batch_size : ", batch_size) + if precision == "fp32": + precision = LowerPrecision.FP32 + elif precision == "fp16": + precision = LowerPrecision.FP16 + model.half() + input_tensors = [tensor.half() for tensor in input_tensors] + # Run lowering eager mode benchmark + model = compile( + model, + input_tensors, + max_batch_size=batch_size, + lower_precision=precision, + verbose_log=False, + ) + + iters = params.get("iterations", 20) + # Warm up + with torch.no_grad(): + for _ in range(WARMUP_ITER): + features = model(*input_tensors) + + torch.cuda.synchronize() + + timings = [] + with torch.no_grad(): + for i in range(iters): + start_time = timeit.default_timer() + features = model(*input_tensors) + torch.cuda.synchronize() + end_time = timeit.default_timer() + 
meas_time = end_time - start_time + timings.append(meas_time) + + recordStats("FX-TensorRT", timings, precision, batch_size) def torch_dtype_from_trt(dtype): @@ -135,7 +189,15 @@ def torch_device_from_trt(device): return TypeError("%s is not supported by torch" % device) -def run_tensorrt(model, input_tensors, params, precision, is_trt_engine=False): +def run_tensorrt( + model, + input_tensors, + params, + precision, + truncate_long_and_double=False, + is_trt_engine=False, + batch_size=1, +): engine = None # If the model file is a TensorRT engine then directly deserialize and run inference @@ -144,10 +206,11 @@ def run_tensorrt(model, input_tensors, params, precision, is_trt_engine=False): compile_settings = { "inputs": input_tensors, "enabled_precisions": {precision_to_dtype(precision)}, + "truncate_long_and_double": truncate_long_and_double, } print("Converting method to TensorRT engine...") - with torch.no_grad(): + with torch.no_grad(), torchtrt.logging.errors(): model = torchtrt.ts.convert_method_to_trt_engine( model, "forward", **compile_settings ) @@ -156,17 +219,15 @@ def run_tensorrt(model, input_tensors, params, precision, is_trt_engine=False): with trt.Logger() as logger, trt.Runtime(logger) as runtime: engine = runtime.deserialize_cuda_engine(model) - print("Running TensorRT") + print("Running TensorRT for precision: ", precision, " batch_size : ", batch_size) iters = params.get("iterations", 20) - batch_size = params.get("batch", 1) # Compiling the bindings bindings = engine.num_bindings * [None] - k = 0 for idx, _ in enumerate(bindings): dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx)) - shape = (batch_size,) + tuple(engine.get_binding_shape(idx)) + shape = tuple(engine.get_binding_shape(idx)) device = torch_device_from_trt(engine.get_location(idx)) if not engine.binding_is_input(idx): # Output bindings @@ -180,29 +241,32 @@ def run_tensorrt(model, input_tensors, params, precision, is_trt_engine=False): timings = [] with engine.create_execution_context() as context: for i in range(WARMUP_ITER): - context.execute_async( - batch_size, bindings, torch.cuda.current_stream().cuda_stream - ) + context.execute_async_v2(bindings, torch.cuda.current_stream().cuda_stream) torch.cuda.synchronize() for i in range(iters): start_time = timeit.default_timer() - context.execute_async( - batch_size, bindings, torch.cuda.current_stream().cuda_stream - ) + context.execute_async_v2(bindings, torch.cuda.current_stream().cuda_stream) torch.cuda.synchronize() end_time = timeit.default_timer() meas_time = end_time - start_time timings.append(meas_time) - print("Iterations {}: {:.6f} s".format(i, end_time - start_time)) - printStats("TensorRT", timings, precision) + recordStats("TensorRT", timings, precision, batch_size) # Deploys inference run for different backend configurations -def run(model, input_tensors, params, precision, is_trt_engine=False): - for backend in params.get("backend"): - +def run( + model, + backends, + input_tensors, + params, + precision, + truncate_long_and_double=False, + batch_size=1, + is_trt_engine=False, +): + for backend in backends: if precision == "int8": if backend == "all" or backend == "torch": print( @@ -219,22 +283,55 @@ def run(model, input_tensors, params, precision, is_trt_engine=False): return False if backend == "all": - run_torch(model, input_tensors, params, precision) - run_torch_tensorrt(model, input_tensors, params, precision) - run_tensorrt(model, input_tensors, params, precision, is_trt_engine) + run_torch(model, input_tensors, params, 
precision, batch_size) + run_torch_tensorrt( + model, + input_tensors, + params, + precision, + truncate_long_and_double, + batch_size, + ) + run_tensorrt( + model, + input_tensors, + params, + precision, + truncate_long_and_double, + is_trt_engine, + batch_size, + ) elif backend == "torch": - run_torch(model, input_tensors, params, precision) + run_torch(model, input_tensors, params, precision, batch_size) elif backend == "torch_tensorrt": - run_torch_tensorrt(model, input_tensors, params, precision) + run_torch_tensorrt( + model, + input_tensors, + params, + precision, + truncate_long_and_double, + batch_size, + ) + + elif backend == "fx2trt": + run_fx2trt(model, input_tensors, params, precision, batch_size) elif backend == "tensorrt": - run_tensorrt(model, input_tensors, params, precision, is_trt_engine) + run_tensorrt( + model, + input_tensors, + params, + precision, + truncate_long_and_double, + is_trt_engine, + batch_size, + ) # Generate report -def printStats(backend, timings, precision, batch_size=1): +def recordStats(backend, timings, precision, batch_size=1): times = np.array(timings) steps = len(times) speeds = batch_size / times @@ -245,43 +342,16 @@ def printStats(backend, timings, precision, batch_size=1): speed_mean = np.mean(speeds) speed_med = np.median(speeds) - msg = ( - "\n%s =================================\n" - "batch size=%d, num iterations=%d\n" - " Median FPS: %.1f, mean: %.1f\n" - " Median latency: %.6f, mean: %.6f, 99th_p: %.6f, std_dev: %.6f\n" - ) % ( - backend, - batch_size, - steps, - speed_med, - speed_mean, - time_med, - time_mean, - time_99th, - time_std, - ) - print(msg) - meas = { + stats = { "Backend": backend, - "precision": precision, + "Precision": precision, + "Batch size": batch_size, "Median(FPS)": speed_med, "Mean(FPS)": speed_mean, - "Median-Latency(ms)": time_med, - "Mean-Latency(ms)": time_mean, - "99th_p": time_99th, - "std_dev": time_std, + "Median-Latency(ms)": time_med * 1000, + "Mean-Latency(ms)": time_mean * 1000, } - results.append(meas) - - -def precision_to_dtype(pr): - if pr == "fp32": - return torch.float - elif pr == "fp16" or pr == "half": - return torch.half - else: - return torch.int8 + results.append(stats) def load_model(params): @@ -289,15 +359,21 @@ def load_model(params): is_trt_engine = False # Load torch model traced/scripted model_file = params.get("model").get("filename") + try: + model_name = params.get("model").get("name") + except: + model_name = model_file - if model_file.endswith(".jit.pt"): - model = torch.jit.load(model_file).cuda() - else: + print("Loading model: ", model_file) + if model_file.endswith(".plan"): is_trt_engine = True # Read the TensorRT engine file with open(model_file, "rb") as fin: model = fin.read() - return model, is_trt_engine + else: + model = torch.jit.load(model_file).cuda() + + return model, model_name, is_trt_engine if __name__ == "__main__": @@ -306,57 +382,147 @@ def load_model(params): ) arg_parser.add_argument( "--config", + type=str, help="Load YAML based configuration file to run the inference. If this is used other params will be ignored", ) + # The following options are manual user provided settings + arg_parser.add_argument( + "--backends", + type=str, + help="Comma separated string of backends. Eg: torch,torch_tensorrt,fx2trt,tensorrt", + ) + arg_parser.add_argument("--model", type=str, help="Name of the model file") + arg_parser.add_argument( + "--inputs", + type=str, + help="List of input shapes. 
Eg: (1, 3, 224, 224)@fp32 for Resnet or (1, 128)@int32;(1, 128)@int32 for BERT", + ) + arg_parser.add_argument( + "--batch_size", type=int, default=1, help="Batch size to build and run" + ) + arg_parser.add_argument( + "--precision", + default="fp32", + type=str, + help="Comma separated list of precisions to build TensorRT engine Eg: fp32,fp16", + ) + arg_parser.add_argument( + "--calibration_cache", type=str, help="Name of the calibration cache file" + ) + arg_parser.add_argument("--device", type=int, help="device id") + arg_parser.add_argument( + "--truncate", + action="store_true", + help="Truncate long and double weights in the network in Torch-TensorRT", + ) + arg_parser.add_argument( + "--is_trt_engine", + action="store_true", + help="Boolean flag to determine if the user provided model is a TRT engine or not", + ) + arg_parser.add_argument( + "--report", + type=str, + help="Path of the output file where performance summary is written.", + ) args = arg_parser.parse_args() - parser = ConfigParser(args.config) - # Load YAML params - params = parser.read_config() - print("Loading model: ", params.get("model").get("filename")) - - model = None - - # Default device is set to 0. Configurable using yaml config file. - torch.cuda.set_device(params.get("runtime").get("device", 0)) - - # Load the model file from disk. If the loaded file is TensorRT engine then is_trt_engine is returned as True - model, is_trt_engine = load_model(params) cudnn.benchmark = True - # Create random input tensor of certain size torch.manual_seed(12345) + model_name = "Model" + if args.config: + parser = ConfigParser(args.config) + # Load YAML params + params = parser.read_config() + model, model_name, is_trt_engine = load_model(params) + + # Default device is set to 0. Configurable using yaml config file. + torch.cuda.set_device(params.get("runtime").get("device", 0)) + + num_input = params.get("input").get("num_inputs") + truncate_long_and_double = params.get("runtime").get( + "truncate_long_and_double", False + ) + batch_size = params.get("input").get("batch_size", 1) + for precision in params.get("runtime").get("precision", "fp32"): + input_tensors = [] + num_input = params.get("input").get("num_inputs", 1) + for i in range(num_input): + inp_tensor = params.get("input").get("input" + str(i)) + input_tensors.append( + torch.randint( + 0, + 2, + tuple(d for d in inp_tensor), + dtype=precision_to_dtype(precision), + ).cuda() + ) - num_input = params.get("input").get("num_inputs") - for precision in params.get("runtime").get("precision", "fp32"): - input_tensors = [] - num_input = params.get("input").get("num_inputs", 1) - for i in range(num_input): - inp_tensor = params.get("input").get("input" + str(i)) - input_tensors.append( - torch.randint( - 0, - 2, - tuple(d for d in inp_tensor), - dtype=precision_to_dtype(precision), - ).cuda() - ) + if is_trt_engine: + print( + "Warning, TensorRT engine file is configured. Please make sure the precision matches with the TRT engine for reliable results" + ) - if is_trt_engine: - print( - "Warning, TensorRT engine file is configured. 
Please make sure the precision matches with the TRT engine for reliable results" + if not is_trt_engine and (precision == "fp16" or precision == "half"): + # If model is TensorRT serialized engine then model.half will report failure + model = model.half() + + backends = params.get("backend") + # Run inference + status = run( + model, + backends, + input_tensors, + params, + precision, + truncate_long_and_double, + batch_size, + is_trt_engine, + ) + else: + params = vars(args) + model_name = params["model"] + if os.path.exists(model_name): + print("Loading user provided model: ", model_name) + model = torch.jit.load(model_name).cuda().eval() + elif model_name in BENCHMARK_MODELS: + model = BENCHMARK_MODELS[model_name]["model"].eval().cuda() + else: + raise ValueError( + "Invalid model name. Please provide a torchscript model file or model name (among the following options vgg16|resnet50|efficientnet_b0|vit)" ) - if not is_trt_engine and precision == "fp16" or precision == "half": - # If model is TensorRT serialized engine then model.half will report failure - model = model.half() + backends = parse_backends(params["backends"]) + truncate_long_and_double = params["truncate"] + batch_size = params["batch_size"] + is_trt_engine = params["is_trt_engine"] + precisions = parse_precisions(params["precision"]) - # Run inference - status = run(model, input_tensors, params, precision, is_trt_engine) - if status == False: - continue + for precision in precisions: + input_tensors = parse_inputs( + params["inputs"], precision_to_dtype(precision) + ) + if not is_trt_engine and (precision == "fp16" or precision == "half"): + # If model is TensorRT serialized engine then model.half will report failure + model = model.half() + status = run( + model, + backends, + input_tensors, + params, + precision, + truncate_long_and_double, + batch_size, + is_trt_engine, + ) # Generate report - print("Model Summary:") + print("Model Summary: ", model_name) summary = pd.DataFrame(results) print(summary) + if args.report: + with open(args.report, "w") as file: + file.write("Model Summary: " + model_name + "\n") + file.write(summary.to_string()) + file.close() diff --git a/tools/perf/utils.py b/tools/perf/utils.py new file mode 100644 index 0000000000..3d63dcd4b7 --- /dev/null +++ b/tools/perf/utils.py @@ -0,0 +1,61 @@ +import torch +import torch_tensorrt +import custom_models as cm +import torchvision.models as models +import timm + +BENCHMARK_MODELS = { + "vgg16": {"model": models.vgg16(pretrained=True), "path": "script"}, + "resnet50": { + "model": torch.hub.load("pytorch/vision:v0.9.0", "resnet50", pretrained=True), + "path": "script", + }, + "efficientnet_b0": { + "model": timm.create_model("efficientnet_b0", pretrained=True), + "path": "script", + }, + "vit": { + "model": timm.create_model("vit_base_patch16_224", pretrained=True), + "path": "script", + }, + "bert_base_uncased": {"model": cm.BertModule(), "path": "trace"}, +} + + +def precision_to_dtype(pr): + if pr == "fp32": + return torch.float + elif pr == "fp16" or pr == "half": + return torch.half + elif pr == "int32": + return torch.int32 + elif pr == "bool": + return torch.bool + else: + return torch.float32 + + +def parse_inputs(user_inputs, dtype): + parsed_inputs = user_inputs.split(";") + torchtrt_inputs = [] + for input in parsed_inputs: + input_shape = [] + input_shape_and_dtype = input.split("@") + dtype = ( + precision_to_dtype(input_shape_and_dtype[1]) + if len(input_shape_and_dtype) == 2 + else dtype + ) + for input_dim in 
input_shape_and_dtype[0][1:-1].split(","): + input_shape.append(int(input_dim)) + torchtrt_inputs.append(torch.randint(0, 5, input_shape, dtype=dtype).cuda()) + + return torchtrt_inputs + + +def parse_backends(backends): + return backends.split(",") + + +def parse_precisions(precisions): + return precisions.split(",")
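
Note on the new manual CLI path: the `--inputs` flag introduced in perf_run.py takes a semicolon-separated list of shape@dtype specs (the format shown in the argparse help above), which utils.parse_inputs turns into random CUDA tensors. The following is a minimal, standalone sketch of that convention, not the file itself: the helper name parse_inputs_sketch is hypothetical, and CPU tensors are used so the snippet runs without a GPU (the real parser calls .cuda() on each tensor).

import torch

# dtype keywords accepted by the spec, mirroring precision_to_dtype in tools/perf/utils.py
_DTYPES = {"fp32": torch.float32, "fp16": torch.half, "int32": torch.int32, "bool": torch.bool}

def parse_inputs_sketch(spec, default_dtype=torch.float32):
    """Parse a '(d0, d1, ...)@dtype;(d0, ...)@dtype' spec into random tensors."""
    tensors = []
    for entry in spec.split(";"):
        shape_part, _, dtype_part = entry.partition("@")
        dtype = _DTYPES.get(dtype_part.strip(), default_dtype)
        shape = [int(d) for d in shape_part.strip().strip("()").split(",")]
        tensors.append(torch.randint(0, 5, tuple(shape), dtype=dtype))
    return tensors

# One image-shaped fp32 tensor (ResNet-style) and two int32 token tensors (BERT-style)
print([tuple(t.shape) for t in parse_inputs_sketch("(1, 3, 224, 224)@fp32")])
print([t.dtype for t in parse_inputs_sketch("(1, 128)@int32;(1, 128)@int32")])

With these pieces, a typical manual invocation would look something like: python perf_run.py --backends torch,torch_tensorrt,fx2trt --model models/vgg16_scripted.jit.pt --inputs "(1, 3, 224, 224)@fp32" --batch_size 1 --precision fp32,fp16 --report report.txt (flag names are taken from the argparse definitions above; the exact combination is illustrative only).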
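
For reference, the renamed recordStats() no longer prints per-backend blocks; each run appends one row of stats and the final summary is a pandas DataFrame that is printed and, with --report, written via to_string(). A small sketch of that flow, using made-up timings purely for illustration:

import numpy as np
import pandas as pd

# Pretend per-iteration latencies (seconds) for one backend/precision combination
timings = np.array([0.0102, 0.0098, 0.0101])
batch_size = 1

# Shape of the dict that recordStats() appends to the global `results` list
results = [{
    "Backend": "Torch-TensorRT",
    "Precision": "fp16",
    "Batch size": batch_size,
    "Median(FPS)": np.median(batch_size / timings),
    "Mean(FPS)": np.mean(batch_size / timings),
    "Median-Latency(ms)": np.median(timings) * 1000,
    "Mean-Latency(ms)": np.mean(timings) * 1000,
}]

# Equivalent of the "Model Summary" section at the end of perf_run.py
print(pd.DataFrame(results).to_string())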