diff --git a/.github/workflows/docker_builder.yml b/.github/workflows/docker_builder.yml
index 87148156a8..c20a3d37a6 100644
--- a/.github/workflows/docker_builder.yml
+++ b/.github/workflows/docker_builder.yml
@@ -44,7 +44,7 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
-      # Automatically detect TensorRT and cuDNN default versions for Torch-TRT build
+      # Automatically detect the TensorRT default version for Torch-TRT build
       - name: Build Docker image
         env:
           DOCKER_TAG: ${{ env.DOCKER_REGISTRY }}/${{ steps.fix_slashes.outputs.container_name }}
@@ -52,10 +52,8 @@
           python3 -m pip install pyyaml
           TRT_VERSION=$(python3 -c "import versions; versions.tensorrt_version()")
           echo "TRT VERSION = ${TRT_VERSION}"
-          CUDNN_VERSION=$(python3 -c "import versions; versions.cudnn_version()")
-          echo "CUDNN VERSION = ${CUDNN_VERSION}"
-          DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=$TRT_VERSION --build-arg CUDNN_VERSION=$CUDNN_VERSION -f docker/Dockerfile --tag $DOCKER_TAG .
+          DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=$TRT_VERSION -f docker/Dockerfile --tag $DOCKER_TAG .
 
       - name: Push Docker image
         env:
diff --git a/README.md b/README.md
index d2b7c69a6b..3e74a79688 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ Torch-TensorRT is distributed in the ready-to-run NVIDIA [NGC PyTorch Container]
 
 ## Building a docker container for Torch-TensorRT
 
-We provide a `Dockerfile` in `docker/` directory. It expects a PyTorch NGC container as a base but can easily be modified to build on top of any container that provides, PyTorch, CUDA, cuDNN and TensorRT. The dependency libraries in the container can be found in the release notes.
+We provide a `Dockerfile` in the `docker/` directory. It expects a PyTorch NGC container as a base but can easily be modified to build on top of any container that provides PyTorch, CUDA, and TensorRT. The dependency libraries in the container can be found in the release notes.
 
 Please follow this instruction to build a Docker container.
 
@@ -152,14 +152,13 @@ bash ./compile.sh
 You need to start by having CUDA installed on the system, LibTorch will automatically be pulled for you by bazel,
 then you have two options.
 
-#### 1. Building using cuDNN & TensorRT tarball distributions
+#### 1. Building using TensorRT tarball distributions
 
 > This is recommended so as to build Torch-TensorRT hermetically and insures any bugs are not caused by version issues
 > Make sure when running Torch-TensorRT that these versions of the libraries are prioritized in your `$LD_LIBRARY_PATH`
 
-1. You need to download the tarball distributions of TensorRT and cuDNN from the NVIDIA website.
-    - https://developer.nvidia.com/cudnn
+1. You need to download the tarball distribution of TensorRT from the NVIDIA website.
     - https://developer.nvidia.com/tensorrt
 2. Place these files in a directory (the directories `third_party/dist_dir/[x86_64-linux-gnu | aarch64-linux-gnu]` exist for this purpose)
 3. Compile using:
@@ -168,25 +167,16 @@ then you have two options.
 bazel build //:libtorchtrt --compilation_mode opt --distdir third_party/dist_dir/[x86_64-linux-gnu | aarch64-linux-gnu]
 ```
 
-#### 2. Building using locally installed cuDNN & TensorRT
+#### 2. Building using locally installed TensorRT
 
 > If you find bugs and you compiled using this method please disclose you used this method in the issue
 > (an `ldd` dump would be nice too)
 
-1. Install TensorRT, CUDA and cuDNN on the system before starting to compile.
+1. Install TensorRT and CUDA on the system before starting to compile.
 2. In `WORKSPACE` comment out
 
 ```py
 # Downloaded distributions to use with --distdir
-http_archive(
-    name = "cudnn",
-    urls = ["",],
-
-    build_file = "@//third_party/cudnn/archive:BUILD",
-    sha256 = "",
-    strip_prefix = "cuda"
-)
-
 http_archive(
     name = "tensorrt",
     urls = ["",],
@@ -201,12 +191,6 @@ and uncomment
 
 ```py
 # Locally installed dependencies
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/cmake/Modules/FindcuDNN.cmake b/cmake/Modules/FindcuDNN.cmake
deleted file mode 100644
index 593a9fcacf..0000000000
--- a/cmake/Modules/FindcuDNN.cmake
+++ /dev/null
@@ -1,243 +0,0 @@
-# Source:
-# https://github.com/arrayfire/arrayfire/blob/master/CMakeModules/FindcuDNN.cmake
-#
-# Fetched the original content of this file from
-# https://github.com/soumith/cudnn.torch
-#
-# Original Copyright:
-# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
-# file Copyright.txt or https://cmake.org/licensing for details.
-#
-# Copyright (c) 2021, ArrayFire
-# All rights reserved.
-#
-# This file is distributed under 3-clause BSD license.
-# The complete license agreement can be obtained at:
-# http://arrayfire.com/licenses/BSD-3-Clause
-#
-# FindcuDNN
-# -------
-#
-# Find cuDNN library
-#
-# This module creates imported target cuDNN::cuDNN upon successfull
-# lookup of cuDNN headers and libraries.
-#
-# Valiables that affect result:
-# , , : as usual
-#
-# Usage
-# -----
-# add_exectuable(helloworld main.cpp)
-# target_link_libraries(helloworld PRIVATE cuDNN::cuDNN)
-#
-# Note: It is recommended to avoid using variables set by the find module.
-#
-# Result variables
-# ----------------
-#
-# This module will set the following variables in your project:
-#
-# ``cuDNN_INCLUDE_DIRS``
-#   where to find cudnn.h.
-#
-# ``cuDNN_LINK_LIBRARY``
-#   the libraries to link against to use cuDNN. Priot to cuDNN 8, this is a huge monolithic
-#   library. However, since cuDNN 8 it has been split into multiple shared libraries. If
-#   cuDNN version 8 if found, this variable contains the shared library that dlopens the
-#   other libraries: cuDNN_*_INFER_LINK_LIBRARY and cuDNN_*_TRAIN_LINK_LIBRARY as needed.
-#   For versions of cuDNN 7 or lower, cuDNN_*_INFER_LINK_LIBRARY and cuDNN_*_TRAIN_LINK_LIBRARY
-#   are not defined.
-#
-# ``cuDNN_ADV_INFER_LINK_LIBRARY``
-#   the libraries to link directly to use advanced inference API from cuDNN.
-# ``cuDNN_ADV_INFER_DLL_LIBRARY``
-#   Corresponding advanced inference API Windows DLL. This is not set on non-Windows platforms.
-# ``cuDNN_ADV_TRAIN_LINK_LIBRARY``
-#   the libraries to link directly to use advanced training API from cuDNN.
-# ``cuDNN_ADV_TRAIN_DLL_LIBRARY``
-#   Corresponding advanced training API Windows DLL. This is not set on non-Windows platforms.
-#
-# ``cuDNN_CNN_INFER_LINK_LIBRARY``
-#   the libraries to link directly to use convolutional nueral networks inference API from cuDNN.
-# ``cuDNN_CNN_INFER_DLL_LIBRARY``
-#   Corresponding CNN inference API Windows DLL. This is not set on non-Windows platforms.
-# ``cuDNN_CNN_TRAIN_LINK_LIBRARY``
-#   the libraries to link directly to use convolutional nueral networks training API from cuDNN.
-# ``cuDNN_CNN_TRAIN_DLL_LIBRARY``
-#   Corresponding CNN training API Windows DLL. This is not set on non-Windows platforms.
-#
-# ``cuDNN_OPS_INFER_LINK_LIBRARY``
-#   the libraries to link directly to use starndard ML operations API from cuDNN.
-# ``cuDNN_OPS_INFER_DLL_LIBRARY``
-#   Corresponding OPS inference API Windows DLL. This is not set on non-Windows platforms.
-# ``cuDNN_OPS_TRAIN_LINK_LIBRARY``
-#   the libraries to link directly to use starndard ML operations API from cuDNN.
-# ``cuDNN_OPS_TRAIN_DLL_LIBRARY``
-#   Corresponding OPS inference API Windows DLL. This is not set on non-Windows platforms.
-#
-# ``cuDNN_FOUND``
-#   If false, do not try to use cuDNN.
-# ``cuDNN_VERSION``
-#   Version of the cuDNN library found
-# ``cuDNN_VERSION_MAJOR``
-#   Major Version of the cuDNN library found
-# ``cuDNN_VERSION_MINOR``
-#   Minor Version of the cuDNN library found
-
-find_package(PkgConfig)
-pkg_check_modules(PC_CUDNN QUIET cuDNN)
-
-find_package(CUDAToolkit QUIET)
-
-find_path(cuDNN_INCLUDE_DIRS
-  NAMES cudnn.h
-  HINTS
-    ${cuDNN_ROOT_DIR}
-    ${PC_CUDNN_INCLUDE_DIRS}
-    ${CUDA_TOOLKIT_INCLUDE}
-  PATH_SUFFIXES include
-  DOC "cuDNN include directory path." )
-
-if(cuDNN_INCLUDE_DIRS)
-  file(READ ${cuDNN_INCLUDE_DIRS}/cudnn.h CUDNN_VERSION_FILE_CONTENTS)
-  string(REGEX MATCH "define CUDNN_MAJOR * +([0-9]+)"
-    CUDNN_MAJOR_VERSION "${CUDNN_VERSION_FILE_CONTENTS}")
-  list(LENGTH CUDNN_MAJOR_VERSION cudnn_ver_matches)
-  if(${cudnn_ver_matches} EQUAL 0)
-    file(READ ${cuDNN_INCLUDE_DIRS}/cudnn_version.h CUDNN_VERSION_FILE_CONTENTS)
-    string(REGEX MATCH "define CUDNN_MAJOR * +([0-9]+)"
-      CUDNN_MAJOR_VERSION "${CUDNN_VERSION_FILE_CONTENTS}")
-  endif()
-  string(REGEX REPLACE "define CUDNN_MAJOR * +([0-9]+)" "\\1"
-    CUDNN_MAJOR_VERSION "${CUDNN_MAJOR_VERSION}")
-  string(REGEX MATCH "define CUDNN_MINOR * +([0-9]+)"
-    CUDNN_MINOR_VERSION "${CUDNN_VERSION_FILE_CONTENTS}")
-  string(REGEX REPLACE "define CUDNN_MINOR * +([0-9]+)" "\\1"
-    CUDNN_MINOR_VERSION "${CUDNN_MINOR_VERSION}")
-  string(REGEX MATCH "define CUDNN_PATCHLEVEL * +([0-9]+)"
-    CUDNN_PATCH_VERSION "${CUDNN_VERSION_FILE_CONTENTS}")
-  string(REGEX REPLACE "define CUDNN_PATCHLEVEL * +([0-9]+)" "\\1"
-    CUDNN_PATCH_VERSION "${CUDNN_PATCH_VERSION}")
-  set(cuDNN_VERSION_MAJOR ${CUDNN_MAJOR_VERSION})
-  set(cuDNN_VERSION_MINOR ${CUDNN_MINOR_VERSION})
-  set(cuDNN_VERSION ${CUDNN_MAJOR_VERSION}.${CUDNN_MINOR_VERSION})
-endif()
-
-# Choose lib suffix to be exact major version if requested
-# otherwise, just pick the one read from cudnn.h header
-if(cuDNN_FIND_VERSION_EXACT)
-  set(cudnn_ver_suffix "${cuDNN_FIND_VERSION_MAJOR}")
-else()
-  set(cudnn_ver_suffix "${CUDNN_MAJOR_VERSION}")
-endif()
-
-if(cuDNN_INCLUDE_DIRS)
-  get_filename_component(libpath_cudart "${CUDA_CUDART_LIBRARY}" PATH)
-
-  macro(af_find_cudnn_libs cudnn_lib_name_infix)
-    if("${cudnn_lib_name_infix}" STREQUAL "")
-      set(LIB_INFIX "")
-    else()
-      string(TOUPPER ${cudnn_lib_name_infix} LIB_INFIX)
-    endif()
-    find_library(cuDNN${LIB_INFIX}_LINK_LIBRARY
-      NAMES
-        libcudnn${cudnn_lib_name_infix}.so.${cudnn_ver_suffix}
-        libcudnn${cudnn_lib_name_infix}.${cudnn_ver_suffix}.dylib
-        cudnn${cudnn_lib_name_infix}
-      PATHS
-        ${cuDNN_ROOT_DIR}
-        ${PC_CUDNN_LIBRARY_DIRS}
-        $ENV{LD_LIBRARY_PATH}
-        ${libpath_cudart}
-        ${CMAKE_INSTALL_PREFIX}
-      PATH_SUFFIXES lib lib64 bin lib/x64 bin/x64
-      DOC "cudnn${cudnn_lib_name_infix} link library."
-      )
-    mark_as_advanced(cuDNN${LIB_INFIX}_LINK_LIBRARY)
-
-    if(WIN32 AND cuDNN_LINK_LIBRARY)
-      find_file(cuDNN${LIB_INFIX}_DLL_LIBRARY
-        NAMES cudnn${cudnn_lib_name_infix}64_${cudnn_ver_suffix}${CMAKE_SHARED_LIBRARY_SUFFIX}
-        PATHS
-          ${cuDNN_ROOT_DIR}
-          ${PC_CUDNN_LIBRARY_DIRS}
-          $ENV{PATH}
-          ${libpath_cudart}
-          ${CMAKE_INSTALL_PREFIX}
-        PATH_SUFFIXES lib lib64 bin lib/x64 bin/x64
-        DOC "cudnn${cudnn_lib_name_infix} Windows DLL." )
-      mark_as_advanced(cuDNN${LIB_INFIX}_DLL_LIBRARY)
-    endif()
-  endmacro()
-
-  af_find_cudnn_libs("") # gets base cudnn shared library
-  if(cuDNN_VERSION_MAJOR VERSION_GREATER 8 OR cuDNN_VERSION_MAJOR VERSION_EQUAL 8)
-    af_find_cudnn_libs("_adv_infer")
-    af_find_cudnn_libs("_adv_train")
-    af_find_cudnn_libs("_cnn_infer")
-    af_find_cudnn_libs("_cnn_train")
-    af_find_cudnn_libs("_ops_infer")
-    af_find_cudnn_libs("_ops_train")
-  endif()
-endif()
-
-# pytorch compatibility layer
-set(CUDNN_LIBRARY_PATH ${cuDNN_LINK_LIBRARY})
-set(CUDNN_INCLUDE_PATH ${cuDNN_INCLUDE_DIRS})
-
-find_package_handle_standard_args(cuDNN
-  REQUIRED_VARS cuDNN_LINK_LIBRARY cuDNN_INCLUDE_DIRS
-  VERSION_VAR cuDNN_VERSION)
-
-mark_as_advanced(cuDNN_LINK_LIBRARY cuDNN_INCLUDE_DIRS cuDNN_DLL_LIBRARY)
-
-if(cuDNN_FOUND)
-  if(NOT TARGET cuDNN::cuDNN)
-    add_library(cuDNN::cuDNN SHARED IMPORTED)
-    if(WIN32)
-      set_target_properties(cuDNN::cuDNN
-        PROPERTIES
-        IMPORTED_LINK_INTERFACE_LANGUAGE "C"
-        INTERFACE_INCLUDE_DIRECTORIES "${cuDNN_INCLUDE_DIRS}"
-        IMPORTED_LOCATION "${cuDNN_DLL_LIBRARY}"
-        IMPORTED_IMPLIB "${cuDNN_LINK_LIBRARY}"
-      )
-    else(WIN32)
-      set_target_properties(cuDNN::cuDNN
-        PROPERTIES
-        IMPORTED_LINK_INTERFACE_LANGUAGE "C"
-        INTERFACE_INCLUDE_DIRECTORIES "${cuDNN_INCLUDE_DIRS}"
-        IMPORTED_LOCATION "${cuDNN_LINK_LIBRARY}"
-      )
-    endif(WIN32)
-    if(cuDNN_VERSION_MAJOR VERSION_GREATER 8 OR cuDNN_VERSION_MAJOR VERSION_EQUAL 8)
-      macro(create_cudnn_target cudnn_target_name)
-        string(TOUPPER ${cudnn_target_name} target_infix)
-        add_library(cuDNN::${cudnn_target_name} SHARED IMPORTED)
-        if(WIN32)
-          set_target_properties(cuDNN::${cudnn_target_name}
-            PROPERTIES
-            IMPORTED_LINK_INTERFACE_LANGUAGE "C"
-            INTERFACE_INCLUDE_DIRECTORIES "${cuDNN_INCLUDE_DIRS}"
-            IMPORTED_LOCATION "${cuDNN_${target_infix}_DLL_LIBRARY}"
-            IMPORTED_IMPLIB "${cuDNN_${target_infix}_LINK_LIBRARY}"
-          )
-        else(WIN32)
-          set_target_properties(cuDNN::${cudnn_target_name}
-            PROPERTIES
-            IMPORTED_LINK_INTERFACE_LANGUAGE "C"
-            INTERFACE_INCLUDE_DIRECTORIES "${cuDNN_INCLUDE_DIRS}"
-            IMPORTED_LOCATION "${cuDNN_${target_infix}_LINK_LIBRARY}"
-          )
-        endif(WIN32)
-      endmacro()
-      create_cudnn_target(adv_infer)
-      create_cudnn_target(adv_train)
-      create_cudnn_target(cnn_infer)
-      create_cudnn_target(cnn_train)
-      create_cudnn_target(ops_infer)
-      create_cudnn_target(ops_train)
-    endif()
-  endif(NOT TARGET cuDNN::cuDNN)
-endif(cuDNN_FOUND)
diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake
index a2d994eb1a..4689f52734 100644
--- a/cmake/dependencies.cmake
+++ b/cmake/dependencies.cmake
@@ -7,11 +7,9 @@ endif()
 
 # If the custom finders are needed at this point, there are good chances that they will be needed when consuming the library as well
 install(FILES "${CMAKE_SOURCE_DIR}/cmake/Modules/FindTensorRT.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/torchtrt/Modules")
-install(FILES "${CMAKE_SOURCE_DIR}/cmake/Modules/FindcuDNN.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/torchtrt/Modules")
 
 # CUDA
 find_package(CUDAToolkit REQUIRED)
-find_package(cuDNN REQUIRED) # Headers are needed somewhere
 
 # libtorch
 find_package(Torch REQUIRED)
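With `FindcuDNN.cmake` deleted and the `find_package(cuDNN REQUIRED)` call gone, the CMake path should resolve only CUDA, Torch, and TensorRT. A minimal configure sketch for checking that, with placeholder install paths (`TensorRT_ROOT` and `Torch_DIR` are the same variables the docs in this patch already reference):

```sh
# Sketch: configure on a machine with no cuDNN installed; the remaining
# FindTensorRT.cmake finder is picked up via -DCMAKE_MODULE_PATH.
# /opt/tensorrt and /opt/libtorch are illustrative paths only.
cmake -S . -B build \
  -DCMAKE_MODULE_PATH=cmake/Modules \
  -DTensorRT_ROOT=/opt/tensorrt \
  -DTorch_DIR=/opt/libtorch/share/cmake/Torch
cmake --build build -j
```

If any target still referenced `cuDNN::cuDNN` (as `core/plugins` does until the following hunk removes it), this configure step would fail in `find_package`, which makes it a cheap regression check for the removal.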
diff --git a/core/plugins/CMakeLists.txt b/core/plugins/CMakeLists.txt
index 2b1eaaf73f..90b039c94a 100644
--- a/core/plugins/CMakeLists.txt
+++ b/core/plugins/CMakeLists.txt
@@ -23,7 +23,6 @@ target_link_libraries(${lib_name}
         TensorRT::nvinfer_plugin
         torch
         core_util
-        cuDNN::cuDNN
     PRIVATE
         Threads::Threads
 )
diff --git a/dev_dep_versions.yml b/dev_dep_versions.yml
index 97dd720a30..1c0e24ade4 100644
--- a/dev_dep_versions.yml
+++ b/dev_dep_versions.yml
@@ -1,4 +1,3 @@
 __version__: "2.4.0.dev0"
 __cuda_version__: "12.1"
-__cudnn_version__: "8.9"
 __tensorrt_version__: "10.0.1"
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 16b92bbd17..f0b393c6f5 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -8,9 +8,6 @@ ENV BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04
 ARG TENSORRT_VERSION
 ENV TENSORRT_VERSION=${TENSORRT_VERSION}
 RUN test -n "$TENSORRT_VERSION" || (echo "No tensorrt version specified, please use --build-arg TENSORRT_VERSION=x.y to specify a version." && exit 1)
-ARG CUDNN_VERSION
-ENV CUDNN_VERSION=${CUDNN_VERSION}
-RUN test -n "$CUDNN_VERSION" || (echo "No cudnn version specified, please use --build-arg CUDNN_VERSION=x.y to specify a version." && exit 1)
 
 ARG PYTHON_VERSION=3.10
 ENV PYTHON_VERSION=${PYTHON_VERSION}
@@ -35,13 +32,12 @@ RUN wget -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-instal
 RUN pyenv install -v ${PYTHON_VERSION}
 RUN pyenv global ${PYTHON_VERSION}
 
-# Install CUDNN + TensorRT + dependencies
+# Install TensorRT + dependencies
 RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin
 RUN mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
 RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/7fa2af80.pub
 RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
 RUN apt-get update
-RUN apt-get install -y libcudnn8=${CUDNN_VERSION}* libcudnn8-dev=${CUDNN_VERSION}*
 
 RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
 RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
diff --git a/docker/README.md b/docker/README.md
index 9f83f25134..cf83d38185 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -3,7 +3,7 @@
 
 * Use `Dockerfile` to build a container which provides the exact development environment that our master branch is usually tested against.
 
 * The `Dockerfile` currently uses Bazelisk to select the Bazel version, and uses the exact library versions of Torch and CUDA listed in dependencies.
-  * The desired versions of CUDNN and TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b --build-arg CUDNN_VERSION=x.y`
+  * The desired version of TensorRT must be specified as a build-arg, with major and minor versions, as in: `--build-arg TENSORRT_VERSION=a.b`
 * [**Optional**] The desired base image be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional
 * [**Optional**] Additionally, the desired Python version can be changed by explicitly setting a version, as in `--build-arg PYTHON_VERSION=3.10`, though this is optional as well.
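With cuDNN dropped, `TENSORRT_VERSION` is the only remaining version build-arg. Rather than hardcoding it, the value can be derived from `dev_dep_versions.yml` the same way the CI workflow above now does; a minimal sketch, assuming it is run from the repository root with `python3` available:

```sh
# Sketch: derive the TensorRT pin via the repo's own versions.py helper,
# then pass it as the single remaining --build-arg (no cuDNN arg needed).
python3 -m pip install pyyaml
TRT_VERSION=$(python3 -c "import versions; versions.tensorrt_version()")
DOCKER_BUILDKIT=1 docker build \
  --build-arg TENSORRT_VERSION=${TRT_VERSION} \
  -f docker/Dockerfile -t torch_tensorrt:latest .
```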
@@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch
 
 ### Instructions
 
-- The example below uses CUDNN 8.9 and TensorRT 8.6
+- The example below uses TensorRT 8.6
 - See dependencies for a list of current default dependencies.
 
 > From root of Torch-TensorRT repo
 
 Build:
 ```
-DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.9 -f docker/Dockerfile -t torch_tensorrt:latest .
+DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 -f docker/Dockerfile -t torch_tensorrt:latest .
 ```
 
 Run:
diff --git a/docker/WORKSPACE.docker b/docker/WORKSPACE.docker
index b4da144ddc..461a22147f 100755
--- a/docker/WORKSPACE.docker
+++ b/docker/WORKSPACE.docker
@@ -67,12 +67,6 @@ new_local_repository(
 # Locally installed dependencies (use in cases of custom dependencies or aarch64)
 ####################################################################################
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/docker/WORKSPACE.ngc b/docker/WORKSPACE.ngc
index c3d9bea0fc..17b06bfa41 100755
--- a/docker/WORKSPACE.ngc
+++ b/docker/WORKSPACE.ngc
@@ -69,12 +69,6 @@ new_local_repository(
     build_file = "third_party/libtorch/BUILD"
 )
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/docsrc/getting_started/getting_started_with_windows.rst b/docsrc/getting_started/getting_started_with_windows.rst
index edb3262d66..c2091e354f 100644
--- a/docsrc/getting_started/getting_started_with_windows.rst
+++ b/docsrc/getting_started/getting_started_with_windows.rst
@@ -11,7 +11,6 @@ Prerequisite:
 * LibTorch
 * TensorRT
 * CUDA
-* cuDNN
 
 
 Build configuration
@@ -90,27 +89,24 @@ Building With Visual Studio Code
    > Also allows using Build Tools to develop and test Open Source Dependencies, to the minor extend of ensuring compatibility with Build Tools
 
 3. Install CUDA (e.g. 11.7.1)
-4. Install cuDNN (e.g. 8.5.0.96)
-
-   - Set ``cuDNN_ROOT_DIR``
-
-5. Install `TensorRT` (e.g 8.5.1.7)
+4. Install `TensorRT` (e.g. 8.5.1.7)
 
    - Set ``TensorRT_ROOT``
    - Add ``TensorRT_ROOT\lib`` to ``PATH``
 
-6. Install "libtorch-win-shared-with-deps-latest.zip"
+5. Install "libtorch-win-shared-with-deps-latest.zip"
 
    - Select build targeting the appropriate CUDA version
   - Set ``Torch_DIR``
   - Add ``Torch_DIR\lib`` to ``PATH``
 
-7. Clone TensorRT repo
-8. Install C++ and CMake Tools extensions from MS
+6. Clone TensorRT repo
+7. Install C++ and CMake Tools extensions from MS
 
    - Change build to ``RelWithDebInfo``
 
-9. Update ``.vscode\settings.json``
+8. Update ``.vscode\settings.json``
 
    - Clean, configure, build
@@ -137,10 +133,6 @@ e.g. /.vscode/settings.json
             "type": "FILEPATH",
             "value": "X:\\path\\to\\tensorrt"
         },
-        "cuDNN_ROOT_DIR": {
-            "type": "FILEPATH",
-            "value": "X:\\path\\to\\cudnn"
-        },
         "CMAKE_CUDA_FLAGS": "-allow-unsupported-compiler"
     },
     "cmake.buildDirectory": "${workspaceFolder}/torch_tensorrt_build"
diff --git a/docsrc/getting_started/installation.rst b/docsrc/getting_started/installation.rst
index fdd44a20cd..a70bc7838e 100644
--- a/docsrc/getting_started/installation.rst
+++ b/docsrc/getting_started/installation.rst
@@ -87,7 +87,7 @@ Dependencies for Compilation
 
 * Specify your CUDA version here if not the version used in the branch being built: https://github.com/pytorch/TensorRT/blob/4e5b0f6e860910eb510fa70a76ee3eb9825e7a4d/WORKSPACE#L46
 
-* The correct **LibTorch**, **cuDNN** and **TensorRT** versions will be pulled down for you by bazel.
+* The correct **LibTorch** and **TensorRT** versions will be pulled down for you by bazel.
 
 NOTE: By default bazel will pull the latest nightly from pytorch.org. For building main, this is usually sufficient however if there is a specific PyTorch you are targeting, edit these locations with updated URLs/paths:
 
@@ -95,25 +95,13 @@ Dependencies for Compilation
 
 * https://github.com/pytorch/TensorRT/blob/4e5b0f6e860910eb510fa70a76ee3eb9825e7a4d/WORKSPACE#L53C1-L53C1
 
-* **cuDNN and TensorRT** are not required to be installed on the system to build Torch-TensorRT, in fact this is preferable to ensure reproducable builds. If versions other than the default are needed
-  point the WORKSPACE file to the URL of the tarball or download the tarballs for cuDNN and TensorRT from https://developer.nvidia.com and update the paths in the WORKSPACE file here https://github.com/pytorch/TensorRT/blob/4e5b0f6e860910eb510fa70a76ee3eb9825e7a4d/WORKSPACE#L71
+* **TensorRT** is not required to be installed on the system to build Torch-TensorRT; in fact, this is preferable to ensure reproducible builds. If a version other than the default is needed,
+  point the WORKSPACE file to the URL of the tarball or download the tarball for TensorRT from https://developer.nvidia.com and update the paths in the WORKSPACE file here https://github.com/pytorch/TensorRT/blob/4e5b0f6e860910eb510fa70a76ee3eb9825e7a4d/WORKSPACE#L71
 
 For example:
 
 .. code-block:: python
 
-    http_archive(
-        name = "cudnn",
-        build_file = "@//third_party/cudnn/archive:BUILD",
-        sha256 = "", # Optional but recommended
-        strip_prefix = "cudnn-linux-x86_64-_-archive",
-        urls = [
-            "https://developer.nvidia.com/downloads/compute/cudnn/",
-            # OR
-            "file:////cudnn-linux-x86_64-_-archive.tar.xz"
-        ],
-    )
-
     http_archive(
         name = "tensorrt",
         build_file = "@//third_party/tensorrt/archive:BUILD",
@@ -128,7 +116,7 @@ Dependencies for Compilation
 
 Remember at runtime, these libraries must be added to your ``LD_LIBRARY_PATH`` explicity
 
-If you have a local version of cuDNN and TensorRT installed, this can be used as well by commenting out the above lines and uncommenting the following lines https://github.com/pytorch/TensorRT/blob/4e5b0f6e860910eb510fa70a76ee3eb9825e7a4d/WORKSPACE#L114C1-L124C3
+If you have a local version of TensorRT installed, this can be used as well by commenting out the above lines and uncommenting the following lines https://github.com/pytorch/TensorRT/blob/4e5b0f6e860910eb510fa70a76ee3eb9825e7a4d/WORKSPACE#L114C1-L124C3
 
 
 Building the Package
@@ -228,7 +216,7 @@ Begin by installing CMake.
 
   A few useful CMake options include:
 
-  * CMake finders for TensorRT and cuDNN are provided in `cmake/Modules`. In order for CMake to use them, pass
+  * CMake finders for TensorRT are provided in `cmake/Modules`. In order for CMake to use them, pass
     `-DCMAKE_MODULE_PATH=cmake/Modules` when configuring the project with CMake.
   * Libtorch provides its own CMake finder. In case CMake doesn't find it, pass the path to your install of
     libtorch with `-DTorch_DIR=/share/cmake/Torch`
diff --git a/examples/int8/ptq/README.md b/examples/int8/ptq/README.md
index 4ac147bb6e..329d4d021d 100644
--- a/examples/int8/ptq/README.md
+++ b/examples/int8/ptq/README.md
@@ -149,14 +149,13 @@ tar -xvzf libtorch_tensorrt.tar.gz
 unzip libtorch.zip
 ```
 
-> If cuDNN and TensorRT are not installed on your system / in your LD_LIBRARY_PATH then do the following as well
+> If TensorRT is not installed on your system / in your LD_LIBRARY_PATH then do the following as well
 
 ```sh
 cd deps
-mkdir cudnn && tar -xvzf --directory cudnn --strip-components=1
 mkdir tensorrt && tar -xvzf --directory tensorrt --strip-components=1
 cd ..
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:$(pwd)/deps/cudnn/lib64:/usr/local/cuda/lib
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:/usr/local/cuda/lib
 ```
 
 2) Build and run `ptq`
diff --git a/examples/int8/qat/README.md b/examples/int8/qat/README.md
index f47aea15b0..992121c705 100644
--- a/examples/int8/qat/README.md
+++ b/examples/int8/qat/README.md
@@ -43,14 +43,13 @@ tar -xvzf libtorch_tensorrt.tar.gz
 unzip libtorch.zip
 ```
 
-> If cuDNN and TensorRT are not installed on your system / in your LD_LIBRARY_PATH then do the following as well
+> If TensorRT is not installed on your system / in your LD_LIBRARY_PATH then do the following as well
 
 ```sh
 cd deps
-mkdir cudnn && tar -xvzf --directory cudnn --strip-components=1
 mkdir tensorrt && tar -xvzf --directory tensorrt --strip-components=1
 cd ..
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:$(pwd)/deps/cudnn/lib64:/usr/local/cuda/lib
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:/usr/local/cuda/lib
 ```
 
 2) Build and run `qat`
diff --git a/examples/torchtrt_runtime_example/Makefile b/examples/torchtrt_runtime_example/Makefile
index 9f8df64763..07fcc4c291 100644
--- a/examples/torchtrt_runtime_example/Makefile
+++ b/examples/torchtrt_runtime_example/Makefile
@@ -1,7 +1,7 @@
 CXX=g++
 DEP_DIR=$(PWD)/deps
 INCLUDE_DIRS=-I$(DEP_DIR)/libtorch/include -I$(DEP_DIR)/torch_tensorrt/include
-LIB_DIRS=-L$(DEP_DIR)/torch_tensorrt/lib -L$(DEP_DIR)/libtorch/lib # -Wl,-rpath $(DEP_DIR)/tensorrt/lib -Wl,-rpath $(DEP_DIR)/cudnn/lib64
+LIB_DIRS=-L$(DEP_DIR)/torch_tensorrt/lib -L$(DEP_DIR)/libtorch/lib # -Wl,-rpath $(DEP_DIR)/tensorrt/lib
 LIBS=-Wl,--no-as-needed -ltorchtrt_runtime -Wl,--as-needed -ltorch -ltorch_cuda -ltorch_cpu -ltorch_global_deps -lbackend_with_compiler -lc10 -lc10_cuda
 SRCS=main.cpp
diff --git a/examples/torchtrt_runtime_example/README.md b/examples/torchtrt_runtime_example/README.md
index 9effed5046..15b740b0b0 100644
--- a/examples/torchtrt_runtime_example/README.md
+++ b/examples/torchtrt_runtime_example/README.md
@@ -26,17 +26,16 @@ tar -xvzf libtorch_tensorrt.tar.gz
 unzip libtorch-cxx11-abi-shared-with-deps-[PYTORCH_VERSION].zip
 ```
 
-> If cuDNN and TensorRT are not installed on your system / in your LD_LIBRARY_PATH then do the following as well
+> If TensorRT is not installed on your system / in your LD_LIBRARY_PATH then do the following as well
 
 ```sh
 cd deps
-mkdir cudnn && tar -xvzf --directory cudnn --strip-components=1
 mkdir tensorrt && tar -xvzf --directory tensorrt --strip-components=1
 cd ..
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:$(pwd)/deps/cudnn/lib64:/usr/local/cuda/lib
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:/usr/local/cuda/lib
 ```
 
-This gives maximum compatibility with system configurations for running this example but in general you are better off adding `-Wl,-rpath $(DEP_DIR)/tensorrt/lib -Wl,-rpath $(DEP_DIR)/cudnn/lib64` to your linking command for actual applications
+This gives maximum compatibility with system configurations for running this example, but in general you are better off adding `-Wl,-rpath $(DEP_DIR)/tensorrt/lib` to your linking command for actual applications
 
 2) Build and run `torchtrt_runtime_example`
@@ -48,6 +47,6 @@ To build and run the app
 cd examples/torchtrt_runtime_example
 make
 # If paths are different than the ones below, change as necessary
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:$(pwd)/deps/cudnn/lib64:/usr/local/cuda/lib
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/torch_tensorrt/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/tensorrt/lib:/usr/local/cuda/lib
 ./torchtrt_runtime_example $PWD/examples/torchtrt_runtime_example/norm.jit
 ```
diff --git a/notebooks/Resnet50-CPP.ipynb b/notebooks/Resnet50-CPP.ipynb
index 3d5e66ee51..198ebc9911 100755
--- a/notebooks/Resnet50-CPP.ipynb
+++ b/notebooks/Resnet50-CPP.ipynb
@@ -172,19 +172,6 @@
     "unzip libtorch-cxx11-abi-shared-with-deps-1.11.0+cu113.zip"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "198553b7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%bash\n",
-    "cd deps\n",
-    "wget https://github.com/pytorch/TensorRT/releases/download/v1.1.0/libtorchtrt-v1.1.0-cudnn8.2-tensorrt8.2-cuda11.3-libtorch1.11.0.tar.gz\n",
-    "tar -xvzf libtorchtrt-v1.1.0-cudnn8.2-tensorrt8.2-cuda11.3-libtorch1.11.0.tar.gz"
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "309696b6",
diff --git a/notebooks/WORKSPACE.notebook b/notebooks/WORKSPACE.notebook
index 14b655436c..9bf1adb718 100644
--- a/notebooks/WORKSPACE.notebook
+++ b/notebooks/WORKSPACE.notebook
@@ -69,12 +69,6 @@ http_archive(
 # Locally installed dependencies (use in cases of custom dependencies or aarch64)
 ####################################################################################
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/py/ci/build_whl.sh b/py/ci/build_whl.sh
index 12f72a9351..35090445f0 100755
--- a/py/ci/build_whl.sh
+++ b/py/ci/build_whl.sh
@@ -100,9 +100,8 @@ libtorchtrt() {
     CUDA_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cuda_version()")
     TORCHTRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.torch_tensorrt_version_release()")
     TRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.tensorrt_version()")
-    CUDNN_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cudnn_version()")
     TORCH_VERSION=$(${PY_DIR}/bin/python -c "from torch import __version__;print(__version__.split('+')[0])")
-    cp ${PROJECT_DIR}/bazel-bin/libtorchtrt.tar.gz ${PROJECT_DIR}/py/wheelhouse/libtorchtrt-${TORCHTRT_VERSION}-cudnn${CUDNN_VERSION}-tensorrt${TRT_VERSION}-cuda${CUDA_VERSION}-libtorch${TORCH_VERSION}-x86_64-linux.tar.gz
+    cp ${PROJECT_DIR}/bazel-bin/libtorchtrt.tar.gz ${PROJECT_DIR}/py/wheelhouse/libtorchtrt-${TORCHTRT_VERSION}-tensorrt${TRT_VERSION}-cuda${CUDA_VERSION}-libtorch${TORCH_VERSION}-x86_64-linux.tar.gz
 }
 
 libtorchtrt_pre_cxx11_abi() {
@@ -120,7 +119,6 @@ libtorchtrt_pre_cxx11_abi() {
     CUDA_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cuda_version()")
     TORCHTRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.torch_tensorrt_version_release()")
     TRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.tensorrt_version()")
-    CUDNN_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cudnn_version()")
     TORCH_VERSION=$(${PY_DIR}/bin/python -c "from torch import __version__;print(__version__.split('+')[0])")
-    cp ${PROJECT_DIR}/bazel-bin/libtorchtrt.tar.gz ${PROJECT_DIR}/py/wheelhouse/libtorchtrt-${TORCHTRT_VERSION}-pre-cxx11-abi-cudnn${CUDNN_VERSION}-tensorrt${TRT_VERSION}-cuda${CUDA_VERSION}-libtorch${TORCH_VERSION}-x86_64-linux.tar.gz
+    cp ${PROJECT_DIR}/bazel-bin/libtorchtrt.tar.gz ${PROJECT_DIR}/py/wheelhouse/libtorchtrt-${TORCHTRT_VERSION}-pre-cxx11-abi-tensorrt${TRT_VERSION}-cuda${CUDA_VERSION}-libtorch${TORCH_VERSION}-x86_64-linux.tar.gz
 }
diff --git a/py/ci/soname_excludes.params b/py/ci/soname_excludes.params
index a5eecb7c9a..53f7f48a65 100644
--- a/py/ci/soname_excludes.params
+++ b/py/ci/soname_excludes.params
@@ -30,7 +30,6 @@
 --exclude libcuda.so.515
 --exclude libcublasLt.so.11
 --exclude libnvinfer.so.8
---exclude libcudnn.so.8
 --exclude libcublas.so.12
 --exclude libcublasLt.so.12
 --exclude libcublas.so.12.1.3.1
diff --git a/py/torch_tensorrt/__init__.py b/py/torch_tensorrt/__init__.py
index b1de3c6c18..d12c32f8b1 100644
--- a/py/torch_tensorrt/__init__.py
+++ b/py/torch_tensorrt/__init__.py
@@ -6,7 +6,6 @@
 
 from torch_tensorrt._version import (  # noqa: F401
     __cuda_version__,
-    __cudnn_version__,
     __tensorrt_version__,
     __version__,
 )
@@ -40,11 +39,9 @@ def _find_lib(name: str, paths: List[str]) -> str:
     import tensorrt  # noqa: F401
 except ImportError:
     cuda_version = _parse_semver(__cuda_version__)
-    cudnn_version = _parse_semver(__cudnn_version__)
     tensorrt_version = _parse_semver(__tensorrt_version__)
 
     CUDA_MAJOR = cuda_version["major"]
-    CUDNN_MAJOR = cudnn_version["major"]
     TENSORRT_MAJOR = tensorrt_version["major"]
 
     if sys.platform.startswith("win"):
diff --git a/py/torch_tensorrt/dynamo/conversion/impl/embedding.py b/py/torch_tensorrt/dynamo/conversion/impl/embedding.py
index f4e98ac3ee..57d203b689 100644
--- a/py/torch_tensorrt/dynamo/conversion/impl/embedding.py
+++ b/py/torch_tensorrt/dynamo/conversion/impl/embedding.py
@@ -184,7 +184,7 @@ def embedding_bag_with_ITensor_offsets(
     loop1 = ctx.net.add_loop()
     trip_limit1 = ctx.net.add_constant(
         shape=(),
-        weights=trt.Weights(np.array([offsets.shape[0] - 1], dtype=np.dtype("i"))),
+        weights=trt.Weights(np.array([offsets.shape[0] - 1], dtype=np.int32)),
     ).get_output(0)
     loop1.add_trip_limit(trip_limit1, trt.TripLimit.COUNT)
 
@@ -205,7 +205,7 @@ def embedding_bag_with_ITensor_offsets(
     ###### Inner loop: traverse indices ######
     loop2 = ctx.net.add_loop()
     trip_limit2 = ctx.net.add_constant(
-        shape=(), weights=trt.Weights(np.array([len_embed], dtype=np.dtype("i")))
+        shape=(), weights=trt.Weights(np.array([len_embed], dtype=np.int32))
     ).get_output(0)
     loop2.add_trip_limit(trip_limit2, trt.TripLimit.COUNT)
     rec2_j_tensor = loop2.add_recurrence(constant_0)
diff --git a/setup.py b/setup.py
index ba29433e5f..7e30847480 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,6 @@
 
 __version__: str = "0.0.0"
 __cuda_version__: str = "0.0"
-__cudnn_version__: str = "0.0"
 __tensorrt_version__: str = "0.0"
 
 LEGACY_BASE_VERSION_SUFFIX_PATTERN = re.compile("a0$")
@@ -62,7 +61,6 @@ def get_base_version() -> str:
 
 def load_dep_info():
     global __cuda_version__
-    global __cudnn_version__
     global __tensorrt_version__
     with open("dev_dep_versions.yml", "r") as stream:
         versions = yaml.safe_load(stream)
@@ -72,7 +70,6 @@ def load_dep_info():
         )
     else:
         __cuda_version__ = versions["__cuda_version__"]
-        __cudnn_version__ = versions["__cudnn_version__"]
         __tensorrt_version__ = versions["__tensorrt_version__"]
 
 
@@ -230,7 +227,6 @@ def gen_version_file():
         print("creating version file")
         f.write('__version__ = "' + __version__ + '"\n')
         f.write('__cuda_version__ = "' + __cuda_version__ + '"\n')
-        f.write('__cudnn_version__ = "' + __cudnn_version__ + '"\n')
         f.write('__tensorrt_version__ = "' + __tensorrt_version__ + '"\n')
diff --git a/toolchains/ci_workspaces/WORKSPACE.sbsa b/toolchains/ci_workspaces/WORKSPACE.sbsa
index d30d45b15a..ea8d72f347 100644
--- a/toolchains/ci_workspaces/WORKSPACE.sbsa
+++ b/toolchains/ci_workspaces/WORKSPACE.sbsa
@@ -74,12 +74,6 @@ new_local_repository(
     build_file = "third_party/libtorch/BUILD"
 )
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl b/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
index acc4ecd51c..01bbd2130e 100644
--- a/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
+++ b/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
@@ -73,12 +73,6 @@ http_archive(
 # Locally installed dependencies (use in cases of custom dependencies or aarch64)
 ####################################################################################
 
-new_local_repository(
-    name = "cudnn",
-    path = "C:/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "C:/",
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64 b/toolchains/ci_workspaces/WORKSPACE.x86_64
index e73597beb4..d8a346b680 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64
@@ -75,12 +75,6 @@ new_local_repository(
     build_file = "third_party/libtorch/BUILD"
 )
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.cu118.release.rhel b/toolchains/ci_workspaces/WORKSPACE.x86_64.cu118.release.rhel
index 79c7575d2b..edeeb76ff7 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.cu118.release.rhel
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.cu118.release.rhel
@@ -75,12 +75,6 @@ http_archive(
 # Locally installed dependencies (use in cases of custom dependencies or aarch64)
 ####################################################################################
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.cu121.release.rhel b/toolchains/ci_workspaces/WORKSPACE.x86_64.cu121.release.rhel
index 2fc09e8219..8b179084e9 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.cu121.release.rhel
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.cu121.release.rhel
@@ -75,12 +75,6 @@ http_archive(
 # Locally installed dependencies (use in cases of custom dependencies or aarch64)
 ####################################################################################
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.legacy b/toolchains/ci_workspaces/WORKSPACE.x86_64.legacy
index 31990b5e55..3825ee38e1 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.legacy
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.legacy
@@ -75,12 +75,6 @@ new_local_repository(
     build_file = "third_party/libtorch/BUILD"
 )
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
index 745272d328..28e1f3184c 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
@@ -75,12 +75,6 @@ http_archive(
 # Locally installed dependencies (use in cases of custom dependencies or aarch64)
 ####################################################################################
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/toolchains/jp_workspaces/WORKSPACE.jp50 b/toolchains/jp_workspaces/WORKSPACE.jp50
index 3038c06117..ca44bd4dc5 100644
--- a/toolchains/jp_workspaces/WORKSPACE.jp50
+++ b/toolchains/jp_workspaces/WORKSPACE.jp50
@@ -81,12 +81,6 @@ new_local_repository(
     build_file = "third_party/libtorch/BUILD"
 )
 
-new_local_repository(
-    name = "cudnn",
-    path = "/usr/",
-    build_file = "@//third_party/cudnn/local:BUILD"
-)
-
 new_local_repository(
     name = "tensorrt",
     path = "/usr/",
diff --git a/versions.py b/versions.py
index db418a06d2..81dbe72794 100644
--- a/versions.py
+++ b/versions.py
@@ -8,7 +8,6 @@
 
 __version__ = "0.0.0"
 __cuda_version__ = "0.0"
-__cudnn_version__ = "0.0"
 __tensorrt_version__ = "0.0"
 
 
@@ -104,7 +103,6 @@ def get_nightly_version(self) -> str:
 
 def load_dep_info():
     global __cuda_version__
-    global __cudnn_version__
     global __tensorrt_version__
     with open("dev_dep_versions.yml", "r") as stream:
         versions = yaml.safe_load(stream)
@@ -115,7 +113,6 @@ def load_dep_info():
         )
     else:
         __cuda_version__ = versions["__cuda_version__"]
-        __cudnn_version__ = versions["__cudnn_version__"]
         __tensorrt_version__ = versions["__tensorrt_version__"]
 
 
@@ -141,9 +138,5 @@ def cuda_version():
     print(__cuda_version__)
 
 
-def cudnn_version():
-    print(__cudnn_version__)
-
-
 def tensorrt_version():
     print(__tensorrt_version__)
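With `cudnn_version()` removed, the only version helpers `versions.py` still exposes are the CUDA, Torch-TensorRT, and TensorRT ones that the CI workflow and `py/ci/build_whl.sh` call above. A quick sanity check, sketched under the assumption that it is run from the repository root with `pyyaml` installed (expected output follows the pins in `dev_dep_versions.yml`):

```sh
# Sketch: confirm only the remaining version helpers are callable.
python3 -m pip install pyyaml
python3 -c "import versions; versions.cuda_version()"      # expected: 12.1
python3 -c "import versions; versions.tensorrt_version()"  # expected: 10.0.1
# The removed helper should now fail, flagging any stale caller of it:
python3 -c "import versions; versions.cudnn_version()" \
  && echo "ERROR: cudnn helper still present" || echo "OK: cudnn helper gone"
```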