diff --git a/.bazelversion b/.bazelversion index 91ff57278e..024b066c0b 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -5.2.0 +6.2.1 diff --git a/.circleci/config.yml b/.circleci/config.yml index d46c695678..78c6150d61 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,19 +4,19 @@ version: 2.1 commands: install-bazel: - description: "Install bazel" + description: "Install bazelisk" parameters: platform: type: string - default: "x86_64" + default: "amd64" version: type: string - default: "5.2.0" + default: "v1.17.0" steps: - run: - name: Install bazel + name: Install bazelisk command: | - sudo wget -q https://github.com/bazelbuild/bazel/releases/download/<< parameters.version >>/bazel-<< parameters.version >>-linux-<< parameters.platform >> -O /usr/bin/bazel + sudo wget -q https://github.com/bazelbuild/bazelisk/releases/download/<< parameters.version >>/bazelisk-linux-<< parameters.platform >> -O /usr/bin/bazel sudo chmod a+x /usr/bin/bazel install-cmake: @@ -93,6 +93,21 @@ commands: paths: - << parameters.install-dir >> + install-nvctk: + description: "Install NVIDIA Container Toolkit" + steps: + - run: + name: "Install NVIDIA Container Toolkit" + command: | + sudo apt-get update + sudo apt-get install -y nvidia-container-toolkit + sudo nvidia-ctk runtime configure --runtime=docker + sudo systemctl restart docker + - run: + name: "Test NVCTK" + command: | + sudo docker run --rm --runtime=nvidia --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi + install-cudnn: description: "Install CUDNN 8.8.1" parameters: @@ -190,12 +205,9 @@ commands: trt-version-long: type: string default: "8.6.1.6-1" - bazel-version: - type: string - default: "5.2.0" bazel-platform: type: string - default: "x86_64" + default: "amd64" steps: - install-cudnn: os: << parameters.os >> @@ -212,14 +224,13 @@ commands: sudo apt-get install libnvinfer8=<< parameters.trt-version-long >>+<< parameters.cuda-string-version >> libnvinfer-plugin8=<< 
parameters.trt-version-long>>+<< parameters.cuda-string-version >> libnvinfer-dev=<< parameters.trt-version-long >>+<< parameters.cuda-string-version >> libnvinfer-plugin-dev=<< parameters.trt-version-long>>+<< parameters.cuda-string-version >> libnvinfer-headers-dev=<< parameters.trt-version-long>>+<< parameters.cuda-string-version >> libnvinfer-headers-plugin-dev=<< parameters.trt-version-long>>+<< parameters.cuda-string-version >> - install-bazel: platform: << parameters.bazel-platform >> - version: << parameters.bazel-version >> setup-py-version: description: "Set python version" parameters: python-version: type: string - default: "3.9.4" + default: "3.10.9" steps: - run: name: Set python version @@ -267,11 +278,15 @@ commands: name: Install Torch command: | pip3 install --upgrade pip - pip3 install torch==<< parameters.torch-build >> torchvision==<< parameters.torchvision-build >> --extra-index-url << parameters.torch-build-index >> --extra-index-url << parameters.torchvision-build-index >> + pip3 install torch==<< parameters.torch-build >> torchvision==<< parameters.torchvision-build >> --extra-index-url https://pypi.nvidia.com --extra-index-url << parameters.torch-build-index >> --extra-index-url << parameters.torchvision-build-index >> build-py: description: "Build the torch-tensorrt python release (pre-cxx11-abi)" parameters: + cuda-version: + type: string + torch-build-index: + type: string platform: type: string default: "x86_64" @@ -279,19 +294,21 @@ commands: - run: name: Build torch-tensorrt python release (pre-cxx11-abi) command: | - export CUDA_HOME=/usr/local/cuda-12.1/ + export CUDA_HOME=/usr/local/cuda-<< parameters.cuda-version >>/ mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE - cd py - python3 -m pip install wheel setuptools - python3 -m pip install pybind11==2.6.2 - python3 setup.py bdist_wheel - python3 setup.py install + python3 -m pip install pip==22.0.2 + python3 -m pip wheel --no-deps --verbose --pre . 
--extra-index-url https://pypi.nvidia.com --extra-index-url << parameters.torch-build-index >> -w dist + python3 -m pip install dist/torch_tensorrt* mkdir -p /tmp/dist/builds - cp dist/* /tmp/dist/builds + cp dist/torch_tensorrt* /tmp/dist/builds build-py-legacy: description: "Build the torch-tensorrt python legacy release (pre-cxx11-abi)" parameters: + cuda-version: + type: string + torch-build-index: + type: string platform: type: string default: "x86_64" @@ -299,19 +316,21 @@ commands: - run: name: Build torch-tensorrt python legacy release (pre-cxx11-abi) command: | - export CUDA_HOME=/usr/local/cuda-12.1/ + export CUDA_HOME=/usr/local/cuda-<< parameters.cuda-version >>/ mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE - cd py - python3 -m pip install wheel setuptools - python3 -m pip install pybind11==2.6.2 - python3 setup.py bdist_wheel --legacy - python3 setup.py install --legacy + python3 -m pip install pip==22.0.2 + python3 -m pip wheel --no-deps --verbose --pre . 
--extra-index-url https://pypi.nvidia.com --extra-index-url << parameters.torch-build-index >> --config-setting="--build-option=--legacy" -w dist + python3 -m pip install dist/torch_tensorrt* mkdir -p /tmp/dist/builds - cp dist/* /tmp/dist/builds + cp dist/torch_tensorrt* /tmp/dist/builds build-py-cxx11-abi: description: "Build the torch-tensorrt python release (cxx11-abi)" parameters: + cuda-version: + type: string + torch-build-index: + type: string platform: type: string default: "x86_64" @@ -323,36 +342,38 @@ commands: name: Build setup command: | mv ~/project/toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> ~/project/WORKSPACE - python3 -m pip install wheel setuptools - python3 -m pip install pybind11==2.6.2 - when: condition: << parameters.release >> steps: - run: name: Build torch-tensorrt python release package command: | - export CUDA_HOME=/usr/local/cuda-12.1/ - cd ~/project/py - python3 setup.py bdist_wheel --use-cxx11-abi --release - python3 setup.py install --use-cxx11-abi --release + export CUDA_HOME=/usr/local/cuda-<< parameters.cuda-version >>/ + python3 -m pip install pip==22.0.2 + python3 -m pip wheel --no-deps --verbose --pre . --extra-index-url https://pypi.nvidia.com --extra-index-url << parameters.torch-build-index >> --config-setting="--build-option=--release" --config-setting="--build-option=--use-cxx11-abi" -w dist + python3 -m pip install dist/torch_tensorrt* mkdir -p /tmp/dist/builds - cp dist/* /tmp/dist/builds + cp dist/torch_tensorrt* /tmp/dist/builds - unless: condition: << parameters.release >> steps: - run: name: Build torch-tensorrt python package command: | - export CUDA_HOME=/usr/local/cuda-12.1/ - cd ~/project/py - python3 setup.py bdist_wheel --use-cxx11-abi - python3 setup.py install --use-cxx11-abi + export CUDA_HOME=/usr/local/cuda-<< parameters.cuda-version >>/ + python3 -m pip install pip==22.0.2 + python3 -m pip wheel --no-deps --verbose --pre . 
--extra-index-url https://pypi.nvidia.com --extra-index-url << parameters.torch-build-index >> --config-setting="--build-option=--use-cxx11-abi" -w dist + python3 -m pip install dist/torch_tensorrt* mkdir -p /tmp/dist/builds - cp dist/* /tmp/dist/builds + cp dist/torch_tensorrt* /tmp/dist/builds build-py-fx-only: description: "Build the torch-tensorrt python release with only the fx backend" parameters: + cuda-version: + type: string + torch-build-index: + type: string platform: type: string default: "x86_64" @@ -361,15 +382,13 @@ commands: - run: name: Build torch-tensorrt python release with only the fx backend command: | - export CUDA_HOME=/usr/local/cuda-12.1/ + export CUDA_HOME=/usr/local/cuda-<< parameters.cuda-version >>/ mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE - cd py - python3 -m pip install wheel setuptools - python3 -m pip install pybind11==2.6.2 - python3 setup.py bdist_wheel --fx-only - python3 setup.py install --fx-only + python3 -m pip install pip==22.0.2 + python3 -m pip wheel --no-deps --verbose --pre . 
--extra-index-url https://pypi.nvidia.com --extra-index-url << parameters.torch-build-index >> --config-setting="--build-option=--fx-only" -w dist + python3 -m pip install dist/torch_tensorrt* mkdir -p /tmp/dist/builds - cp dist/* /tmp/dist/builds + cp dist/torch_tensorrt* /tmp/dist/builds build-py-ngc: description: "Build the torch-tensorrt python release for NGC PyTorch (cxx11-abi)" @@ -400,7 +419,6 @@ commands: export BAZEL_VERSION="$(cat /home/circleci/project/.bazelversion)" docker exec ngc_build_container bash -c "wget https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-linux-x86_64 -O /usr/bin/bazel && chmod a+x /usr/bin/bazel" docker exec ngc_build_container bash -c "mv /workspace/docker/WORKSPACE.ngc /workspace/WORKSPACE" - docker exec ngc_build_container bash -c "cd /workspace/py && pip install wheel setuptools pybind11==2.6.2" docker exec ngc_build_container bash -c "git config --global --add safe.directory /workspace" - when: condition: << parameters.release >> @@ -408,14 +426,14 @@ commands: - run: name: Build torch-tensorrt release build for NGC command: | - docker exec ngc_build_container bash -c "cd /workspace/py && python3 setup.py bdist_wheel --use-cxx11-abi --release" + docker exec ngc_build_container bash -c "cd /workspace/ && python3 -m pip wheel --no-deps --verbose --pre . --config-setting="--build-option=--release" --config-setting="--build-option=--use-cxx11-abi" -w dist" - unless: condition: << parameters.release >> steps: - run: name: Build torch-tensorrt debug build for NGC command: | - docker exec ngc_build_container bash -c "cd /workspace/py && python3 setup.py bdist_wheel --use-cxx11-abi" + docker exec ngc_build_container bash -c "cd /workspace/py && python3 -m pip wheel --no-deps --verbose --pre . 
--config-setting="--build-option=--use-cxx11-abi" -w dist" - run: name: Collect builds command: | @@ -428,15 +446,17 @@ commands: platform: type: string default: "x86_64" + cuda-version: + type: string steps: - run: name: Build torch-tensorrt library with CMake command: | mkdir build - export PATH=$PATH:/usr/local/cuda-12.1/bin + export PATH=$PATH:/usr/local/<< parameters.cuda-version >>/bin ~/cmake/bin/cmake -S. -Bbuild \ -DCMAKE_MODULE_PATH=cmake/Module \ - -DTorch_DIR=/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch/share/cmake/Torch \ + -DTorch_DIR=/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch/share/cmake/Torch \ -DTensorRT_ROOT=/usr \ -DCMAKE_BUILD_TYPE=Debug cmake --build build -- -j12 @@ -485,7 +505,7 @@ commands: name: Run core / C++ tests no_output_timeout: 15m environment: - LD_LIBRARY_PATH: "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch_tensorrt.libs:/home/circleci/project/bazel-project/external/libtorch_pre_cxx11_abi/lib/:/home/circleci/project/bazel-project/external/tensorrt/lib/:/usr/local/cuda-12.1/lib64/:$LD_LIBRARY_PATH" + LD_LIBRARY_PATH: "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch_tensorrt.libs:/home/circleci/project/bazel-project/external/libtorch_pre_cxx11_abi/lib/:/home/circleci/project/bazel-project/external/tensorrt/lib/:/usr/local/cuda-12.1/lib64/:$LD_LIBRARY_PATH" command: | set -e mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE @@ -810,6 +830,8 @@ jobs: type: string python-version: type: string + cuda-version: + type: string cxx11-abi: type: boolean default: false @@ -828,8 +850,7 @@ jobs: platform: "x86_64" cudnn-version: << pipeline.parameters.cudnn-version >> trt-version-short: << pipeline.parameters.trt-version-short >> - bazel-version: << pipeline.parameters.bazel-version >> - bazel-platform: "x86_64" + bazel-platform: "amd64" - install-torch-from-index: torch-build: << parameters.torch-build >> 
torchvision-build: << parameters.torchvision-build >> @@ -837,18 +858,24 @@ jobs: - when: condition: << parameters.cxx11-abi >> steps: - - build-py-cxx11-abi + - build-py-cxx11-abi: + cuda-version: << parameters.cuda-version >> + torch-build-index: << parameters.torch-build-index >> - unless: condition: << parameters.cxx11-abi >> steps: - when: condition: << parameters.legacy >> steps: - - build-py-legacy + - build-py-legacy: + cuda-version: << parameters.cuda-version >> + torch-build-index: << parameters.torch-build-index >> - unless: condition: << parameters.legacy >> steps: - - build-py + - build-py: + cuda-version: << parameters.cuda-version >> + torch-build-index: << parameters.torch-build-index >> - run: name: Move to build dir command: | @@ -891,8 +918,7 @@ jobs: platform: "x86_64" cudnn-version: << parameters.cudnn-version >> trt-version-short: << parameters.trt-version-short >> - bazel-version: << pipeline.parameters.bazel-version >> - bazel-platform: "x86_64" + bazel-platform: "amd64" - create-py-env: trt-version-long: << parameters.trt-version-long >> - install-torch-from-index: @@ -903,7 +929,7 @@ jobs: at: /tmp/dist - run: name: "Install torch-tensorrt" - command: pip3 install /tmp/dist/x86_64-linux/*cp39-cp39*.whl + command: pip3 install /tmp/dist/x86_64-linux/*cp310-cp310*.whl - dump-test-env - test-ts-core @@ -936,7 +962,7 @@ jobs: torch-build-index: << parameters.torch-build-index >> - run: name: "Install torch-tensorrt" - command: pip3 install --pre /tmp/dist/x86_64-linux/*cp39-cp39*.whl + command: pip3 install --pre /tmp/dist/x86_64-linux/*cp310-cp310*.whl - dump-test-env - test-ts-py-api @@ -971,7 +997,7 @@ jobs: - install-cudnn - run: name: "Install torch-tensorrt" - command: pip3 install --pre /tmp/dist/x86_64-linux/*cp39-cp39*.whl + command: pip3 install --pre /tmp/dist/x86_64-linux/*cp310-cp310*.whl # We install torch after torch-trt because pip automatically enforces the version constraint otherwise - dump-test-env - test-fx @@ -1010,7 
+1036,7 @@ jobs: # command: export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:$LD_LIBRARY_PATH - run: name: "Install torch-tensorrt" - command: pip3 install --pre /tmp/dist/x86_64-linux/*cp39-cp39*.whl + command: pip3 install --pre /tmp/dist/x86_64-linux/*cp310-cp310*.whl # We install torch after torch-trt because pip automatically enforces the version constraint otherwise - dump-test-env - test-fx-no-aten @@ -1048,7 +1074,7 @@ jobs: # command: export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:$LD_LIBRARY_PATH - run: name: "Install torch-tensorrt" - command: pip3 install --pre /tmp/dist/x86_64-linux/*cp39-cp39*.whl + command: pip3 install --pre /tmp/dist/x86_64-linux/*cp310-cp310*.whl # We install torch after torch-trt because pip automatically enforces the version constraint otherwise - dump-test-env - test-dynamo-converters @@ -1074,6 +1100,7 @@ jobs: condition: << parameters.enabled >> steps: - checkout + - install-nvctk - run: name: "Build packaging container" command: | @@ -1127,8 +1154,7 @@ jobs: platform: "x86_64" cudnn-version: << pipeline.parameters.cudnn-version >> trt-version-short: << pipeline.parameters.trt-version-short >> - bazel-version: << pipeline.parameters.bazel-version >> - bazel-platform: "x86_64" + bazel-platform: "amd64" - install-torch-from-index: torch-build: << parameters.torch-build >> torch-build-index: << parameters.torch-build-index >> @@ -1146,8 +1172,8 @@ jobs: TRT_VERSION=$(python3 -c "from versions import __tensorrt_version__;print(__tensorrt_version__)") CUDNN_VERSION=$(python3 -c "from versions import __cudnn_version__;print(__cudnn_version__)") TORCH_VERSION=$(python3 -c "from torch import __version__;print(__version__.split('+')[0])") - pip3 install --upgrade pip - pip3 install -r ~/project/py/requirements.txt + python3 -m pip install pip==22.0.2 + python3 -m pip install -r ~/project/py/requirements.txt TORCH_VERSION=$(python3 -c "from torch import __version__;print(__version__.split('+')[0])") mkdir -p ~/project/py/dist/ cp 
~/project/bazel-bin/libtorchtrt.tar.gz ~/project/py/dist/libtorchtrt-${TORCHTRT_VERSION}-cudnn${CUDNN_VERSION}-tensorrt${TRT_VERSION}-cuda${CUDA_VERSION}-libtorch${TORCH_VERSION}-x86_64-linux.tar.gz @@ -1199,7 +1225,6 @@ jobs: architecture: "arm64" cudnn-version: << pipeline.parameters.cudnn-jetson-version >> trt-version-short: << pipeline.parameters.trt-jetson-version-short >> - bazel-version: << pipeline.parameters.bazel-version >> bazel-platform: "arm64" - run: name: Set python version @@ -1240,6 +1265,8 @@ jobs: build-x86_64-linux-cmake: parameters: + cuda-version: + type: string cudnn-version: type: string trt-version-short: @@ -1262,13 +1289,14 @@ jobs: platform: "x86_64" cudnn-version: << parameters.cudnn-version >> trt-version-short: << parameters.trt-version-short >> - bazel-platform: "x86_64" + bazel-platform: "amd64" - install-cmake: version: 3.24.1 - install-torch-from-index: torch-build: << parameters.torch-build >> torch-build-index: << parameters.torch-build-index >> - - build-cmake + - build-cmake: + cuda-version: << parameters.cuda-version >> - run: name: Move to cmake build dir command: | @@ -1341,14 +1369,14 @@ jobs: command: echo -e "Packaging stage not enabled" parameters: - bazel-version: - type: string - default: "5.2.0" python-version: type: string - default: "3.9.4" + default: "3.10.9" # Nightly platform config + cuda-version: + type: string + default: "12.1" torch-build: type: string default: "2.1.0.dev20230703+cu121" @@ -1358,6 +1386,20 @@ parameters: torch-build-index: type: string default: "https://download.pytorch.org/whl/nightly/cu121" + cudnn-version: + type: string + default: "8.8.1.3" + trt-version-short: + type: string + default: "8.6.1" + trt-version-long: + type: string + default: "8.6.1" + + # Legacy platform config + cuda-version-legacy: + type: string + default: "11.7" torch-build-legacy: type: string default: "1.13.1+cu117" @@ -1367,20 +1409,20 @@ parameters: torch-build-index-legacy: type: string default: 
"https://download.pytorch.org/whl/cu117" - enable-legacy: - type: boolean - default: true - cudnn-version: + cudnn-version-legacy: type: string default: "8.8.1.3" - trt-version-short: + trt-version-short-legacy: type: string default: "8.6.1" - trt-version-long: + trt-version-long-legacy: type: string default: "8.6.1" # Jetson platform config + cuda-jetson-version: + type: string + default: "11.8" torch-jetson-build: type: string default: "torch-1.13.0a0+d0d6b1f2.nv22.09-cp38-cp38-linux_aarch64.whl" @@ -1409,6 +1451,10 @@ parameters: type: boolean default: false + enable-legacy: + type: boolean + default: true + # Invoke jobs via workflows # See: https://circleci.com/docs/2.0/configuration-reference/#workflows workflows: @@ -1428,6 +1474,7 @@ workflows: torchvision-build: << pipeline.parameters.torchvision-build >> torch-build-index: << pipeline.parameters.torch-build-index >> python-version: << pipeline.parameters.python-version >> + cuda-version: << pipeline.parameters.cuda-version >> - test-core-cpp-x86_64-linux: torch-build: << pipeline.parameters.torch-build >> @@ -1469,6 +1516,7 @@ workflows: - build-x86_64-linux: name: build-x86_64-linux-legacy + cuda-version: << pipeline.parameters.cuda-version-legacy >> torch-build: << pipeline.parameters.torch-build-legacy >> torchvision-build: << pipeline.parameters.torchvision-build-legacy >> torch-build-index: << pipeline.parameters.torch-build-index-legacy >> @@ -1480,9 +1528,9 @@ workflows: torch-build: << pipeline.parameters.torch-build-legacy >> torchvision-build: << pipeline.parameters.torchvision-build-legacy >> torch-build-index: << pipeline.parameters.torch-build-index-legacy >> - trt-version-short: << pipeline.parameters.trt-version-short >> - trt-version-long: << pipeline.parameters.trt-version-long >> - cudnn-version: << pipeline.parameters.cudnn-version >> + trt-version-short: << pipeline.parameters.trt-version-short-legacy >> + trt-version-long: << pipeline.parameters.trt-version-long-legacy >> + 
cudnn-version: << pipeline.parameters.cudnn-version-legacy >> python-version: << pipeline.parameters.python-version >> requires: - build-x86_64-linux-legacy @@ -1492,7 +1540,7 @@ workflows: torch-build: << pipeline.parameters.torch-build-legacy >> torchvision-build: << pipeline.parameters.torchvision-build-legacy >> torch-build-index: << pipeline.parameters.torch-build-index-legacy >> - trt-version-long: << pipeline.parameters.trt-version-long >> + trt-version-long: << pipeline.parameters.trt-version-long-legacy >> python-version: << pipeline.parameters.python-version >> requires: - build-x86_64-linux-legacy @@ -1501,7 +1549,7 @@ workflows: torch-build: << pipeline.parameters.torch-build-legacy >> torchvision-build: << pipeline.parameters.torchvision-build-legacy >> torch-build-index: << pipeline.parameters.torch-build-index-legacy >> - trt-version-long: << pipeline.parameters.trt-version-long >> + trt-version-long: << pipeline.parameters.trt-version-long-legacy >> python-version: << pipeline.parameters.python-version >> requires: - build-x86_64-linux-legacy @@ -1561,6 +1609,7 @@ workflows: on-push: jobs: - build-x86_64-linux: + cuda-version: << pipeline.parameters.cuda-version >> torch-build: << pipeline.parameters.torch-build >> torchvision-build: << pipeline.parameters.torchvision-build >> torch-build-index: << pipeline.parameters.torch-build-index >> @@ -1605,6 +1654,7 @@ workflows: - build-x86_64-linux - build-x86_64-linux-cmake: + cuda-version: << pipeline.parameters.cuda-version >> torch-build: << pipeline.parameters.torch-build >> torch-build-index: << pipeline.parameters.torch-build-index >> trt-version-short: << pipeline.parameters.trt-version-short >> diff --git a/.gitignore b/.gitignore index 929998c3d3..3a7a3b462d 100644 --- a/.gitignore +++ b/.gitignore @@ -66,4 +66,5 @@ bazel-tensorrt *.cache *cifar-10-batches-py* bazel-project -build/ \ No newline at end of file +build/ +wheelhouse/ \ No newline at end of file diff --git a/.pre-commit-config.yaml 
b/.pre-commit-config.yaml index 9f754c915d..bc3884a5d2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,6 +30,10 @@ repos: args: - --warnings=all - id: buildifier-lint + - repo: https://github.com/abravalheri/validate-pyproject + rev: v0.13 + hooks: + - id: validate-pyproject - repo: local hooks: - id: dont-commit-upstream diff --git a/WORKSPACE b/WORKSPACE index 33fa05a5ba..ae98bc949c 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,22 +1,24 @@ workspace(name = "Torch-TensorRT") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "rules_python", - sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz", + sha256 = "863ba0fa944319f7e3d695711427d9ad80ba92c6edd0b7c7443b84e904689539", + strip_prefix = "rules_python-0.22.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.22.0/rules_python-0.22.0.tar.gz", ) -load("@rules_python//python:pip.bzl", "pip_install") +load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() http_archive( name = "rules_pkg", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", ], ) @@ -24,11 +26,11 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") rules_pkg_dependencies() -git_repository( +http_archive( name = 
"googletest", - commit = "703bd9caab50b139428cea1aaff9974ebee5742e", - remote = "https://github.com/google/googletest", - shallow_since = "1570114335 -0400", + sha256 = "755f9a39bc7205f5a0c428e920ddad092c33c8a1b46997def3f1d4a82aded6e1", + strip_prefix = "googletest-5ab508a01f9eb089207ee87fd547d290da39d015", + urls = ["https://github.com/google/googletest/archive/5ab508a01f9eb089207ee87fd547d290da39d015.zip"], ) # External dependency for torch_tensorrt if you already have precompiled binaries. @@ -127,7 +129,13 @@ http_archive( # Development Dependencies (optional - comment out on aarch64) ######################################################################### -pip_install( +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( name = "devtools_deps", requirements = "//:requirements-dev.txt", ) + +load("@devtools_deps//:requirements.bzl", "install_deps") + +install_deps() diff --git a/docker/Dockerfile b/docker/Dockerfile index d16dfc1887..aa3623f32b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,3 +1,5 @@ +# syntax=docker/dockerfile:1 + # Base image starts with CUDA ARG BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04 FROM ${BASE_IMG} as base @@ -50,7 +52,7 @@ RUN apt-get update RUN apt-get install -y libnvinfer8=${TENSORRT_VERSION}.* libnvinfer-plugin8=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers8=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.* libnvparsers8=${TENSORRT_VERSION}.* libnvparsers-dev=${TENSORRT_VERSION}.* # Setup Bazel via Bazelisk -RUN wget -q https://github.com/bazelbuild/bazelisk/releases/download/v1.16.0/bazelisk-linux-amd64 -O /usr/bin/bazel &&\ +RUN wget -q https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 -O /usr/bin/bazel &&\ chmod a+x /usr/bin/bazel # Build Torch-TensorRT in an auxillary container @@ -99,12 +101,12 @@ COPY . 
/opt/torch_tensorrt RUN mkdir -p "/opt/python3/" &&\ ln -s "`pyenv which python | xargs dirname | xargs dirname`/lib/python$PYTHON_VERSION/site-packages" "/opt/python3/" -COPY --from=torch-tensorrt-builder /workspace/torch_tensorrt/src/py/dist/ . +COPY --from=torch-tensorrt-builder /workspace/torch_tensorrt/src/dist/ . RUN cp /opt/torch_tensorrt/docker/WORKSPACE.docker /opt/torch_tensorrt/WORKSPACE RUN pip install -r /opt/torch_tensorrt/py/requirements.txt RUN pip install tensorrt==${TENSORRT_VERSION}.* -RUN pip install *.whl && rm -fr /workspace/torch_tensorrt/py/dist/* *.whl +RUN pip install *.whl && rm -fr /workspace/torch_tensorrt/dist/* *.whl WORKDIR /opt/torch_tensorrt diff --git a/docker/Dockerfile.docs b/docker/Dockerfile.docs index 05429435ec..47f1492433 100644 --- a/docker/Dockerfile.docs +++ b/docker/Dockerfile.docs @@ -1,6 +1,6 @@ FROM nvcr.io/nvidia/tensorrt:22.06-py3 -ARG BAZEL_VERSION=5.2.0 +ARG BAZEL_VERSION=6.2.1 RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add - RUN echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list diff --git a/docker/WORKSPACE.docker b/docker/WORKSPACE.docker index dd5ff3cd9a..b4da144ddc 100755 --- a/docker/WORKSPACE.docker +++ b/docker/WORKSPACE.docker @@ -1,22 +1,24 @@ workspace(name = "Torch-TensorRT") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") http_archive( name = "rules_python", - sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz", + sha256 = "863ba0fa944319f7e3d695711427d9ad80ba92c6edd0b7c7443b84e904689539", + strip_prefix = "rules_python-0.22.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.22.0/rules_python-0.22.0.tar.gz", ) -load("@rules_python//python:pip.bzl", "pip_install") 
+load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() http_archive( name = "rules_pkg", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", ], ) @@ -24,13 +26,14 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") rules_pkg_dependencies() -git_repository( +http_archive( name = "googletest", - commit = "703bd9caab50b139428cea1aaff9974ebee5742e", - remote = "https://github.com/google/googletest", - shallow_since = "1570114335 -0400", + sha256 = "755f9a39bc7205f5a0c428e920ddad092c33c8a1b46997def3f1d4a82aded6e1", + strip_prefix = "googletest-5ab508a01f9eb089207ee87fd547d290da39d015", + urls = ["https://github.com/google/googletest/archive/5ab508a01f9eb089207ee87fd547d290da39d015.zip"], ) + # CUDA should be installed on the system locally new_local_repository( name = "cuda", @@ -79,7 +82,14 @@ new_local_repository( ######################################################################### # Testing Dependencies (optional - comment out on aarch64) ######################################################################### -pip_install( - name = "pylinter_deps", - requirements = "//tools/linter:requirements.txt", + +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "devtools_deps", + requirements = "//:requirements-dev.txt", ) + +load("@devtools_deps//:requirements.bzl", "install_deps") + +install_deps() diff --git a/docker/dist-build.sh b/docker/dist-build.sh index 4c0f4d6b7c..0bc2fc09f9 100755 --- 
a/docker/dist-build.sh +++ b/docker/dist-build.sh @@ -3,22 +3,22 @@ TOP_DIR=$(cd $(dirname $0); pwd)/.. if [[ -z "${USE_CXX11}" ]]; then - BUILD_CMD="python setup.py bdist_wheel" + BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -w dist" else - BUILD_CMD="python setup.py bdist_wheel --use-cxx11-abi" + BUILD_CMD="python -m pip wheel . --config-setting="--build-option=--use-cxx11-abi" --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -w dist" fi +# TensorRT restricts our pip version cd ${TOP_DIR} \ - && mkdir -p dist && cd py \ - && pip install -r requirements.txt \ - && pip install wheel + && python -m pip install "pip<=23.1" wheel \ + && python -m pip install -r py/requirements.txt # Build Torch-TRT -MAX_JOBS=1 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 ${BUILD_CMD} $* || exit 1 +MAX_JOBS=4 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 ${BUILD_CMD} $* || exit 1 -pip3 install ipywidgets --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org +python -m pip install ipywidgets --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org jupyter nbextension enable --py widgetsnbextension -pip3 install timm +python -m pip install timm # test install -pip3 uninstall -y torch_tensorrt && pip3 install ${TOP_DIR}/py/dist/*.whl +python -m pip uninstall -y torch_tensorrt && python -m pip install ${TOP_DIR}/dist/*.whl diff --git a/py/Dockerfile b/py/Dockerfile deleted file mode 100644 index 3852b2ab4e..0000000000 --- a/py/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM pytorch/manylinux-builder:cuda11.6 - -RUN yum install -y ninja-build - -RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64 \ - && mv bazelisk-linux-amd64 /usr/bin/bazel \ - && chmod +x /usr/bin/bazel - -RUN mkdir /workspace - -WORKDIR /workspace diff --git a/py/build_whl.sh b/py/build_whl.sh deleted file mode 100755 
index 38de2aa02b..0000000000 --- a/py/build_whl.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Example usage: docker run -it -v$(pwd)/..:/workspace/TRTorch build_trtorch_wheel /bin/bash /workspace/TRTorch/py/build_whl.sh - -cd /workspace/Torch-TensorRT/py - -export CXX=g++ -export CUDA_HOME=/usr/local/cuda-11.3 - -build_py37() { - /opt/python/cp37-cp37m/bin/python -m pip install -r requirements.txt - /opt/python/cp37-cp37m/bin/python setup.py bdist_wheel --release - #auditwheel repair --plat manylinux2014_x86_64 -} - -build_py38() { - /opt/python/cp38-cp38/bin/python -m pip install -r requirements.txt - /opt/python/cp38-cp38/bin/python setup.py bdist_wheel --release - #auditwheel repair --plat manylinux2014_x86_64 -} - -build_py39() { - /opt/python/cp39-cp39/bin/python -m pip install -r requirements.txt - /opt/python/cp39-cp39/bin/python setup.py bdist_wheel --release - #auditwheel repair --plat manylinux2014_x86_64 -} - -build_py310() { - /opt/python/cp310-cp310/bin/python -m pip install -r requirements.txt - /opt/python/cp310-cp310/bin/python setup.py bdist_wheel --release - #auditwheel repair --plat manylinux2014_x86_64 -} - -#build_py311() { -# /opt/python/cp311-cp311/bin/python -m pip install -r requirements.txt -# /opt/python/cp311-cp311/bin/python setup.py bdist_wheel --release - #auditwheel repair --plat manylinux2014_x86_64 -#} - -build_py37 -build_py38 -build_py39 -build_py310 -#build_py311 diff --git a/py/ci/Dockerfile.ci b/py/ci/Dockerfile.ci index 918eaaef07..96f2b90200 100644 --- a/py/ci/Dockerfile.ci +++ b/py/ci/Dockerfile.ci @@ -1,11 +1,11 @@ -FROM pytorch/manylinux-builder:cuda11.7 +FROM pytorch/manylinux-builder:cuda12.1 ARG trt_version RUN echo -e "Installing with TensorRT ${trt_version}" RUN yum install -y ninja-build tensorrt-${trt_version}.* -RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64 \ +RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \ && 
mv bazelisk-linux-amd64 /usr/bin/bazel \ && chmod +x /usr/bin/bazel diff --git a/py/ci/build_manifest.txt b/py/ci/build_manifest.txt index 7f1ff88237..1cb0277187 100644 --- a/py/ci/build_manifest.txt +++ b/py/ci/build_manifest.txt @@ -1,4 +1,5 @@ py38 py39 py310 +py311 libtorchtrt_pre_cxx11_abi diff --git a/py/ci/build_whl.sh b/py/ci/build_whl.sh index 3249900318..0998e87384 100755 --- a/py/ci/build_whl.sh +++ b/py/ci/build_whl.sh @@ -8,70 +8,73 @@ export PROJECT_DIR=/workspace/project cp -r $CUDA_HOME /usr/local/cuda -py37() { - cd /workspace/project/py - PY_BUILD_CODE=cp37-cp37m - PY_VERSION=3.7 - PY_NAME=python${PY_VERSION} - PY_DIR=/opt/python/${PY_BUILD_CODE} - PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ - ${PY_DIR}/bin/python -m pip install --upgrade pip - ${PY_DIR}/bin/python -m pip install -r requirements.txt - ${PY_DIR}/bin/python -m pip install setuptools wheel auditwheel - ${PY_DIR}/bin/python setup.py bdist_wheel --release --ci - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${PY_PKG_DIR}/torch/lib:${PY_PKG_DIR}/tensorrt/:${CUDA_HOME}/lib64:${CUDA_HOME}/lib64/stubs ${PY_DIR}/bin/python -m auditwheel repair $(cat ${PROJECT_DIR}/py/ci/soname_excludes.params) --plat manylinux_2_17_x86_64 dist/torch_tensorrt-*-${PY_BUILD_CODE}-linux_x86_64.whl +build_wheel() { + $1/bin/python -m pip install --upgrade pip + $1/bin/python -m pip wheel . 
--config-setting="--build-option=--release" --config-setting="--build-option=--ci" -w dist +} + +patch_wheel() { + $2/bin/python -m pip install auditwheel + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$1/torch/lib:$1/tensorrt/:${CUDA_HOME}/lib64:${CUDA_HOME}/lib64/stubs $2/bin/python -m auditwheel repair $(cat ${PROJECT_DIR}/py/ci/soname_excludes.params) --plat manylinux_2_34_x86_64 dist/torch_tensorrt-*-$3-linux_x86_64.whl } py38() { - cd /workspace/project/py + cd /workspace/project PY_BUILD_CODE=cp38-cp38 PY_VERSION=3.8 PY_NAME=python${PY_VERSION} PY_DIR=/opt/python/${PY_BUILD_CODE} PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ - ${PY_DIR}/bin/python -m pip install --upgrade pip - ${PY_DIR}/bin/python -m pip install -r requirements.txt - ${PY_DIR}/bin/python -m pip install setuptools wheel auditwheel - ${PY_DIR}/bin/python setup.py bdist_wheel --release --ci - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${PY_PKG_DIR}/torch/lib:${PY_PKG_DIR}/tensorrt/:${CUDA_HOME}/lib64:${CUDA_HOME}/lib64/stubs ${PY_DIR}/bin/python -m auditwheel repair $(cat ${PROJECT_DIR}/py/ci/soname_excludes.params) --plat manylinux_2_17_x86_64 dist/torch_tensorrt-*-${PY_BUILD_CODE}-linux_x86_64.whl + build_wheel ${PY_DIR} + patch_wheel ${PY_PKG_DIR} ${PY_DIR} ${PY_BUILD_CODE} } py39() { - cd /workspace/project/py + cd /workspace/project PY_BUILD_CODE=cp39-cp39 PY_VERSION=3.9 PY_NAME=python${PY_VERSION} PY_DIR=/opt/python/${PY_BUILD_CODE} PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ - ${PY_DIR}/bin/python -m pip install --upgrade pip - ${PY_DIR}/bin/python -m pip install -r requirements.txt - ${PY_DIR}/bin/python -m pip install setuptools wheel auditwheel - ${PY_DIR}/bin/python setup.py bdist_wheel --release --ci - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${PY_PKG_DIR}/torch/lib:${PY_PKG_DIR}/tensorrt/:${CUDA_HOME}/lib64:${CUDA_HOME}/lib64/stubs ${PY_DIR}/bin/python -m auditwheel repair $(cat ${PROJECT_DIR}/py/ci/soname_excludes.params) --plat manylinux_2_17_x86_64 
dist/torch_tensorrt-*-${PY_BUILD_CODE}-linux_x86_64.whl + build_wheel ${PY_DIR} + patch_wheel ${PY_PKG_DIR} ${PY_DIR} ${PY_BUILD_CODE} } py310() { - cd /workspace/project/py + cd /workspace/project PY_BUILD_CODE=cp310-cp310 PY_VERSION=3.10 PY_NAME=python${PY_VERSION} PY_DIR=/opt/python/${PY_BUILD_CODE} PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ - ${PY_DIR}/bin/python -m pip install --upgrade pip - ${PY_DIR}/bin/python -m pip install -r requirements.txt - ${PY_DIR}/bin/python -m pip install setuptools wheel auditwheel - ${PY_DIR}/bin/python setup.py bdist_wheel --release --ci - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${PY_PKG_DIR}/torch/lib:${PY_PKG_DIR}/tensorrt/:${CUDA_HOME}/lib64:${CUDA_HOME}/lib64/stubs ${PY_DIR}/bin/python -m auditwheel repair $(cat ${PROJECT_DIR}/py/ci/soname_excludes.params) --plat manylinux_2_17_x86_64 dist/torch_tensorrt-*-${PY_BUILD_CODE}-linux_x86_64.whl + build_wheel ${PY_DIR} + patch_wheel ${PY_PKG_DIR} ${PY_DIR} ${PY_BUILD_CODE} } -#build_py311() { -# /opt/python/cp311-cp311/bin/python -m pip install -r requirements.txt -# /opt/python/cp311-cp311/bin/python setup.py bdist_wheel --release --ci -# #auditwheel repair --plat manylinux2014_x86_64 -#} +py311() { + cd /workspace/project + PY_BUILD_CODE=cp311-cp311 + PY_VERSION=3.11 + PY_NAME=python${PY_VERSION} + PY_DIR=/opt/python/${PY_BUILD_CODE} + PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ + build_wheel ${PY_DIR} + patch_wheel ${PY_PKG_DIR} ${PY_DIR} ${PY_BUILD_CODE} +} + +py312() { + cd /workspace/project + PY_BUILD_CODE=cp312-cp312 + PY_VERSION=3.12 + PY_NAME=python${PY_VERSION} + PY_DIR=/opt/python/${PY_BUILD_CODE} + PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ + build_wheel ${PY_DIR} + patch_wheel ${PY_PKG_DIR} ${PY_DIR} ${PY_BUILD_CODE} +} libtorchtrt() { - cd /workspace/project/py + cd /workspace/project mkdir -p /workspace/project/py/wheelhouse PY_BUILD_CODE=cp310-cp310 PY_VERSION=3.10 @@ -79,13 +82,13 @@ libtorchtrt() { PY_DIR=/opt/python/${PY_BUILD_CODE} 
PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ ${PY_DIR}/bin/python -m pip install --upgrade pip - ${PY_DIR}/bin/python -m pip install -r requirements.txt + ${PY_DIR}/bin/python -m pip install -r py/requirements.txt ${PY_DIR}/bin/python -m pip install setuptools wheel auditwheel bazel build //:libtorchtrt --platforms //toolchains:ci_rhel_x86_64_linux -c opt --noshow_progress - CUDA_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __cuda_version__;print(__cuda_version__)") - TORCHTRT_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __version__;print(__version__)") - TRT_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __tensorrt_version__;print(__tensorrt_version__)") - CUDNN_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __cudnn_version__;print(__cudnn_version__)") + CUDA_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cuda_version()") + TORCHTRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.torch_tensorrt_version()") + TRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.tensorrt_version()") + CUDNN_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cudnn_version()") TORCH_VERSION=$(${PY_DIR}/bin/python -c "from torch import __version__;print(__version__.split('+')[0])") cp ${PROJECT_DIR}/bazel-bin/libtorchtrt.tar.gz ${PROJECT_DIR}/py/wheelhouse/libtorchtrt-${TORCHTRT_VERSION}-cudnn${CUDNN_VERSION}-tensorrt${TRT_VERSION}-cuda${CUDA_VERSION}-libtorch${TORCH_VERSION}-x86_64-linux.tar.gz } @@ -99,13 +102,13 @@ libtorchtrt_pre_cxx11_abi() { PY_DIR=/opt/python/${PY_BUILD_CODE} PY_PKG_DIR=${PY_DIR}/lib/${PY_NAME}/site-packages/ ${PY_DIR}/bin/python -m pip install --upgrade pip - ${PY_DIR}/bin/python -m pip install -r requirements.txt + ${PY_DIR}/bin/python -m pip install -r 
${PROJECT_DIR}/py/requirements.txt ${PY_DIR}/bin/python -m pip install setuptools wheel auditwheel bazel build //:libtorchtrt --config pre_cxx11_abi --platforms //toolchains:ci_rhel_x86_64_linux -c opt --noshow_progress - CUDA_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __cuda_version__;print(__cuda_version__)") - TORCHTRT_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __version__;print(__version__)") - TRT_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __tensorrt_version__;print(__tensorrt_version__)") - CUDNN_VERSION=$(cd ${PROJECT_DIR}/py && ${PY_DIR}/bin/python3 -c "from versions import __cudnn_version__;print(__cudnn_version__)") + CUDA_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cuda_version()") + TORCHTRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.torch_tensorrt_version()") + TRT_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.tensorrt_version()") + CUDNN_VERSION=$(cd ${PROJECT_DIR} && ${PY_DIR}/bin/python3 -c "import versions; versions.cudnn_version()") TORCH_VERSION=$(${PY_DIR}/bin/python -c "from torch import __version__;print(__version__.split('+')[0])") cp ${PROJECT_DIR}/bazel-bin/libtorchtrt.tar.gz ${PROJECT_DIR}/py/wheelhouse/libtorchtrt-${TORCHTRT_VERSION}-pre-cxx11-abi-cudnn${CUDNN_VERSION}-tensorrt${TRT_VERSION}-cuda${CUDA_VERSION}-libtorch${TORCH_VERSION}-x86_64-linux.tar.gz } diff --git a/py/ci/soname_excludes.params b/py/ci/soname_excludes.params index 7b3e802f26..a5eecb7c9a 100644 --- a/py/ci/soname_excludes.params +++ b/py/ci/soname_excludes.params @@ -31,3 +31,9 @@ --exclude libcublasLt.so.11 --exclude libnvinfer.so.8 --exclude libcudnn.so.8 +--exclude libcublas.so.12 +--exclude libcublasLt.so.12 +--exclude libcublas.so.12.1.3.1 +--exclude libcublasLt.so.12.1.3.1 +--exclude libcudart.so.11.8.89 +--exclude libcudart.so.11 \ No 
newline at end of file diff --git a/py/requirements.txt b/py/requirements.txt index 542e837215..5c91e3684e 100644 --- a/py/requirements.txt +++ b/py/requirements.txt @@ -6,3 +6,4 @@ torch==2.1.0.dev20230703+cu121 torchvision==0.16.0.dev20230703+cu121 --extra-index-url https://pypi.ngc.nvidia.com tensorrt==8.6.1 +pyyaml \ No newline at end of file diff --git a/py/torch_tensorrt/dynamo/conversion/SourceIR.py b/py/torch_tensorrt/dynamo/_SourceIR.py similarity index 100% rename from py/torch_tensorrt/dynamo/conversion/SourceIR.py rename to py/torch_tensorrt/dynamo/_SourceIR.py diff --git a/py/torch_tensorrt/dynamo/__init__.py b/py/torch_tensorrt/dynamo/__init__.py index 5918bad806..cf9344d94a 100644 --- a/py/torch_tensorrt/dynamo/__init__.py +++ b/py/torch_tensorrt/dynamo/__init__.py @@ -10,3 +10,4 @@ dynamo_tensorrt_converter, ) from .compile import compile + from ._SourceIR import SourceIR diff --git a/py/torch_tensorrt/dynamo/conversion/__init__.py b/py/torch_tensorrt/dynamo/conversion/__init__.py index d201665a5b..16b7f61bca 100644 --- a/py/torch_tensorrt/dynamo/conversion/__init__.py +++ b/py/torch_tensorrt/dynamo/conversion/__init__.py @@ -1,4 +1,3 @@ -from .SourceIR import SourceIR from .aten_ops_converters import * from .trt_interpreter import * from .conversion import * diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py index 38f8692852..6cb3a30abb 100644 --- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py +++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py @@ -7,7 +7,8 @@ from torch.fx.node import Argument, Target, Node from torch_tensorrt.fx.types import TRTNetwork, TRTTensor -from torch_tensorrt.dynamo.conversion import SourceIR, impl +from torch_tensorrt.dynamo._SourceIR import SourceIR +from torch_tensorrt.dynamo.conversion import impl from torch_tensorrt.dynamo.conversion.converter_utils import cast_trt_tensor from 
torch_tensorrt.dynamo.conversion.converter_utils import cast_int_int_div_trt_tensor diff --git a/py/torch_tensorrt/dynamo/conversion/impl/activation.py b/py/torch_tensorrt/dynamo/conversion/impl/activation.py index ec3e078820..6a15454f54 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/activation.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/activation.py @@ -6,13 +6,13 @@ import torch from torch.fx.node import Target +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.impl.activation import * from torch_tensorrt.fx.converters.converter_utils import ( mark_as_int8_layer, set_layer_name, get_trt_plugin, ) -from torch_tensorrt.dynamo.conversion import SourceIR from torch_tensorrt.fx.types import ( TRTNetwork, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py index 79472fa2e7..803e60a2b9 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py @@ -6,7 +6,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.dynamo.conversion.converter_utils import broadcastable from torch_tensorrt.fx.converters.converter_utils import ( broadcast, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/base.py b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/base.py index 9b15ebd4c4..a8e4067493 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/base.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/base.py @@ -11,7 +11,7 @@ unified_dtype_converter, Frameworks, ) -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.dynamo.conversion.converter_utils import ( cast_trt_tensor, ) diff --git 
a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/clamp.py b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/clamp.py index 59e1b0f723..8fc9df586c 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/clamp.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/clamp.py @@ -3,7 +3,7 @@ import tensorrt as trt from torch.fx.node import Target -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.utils import ( unified_dtype_converter, Frameworks, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py index 089fcf223c..a1ec485c31 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py @@ -8,7 +8,7 @@ unified_dtype_converter, Frameworks, ) -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import get_trt_tensor from torch_tensorrt.dynamo.conversion.impl.elementwise.base import ( diff --git a/py/torch_tensorrt/dynamo/conversion/impl/embedding.py b/py/torch_tensorrt/dynamo/conversion/impl/embedding.py index a68d2455ee..7e914a1d89 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/embedding.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/embedding.py @@ -8,10 +8,9 @@ import torch from torch.fx.node import Target +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.types import TRTNetwork, TRTTensor - from torch_tensorrt.fx.converters.converter_utils import ( - SourceIR, set_layer_name, ) diff --git a/py/torch_tensorrt/dynamo/conversion/impl/matmul.py b/py/torch_tensorrt/dynamo/conversion/impl/matmul.py index 846f4ab2ee..9907d3e40d 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/matmul.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/matmul.py @@ -9,7 +9,7 @@ 
unified_dtype_converter, Frameworks, ) -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( get_trt_tensor, broadcast, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/normalization/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/normalization/ops.py index 9d193fdf92..70f71055d1 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/normalization/ops.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/normalization/ops.py @@ -10,7 +10,7 @@ from torch_tensorrt.fx.types import TRTNetwork, TRTTensor from torch_tensorrt.fx.utils import get_dynamic_dims -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( get_trt_plugin, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/permutation.py b/py/torch_tensorrt/dynamo/conversion/impl/permutation.py index 492e35ba97..34fc36de0e 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/permutation.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/permutation.py @@ -4,7 +4,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( set_layer_name, get_positive_dim, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/select.py b/py/torch_tensorrt/dynamo/conversion/impl/select.py index 26ad175104..ae8a72592a 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/select.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/select.py @@ -4,7 +4,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor, Shape -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils 
import ( get_positive_dim, has_dynamic_shape, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/shape.py b/py/torch_tensorrt/dynamo/conversion/impl/shape.py index 7f122f5646..aff161c560 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/shape.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/shape.py @@ -7,7 +7,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( set_layer_name, to_numpy, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/slice/base.py b/py/torch_tensorrt/dynamo/conversion/impl/slice/base.py index 97cc0d1404..71a31b746e 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/slice/base.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/slice/base.py @@ -3,7 +3,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor, Shape -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( has_dynamic_shape, set_layer_name, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/slice/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/slice/ops.py index 848e13ba4b..1405486b4a 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/slice/ops.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/slice/ops.py @@ -4,7 +4,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor, Shape -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( get_positive_dim, has_dynamic_shape, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/squeeze.py b/py/torch_tensorrt/dynamo/conversion/impl/squeeze.py index 4c5ad200ad..16a086754e 100644 --- 
a/py/torch_tensorrt/dynamo/conversion/impl/squeeze.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/squeeze.py @@ -3,7 +3,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( get_positive_dim, set_layer_name, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/unary/base.py b/py/torch_tensorrt/dynamo/conversion/impl/unary/base.py index 0ee1185850..b738b05591 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/unary/base.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/unary/base.py @@ -7,7 +7,7 @@ TRTNetwork, TRTTensor, ) -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import set_layer_name diff --git a/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py index e0a255f800..c1d490104d 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py @@ -8,7 +8,7 @@ TRTTensor, ) -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.dynamo.conversion.impl.elementwise.base import ( diff --git a/py/torch_tensorrt/dynamo/conversion/impl/unsqueeze.py b/py/torch_tensorrt/dynamo/conversion/impl/unsqueeze.py index d1559ef324..d67f790701 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/unsqueeze.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/unsqueeze.py @@ -3,7 +3,7 @@ from torch.fx.node import Target from torch_tensorrt.fx.types import TRTNetwork, TRTTensor -from torch_tensorrt.dynamo.conversion import SourceIR +from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.fx.converters.converter_utils import ( get_positive_dim, get_trt_tensor, 
diff --git a/py/torch_tensorrt/dynamo/lowering/substitutions/einsum.py b/py/torch_tensorrt/dynamo/lowering/substitutions/einsum.py index c4a29b507e..e1c7a5d68e 100644 --- a/py/torch_tensorrt/dynamo/lowering/substitutions/einsum.py +++ b/py/torch_tensorrt/dynamo/lowering/substitutions/einsum.py @@ -33,6 +33,7 @@ def einsum_generic( ) +# TODO: @gs-olive Port to dynamo converter @tensorrt_converter(torch.ops.tensorrt.einsum.default) def aten_ops_einsum( network: TRTNetwork, diff --git a/py/versions.py b/py/versions.py deleted file mode 100644 index c114d09325..0000000000 --- a/py/versions.py +++ /dev/null @@ -1,4 +0,0 @@ -__version__ = "2.0.0.dev0" -__cuda_version__ = "12.1" -__cudnn_version__ = "8.8" -__tensorrt_version__ = "8.6" diff --git a/pyproject.toml b/pyproject.toml index 3b54e05791..ae130dc08a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,25 +1,79 @@ [build-system] requires = [ - "setuptools", - "wheel", + "setuptools>=68.0.0", + "packaging>=23.1", + "wheel>=0.40.0", + "ninja>=1.11.0", + "pyyaml>=6.0", + "cffi>=1.15.1", + "typing-extensions>=4.7.0", + "future>=0.18.3", + "tensorrt>=8.6,<8.7", + #"torch >=2.0.0,<2.1.0", + "torch==2.1.0.dev20230703+cu121", + "pybind11==2.6.2", "numpy", - "ninja", - "pyyaml", - "setuptools", - "cffi", - "typing_extensions", - "future", - "tensorrt >=8.6,<8.7" ] +build-backend = "setuptools.build_meta" -# Use legacy backend to import local packages in setup.py -#build-backend = "setuptools.build_meta:__legacy__" +[project] +name = "torch_tensorrt" +authors = [ + {name="NVIDIA Corporation", email="narens@nvidia.com"} +] +description = "Torch-TensorRT is a package which allows users to automatically compile PyTorch and TorchScript modules to TensorRT while remaining in PyTorch" +license = {file = "LICENSE"} +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Environment :: GPU :: NVIDIA CUDA", + "License :: OSI Approved :: BSD License", + "Intended Audience :: Developers", + "Intended Audience :: 
Science/Research", + "Operating System :: POSIX :: Linux", + "Programming Language :: C++", + "Programming Language :: Python", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", +] +readme = {file = "py/README.md", content-type = "text/markdown"} +requires-python = ">=3.8" +keywords = ["pytorch", "torch", "tensorrt", "trt", "ai", "artificial intelligence", "ml", "machine learning", "dl", "deep learning", "compiler", "dynamo", "torchscript", "inference"] +dependencies = [ + #"torch>=2.0.0,<2.1.0", + "torch==2.1.0.dev20230703+cu121", + "tensorrt>=8.6,<8.7", + "packaging>=23", + "numpy", + "typing-extensions>=4.7.0", +] +dynamic = ["version"] + +[project.optional-dependencies] +torchvision = ["torchvision >=0.16.dev,<0.17.0"] + +[project.urls] +Homepage = "https://pytorch.org/tensorrt" +Documentation = "https://pytorch.org/tensorrt" +Repository = "https://github.com/pytorch/tensorrt.git" +Changelog = "https://github.com/pytorch/tensorrt/releases" [tool.black] # Uncomment if pyproject.toml worked fine to ensure consistency with flake8 # line-length = 120 -target-versions = ["py38", "py39", "py310"] +target-versions = ["py38", "py39", "py310", "py311", "py312"] force-exclude = """ elu_converter/setup.py """ + +[tool.mypy] +show_error_codes = true +disable_error_code = "attr-defined" +no_implicit_optional = true + +[tool.setuptools] +package-dir = {"" = "py"} +include-package-data = false \ No newline at end of file diff --git a/py/setup.py b/setup.py similarity index 50% rename from py/setup.py rename to setup.py index e07e904f87..8e5c5330b7 100644 --- a/py/setup.py +++ b/setup.py @@ -1,11 +1,14 @@ import os import sys import glob +import yaml +from dataclasses import dataclass import setuptools from setuptools import setup, Extension, find_namespace_packages from 
setuptools.command.build_ext import build_ext from setuptools.command.develop import develop from setuptools.command.install import install +from setuptools.command.editable_wheel import editable_wheel from distutils.cmd import Command from wheel.bdist_wheel import bdist_wheel @@ -16,27 +19,36 @@ import platform import warnings -from versions import ( - __version__, - __cuda_version__, - __cudnn_version__, - __tensorrt_version__, -) - -dir_path = os.path.dirname(os.path.realpath(__file__)) +dir_path = os.path.dirname(os.path.realpath(__file__)) + "/py" CXX11_ABI = False - JETPACK_VERSION = None - FX_ONLY = False - LEGACY = False - RELEASE = False - CI_RELEASE = False +__version__: str = "0.0.0" +__cuda_version__: str = "0.0" +__cudnn_version__: str = "0.0" +__tensorrt_version__: str = "0.0" + + +def load_version_info(): + global __version__ + global __cuda_version__ + global __cudnn_version__ + global __tensorrt_version__ + with open("versions.yml", "r") as stream: + versions = yaml.safe_load(stream) + __version__ = versions["__version__"] + __cuda_version__ = versions["__cuda_version__"] + __cudnn_version__ = versions["__cudnn_version__"] + __tensorrt_version__ = versions["__tensorrt_version__"] + + +load_version_info() + def get_git_revision_short_hash() -> str: return ( @@ -182,7 +194,9 @@ def copy_libtorchtrt(multilinux=False): ) else: os.system( - "tar -xzf ../bazel-bin/libtorchtrt.tar.gz --strip-components=2 -C " + "tar -xzf " + + dir_path + + "/../bazel-bin/libtorchtrt.tar.gz --strip-components=1 -C " + dir_path + "/torch_tensorrt" ) @@ -247,6 +261,27 @@ def run(self): bdist_wheel.run(self) +class EditableWheelCommand(editable_wheel): + description = "Builds the package in development mode" + + def initialize_options(self): + editable_wheel.initialize_options(self) + + def finalize_options(self): + editable_wheel.finalize_options(self) + + def run(self): + if FX_ONLY: + gen_version_file() + editable_wheel.run(self) + else: + global CXX11_ABI + 
build_libtorchtrt_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI) + gen_version_file() + copy_libtorchtrt() + editable_wheel.run(self) + + class CleanCommand(Command): """Custom clean command to tidy up the project root.""" @@ -299,171 +334,196 @@ def run(self): os.remove(path) -ext_modules = [ - cpp_extension.CUDAExtension( - "torch_tensorrt._C", - [ - "torch_tensorrt/csrc/torch_tensorrt_py.cpp", - "torch_tensorrt/csrc/tensorrt_backend.cpp", - "torch_tensorrt/csrc/tensorrt_classes.cpp", - "torch_tensorrt/csrc/register_tensorrt_classes.cpp", - ], - library_dirs=[ - (dir_path + "/torch_tensorrt/lib/"), - "/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu", - ], - libraries=["torchtrt"], - include_dirs=[ - dir_path + "torch_tensorrt/csrc", - dir_path + "torch_tensorrt/include", - dir_path + "/../bazel-TRTorch/external/tensorrt/include", - dir_path + "/../bazel-Torch-TensorRT/external/tensorrt/include", - dir_path + "/../bazel-TensorRT/external/tensorrt/include", - dir_path + "/../bazel-tensorrt/external/tensorrt/include", - dir_path + "/../", - ], - extra_compile_args=[ - "-Wno-deprecated", - "-Wno-deprecated-declarations", - ] - + ( - ["-D_GLIBCXX_USE_CXX11_ABI=1"] - if CXX11_ABI - else ["-D_GLIBCXX_USE_CXX11_ABI=0"] - ), - extra_link_args=[ - "-Wno-deprecated", - "-Wno-deprecated-declarations", - "-Wl,--no-as-needed", - "-ltorchtrt", - "-Wl,-rpath,$ORIGIN/lib", - "-lpthread", - "-ldl", - "-lutil", - "-lrt", - "-lm", - "-Xlinker", - "-export-dynamic", - ] - + ( - ["-D_GLIBCXX_USE_CXX11_ABI=1"] - if CXX11_ABI - else ["-D_GLIBCXX_USE_CXX11_ABI=0"] - ), - undef_macros=["NDEBUG"], - ) +ext_modules = [] + +packages = [ + "torch_tensorrt", + "torch_tensorrt.dynamo", + "torch_tensorrt.dynamo.backend", + "torch_tensorrt.dynamo.conversion", + "torch_tensorrt.dynamo.conversion.impl", + "torch_tensorrt.dynamo.conversion.impl.condition", + "torch_tensorrt.dynamo.conversion.impl.elementwise", + "torch_tensorrt.dynamo.conversion.impl.normalization", + 
"torch_tensorrt.dynamo.conversion.impl.slice", + "torch_tensorrt.dynamo.conversion.impl.unary", + "torch_tensorrt.dynamo.lowering", + "torch_tensorrt.dynamo.lowering.substitutions", + "torch_tensorrt.dynamo.runtime", + "torch_tensorrt.fx", + "torch_tensorrt.fx.converters", + "torch_tensorrt.fx.converters.impl", + "torch_tensorrt.fx.passes", + "torch_tensorrt.fx.tools", + "torch_tensorrt.fx.tracer", + "torch_tensorrt.fx.tracer.acc_tracer", + "torch_tensorrt.fx.tracer.dispatch_tracer", ] -if FX_ONLY: - ext_modules = None - packages = [ - "torch_tensorrt.fx", - "torch_tensorrt.fx.converters", - "torch_tensorrt.fx.passes", - "torch_tensorrt.fx.tools", - "torch_tensorrt.fx.tracer.acc_tracer", - "torch_tensorrt.fx.tracer.dispatch_tracer", - "torch_tensorrt.dynamo", - "torch_tensorrt.dynamo.fx_ts_compat", - "torch_tensorrt.dynamo.fx_ts_compat.passes", - "torch_tensorrt.dynamo.fx_ts_compat.tools", - ] - package_dir = { - "torch_tensorrt.fx": "torch_tensorrt/fx", - "torch_tensorrt.fx.converters": "torch_tensorrt/fx/converters", - "torch_tensorrt.fx.passes": "torch_tensorrt/fx/passes", - "torch_tensorrt.fx.tools": "torch_tensorrt/fx/tools", - "torch_tensorrt.fx.tracer.acc_tracer": "torch_tensorrt/fx/tracer/acc_tracer", - "torch_tensorrt.fx.tracer.dispatch_tracer": "torch_tensorrt/fx/tracer/dispatch_tracer", - "torch_tensorrt.dynamo": "torch_tensorrt/dynamo", - "torch_tensorrt.dynamo.fx_ts_compat": "torch_tensorrt/dynamo/fx_ts_compat", - "torch_tensorrt.dynamo.fx_ts_compat.passes": "torch_tensorrt/dynamo/fx_ts_compat/passes", - "torch_tensorrt.dynamo.fx_ts_compat.tools": "torch_tensorrt/dynamo/fx_ts_compat/tools", - } -with open("README.md", "r", encoding="utf-8") as fh: - long_description = fh.read() +package_dir = { + "torch_tensorrt": "py/torch_tensorrt", + "torch_tensorrt.dynamo": "py/torch_tensorrt/dynamo", + "torch_tensorrt.dynamo.backend": "py/torch_tensorrt/dynamo/backend", + "torch_tensorrt.dynamo.conversion": "py/torch_tensorrt/dynamo/conversion", + 
"torch_tensorrt.dynamo.conversion.impl": "py/torch_tensorrt/dynamo/conversion/impl", + "torch_tensorrt.dynamo.conversion.impl.condition": "py/torch_tensorrt/dynamo/conversion/impl/condition", + "torch_tensorrt.dynamo.conversion.impl.elementwise": "py/torch_tensorrt/dynamo/conversion/impl/elementwise", + "torch_tensorrt.dynamo.conversion.impl.normalization": "py/torch_tensorrt/dynamo/conversion/impl/normalization", + "torch_tensorrt.dynamo.conversion.impl.slice": "py/torch_tensorrt/dynamo/conversion/impl/slice", + "torch_tensorrt.dynamo.conversion.impl.unary": "py/torch_tensorrt/dynamo/conversion/impl/unary", + "torch_tensorrt.dynamo.lowering": "py/torch_tensorrt/dynamo/lowering", + "torch_tensorrt.dynamo.lowering.substitutions": "py/torch_tensorrt/dynamo/lowering/substitutions", + "torch_tensorrt.dynamo.runtime": "py/torch_tensorrt/dynamo/runtime", + "torch_tensorrt.fx": "py/torch_tensorrt/fx", + "torch_tensorrt.fx.converters": "py/torch_tensorrt/fx/converters", + "torch_tensorrt.fx.converters.impl": "py/torch_tensorrt/fx/converters/impl", + "torch_tensorrt.fx.passes": "py/torch_tensorrt/fx/passes", + "torch_tensorrt.fx.tools": "py/torch_tensorrt/fx/tools", + "torch_tensorrt.fx.tracer": "py/torch_tensorrt/fx/tracer", + "torch_tensorrt.fx.tracer.acc_tracer": "py/torch_tensorrt/fx/tracer/acc_tracer", + "torch_tensorrt.fx.tracer.dispatch_tracer": "py/torch_tensorrt/fx/tracer/dispatch_tracer", +} + +package_data = {} -if FX_ONLY: - package_data_list = [ - "_Input.py", +if not FX_ONLY: + ext_modules += [ + cpp_extension.CUDAExtension( + "torch_tensorrt._C", + [ + "py/" + f + for f in [ + "torch_tensorrt/csrc/torch_tensorrt_py.cpp", + "torch_tensorrt/csrc/tensorrt_backend.cpp", + "torch_tensorrt/csrc/tensorrt_classes.cpp", + "torch_tensorrt/csrc/register_tensorrt_classes.cpp", + ] + ], + library_dirs=[ + (dir_path + "/torch_tensorrt/lib/"), + "/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu", + ], + libraries=["torchtrt"], + include_dirs=[ + dir_path + 
"torch_tensorrt/csrc", + dir_path + "torch_tensorrt/include", + dir_path + "/../bazel-TRTorch/external/tensorrt/include", + dir_path + "/../bazel-Torch-TensorRT/external/tensorrt/include", + dir_path + "/../bazel-TensorRT/external/tensorrt/include", + dir_path + "/../bazel-tensorrt/external/tensorrt/include", + dir_path + "/../", + ], + extra_compile_args=[ + "-Wno-deprecated", + "-Wno-deprecated-declarations", + ] + + ( + ["-D_GLIBCXX_USE_CXX11_ABI=1"] + if CXX11_ABI + else ["-D_GLIBCXX_USE_CXX11_ABI=0"] + ), + extra_link_args=[ + "-Wno-deprecated", + "-Wno-deprecated-declarations", + "-Wl,--no-as-needed", + "-ltorchtrt", + "-Wl,-rpath,$ORIGIN/lib", + "-lpthread", + "-ldl", + "-lutil", + "-lrt", + "-lm", + "-Xlinker", + "-export-dynamic", + ] + + ( + ["-D_GLIBCXX_USE_CXX11_ABI=1"] + if CXX11_ABI + else ["-D_GLIBCXX_USE_CXX11_ABI=0"] + ), + undef_macros=["NDEBUG"], + ) ] -else: - package_data_list = [ - "lib/*", - "include/torch_tensorrt/*.h", - "include/torch_tensorrt/core/*.h", - "include/torch_tensorrt/core/conversion/*.h", - "include/torch_tensorrt/core/conversion/conversionctx/*.h", - "include/torch_tensorrt/core/conversion/converters/*.h", - "include/torch_tensorrt/core/conversion/evaluators/*.h", - "include/torch_tensorrt/core/conversion/tensorcontainer/*.h", - "include/torch_tensorrt/core/conversion/var/*.h", - "include/torch_tensorrt/core/ir/*.h", - "include/torch_tensorrt/core/lowering/*.h", - "include/torch_tensorrt/core/lowering/passes/*.h", - "include/torch_tensorrt/core/partitioning/*.h", - "include/torch_tensorrt/core/partitioning/segmentedblock/*.h", - "include/torch_tensorrt/core/partitioning/partitioninginfo/*.h", - "include/torch_tensorrt/core/partitioning/partitioningctx/*.h", - "include/torch_tensorrt/core/plugins/*.h", - "include/torch_tensorrt/core/plugins/impl/*.h", - "include/torch_tensorrt/core/runtime/*.h", - "include/torch_tensorrt/core/util/*.h", - "include/torch_tensorrt/core/util/logging/*.h", - "bin/*", - "BUILD", - "WORKSPACE", + + 
packages += [ + "torch_tensorrt.ts", ] + package_dir.update( + { + "torch_tensorrt.ts": "py/torch_tensorrt/ts", + } + ) + + package_data.update( + { + "torch_tensorrt": [ + "BUILD", + "WORKSPACE", + "include/torch_tensorrt/*.h", + "include/torch_tensorrt/core/*.h", + "include/torch_tensorrt/core/conversion/*.h", + "include/torch_tensorrt/core/conversion/conversionctx/*.h", + "include/torch_tensorrt/core/conversion/converters/*.h", + "include/torch_tensorrt/core/conversion/evaluators/*.h", + "include/torch_tensorrt/core/conversion/tensorcontainer/*.h", + "include/torch_tensorrt/core/conversion/var/*.h", + "include/torch_tensorrt/core/ir/*.h", + "include/torch_tensorrt/core/lowering/*.h", + "include/torch_tensorrt/core/lowering/passes/*.h", + "include/torch_tensorrt/core/partitioning/*.h", + "include/torch_tensorrt/core/partitioning/segmentedblock/*.h", + "include/torch_tensorrt/core/partitioning/partitioninginfo/*.h", + "include/torch_tensorrt/core/partitioning/partitioningctx/*.h", + "include/torch_tensorrt/core/plugins/*.h", + "include/torch_tensorrt/core/plugins/impl/*.h", + "include/torch_tensorrt/core/runtime/*.h", + "include/torch_tensorrt/core/util/*.h", + "include/torch_tensorrt/core/util/logging/*.h", + "bin/*", + "lib/*", + ] + } + ) + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + setup( name="torch_tensorrt", - version=__version__, - author="NVIDIA", - author_email="narens@nvidia.com", - url="https://nvidia.github.io/torch-tensorrt", - description="Torch-TensorRT is a package which allows users to automatically compile PyTorch and TorchScript modules to TensorRT while remaining in PyTorch", - long_description_content_type="text/markdown", - long_description=long_description, ext_modules=ext_modules, - install_requires=[ - "torch >=2.1.dev,<2.2" if not LEGACY else "torch >=1.13.0,<2.0", - "pyyaml", - "packaging", - ], - setup_requires=[], + version=__version__, cmdclass={ "install": InstallCommand, "clean": 
CleanCommand, "develop": DevelopCommand, "build_ext": cpp_extension.BuildExtension, "bdist_wheel": BdistCommand, + "editable_wheel": EditableWheelCommand, }, zip_safe=False, - license="BSD", - packages=packages if FX_ONLY else find_namespace_packages(), - package_dir=package_dir if FX_ONLY else {}, - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Environment :: GPU :: NVIDIA CUDA", - "License :: OSI Approved :: BSD License", - "Intended Audience :: Developers", - "Intended Audience :: Science/Research", - "Operating System :: POSIX :: Linux", - "Programming Language :: C++", - "Programming Language :: Python", - "Programming Language :: Python :: Implementation :: CPython", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Software Development", - "Topic :: Software Development :: Libraries", - ], - python_requires=">=3.8", - include_package_data=True, - package_data={ - "torch_tensorrt": package_data_list, - }, + packages=packages, + package_dir=package_dir, + include_package_data=False, + package_data=package_data, exclude_package_data={ - "": ["*.cpp"], - "torch_tensorrt": ["csrc/*.cpp"], + "": [ + "py/torch_tensorrt/csrc/*.cpp", + "py/torch_tensorrt/fx/test*", + "torch_tensorrt/csrc/*.cpp", + "torch_tensorrt/fx/test*", + "test*", + "*.cpp", + ], + "torch_tensorrt": [ + "py/torch_tensorrt/csrc/*.cpp", + "py/torch_tensorrt/fx/test*", + "torch_tensorrt/csrc/*.cpp", + "torch_tensorrt/fx/test*", + "test*", + "*.cpp", + ], + "torch_tensorrt.dynamo": ["test/*.py"], + "torch_tensorrt.fx": ["test/*.py"], }, ) diff --git a/tests/cpp/test_compiled_modules.cpp b/tests/cpp/test_compiled_modules.cpp index d982c8ec6a..7a2a80f2a2 100644 --- a/tests/cpp/test_compiled_modules.cpp +++ b/tests/cpp/test_compiled_modules.cpp @@ -45,7 +45,11 @@ TEST_P(CppAPITests, CompiledModuleIsClose) { } } -#ifndef DISABLE_TEST_IN_CI +#ifdef DISABLE_TEST_IN_CI + 
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(CppAPITests); + +#else INSTANTIATE_TEST_SUITE_P( CompiledModuleForwardIsCloseSuite, diff --git a/tests/cpp/test_modules_as_engines.cpp b/tests/cpp/test_modules_as_engines.cpp index b76bae7333..b2832298a3 100644 --- a/tests/cpp/test_modules_as_engines.cpp +++ b/tests/cpp/test_modules_as_engines.cpp @@ -17,7 +17,11 @@ TEST_P(CppAPITests, ModuleAsEngineIsClose) { ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results[0], trt_results[0].reshape_as(jit_results[0]))); } -#ifndef DISABLE_TEST_IN_CI +#ifdef DISABLE_TEST_IN_CI + +GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(CppAPITests); + +#else INSTANTIATE_TEST_SUITE_P( ModuleAsEngineForwardIsCloseSuite, diff --git a/toolchains/ci_workspaces/WORKSPACE.sbsa b/toolchains/ci_workspaces/WORKSPACE.sbsa index cf45a596c3..d30d45b15a 100644 --- a/toolchains/ci_workspaces/WORKSPACE.sbsa +++ b/toolchains/ci_workspaces/WORKSPACE.sbsa @@ -1,22 +1,24 @@ workspace(name = "Torch-TensorRT") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") http_archive( name = "rules_python", - sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz", + sha256 = "863ba0fa944319f7e3d695711427d9ad80ba92c6edd0b7c7443b84e904689539", + strip_prefix = "rules_python-0.22.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.22.0/rules_python-0.22.0.tar.gz", ) -load("@rules_python//python:pip.bzl", "pip_install") +load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() http_archive( name = "rules_pkg", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", urls = [ - 
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", ], ) @@ -24,17 +26,17 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") rules_pkg_dependencies() -git_repository( +http_archive( name = "googletest", - commit = "703bd9caab50b139428cea1aaff9974ebee5742e", - remote = "https://github.com/google/googletest", - shallow_since = "1570114335 -0400", + sha256 = "755f9a39bc7205f5a0c428e920ddad092c33c8a1b46997def3f1d4a82aded6e1", + strip_prefix = "googletest-5ab508a01f9eb089207ee87fd547d290da39d015", + urls = ["https://github.com/google/googletest/archive/5ab508a01f9eb089207ee87fd547d290da39d015.zip"], ) # External dependency for torch_tensorrt if you already have precompiled binaries. 
local_repository( name = "torch_tensorrt", - path = "/opt/circleci/.pyenv/versions/3.8.10/lib/python3.8/site-packages/torch_tensorrt" + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch_tensorrt" ) # CUDA should be installed on the system locally @@ -62,13 +64,13 @@ new_local_repository( new_local_repository( name = "libtorch", - path = "/opt/circleci/.pyenv/versions/3.8.10/lib/python3.8/site-packages/torch", + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch", build_file = "third_party/libtorch/BUILD" ) new_local_repository( name = "libtorch_pre_cxx11_abi", - path = "/opt/circleci/.pyenv/versions/3.8.10/lib/python3.8/site-packages/torch", + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch", build_file = "third_party/libtorch/BUILD" ) @@ -84,7 +86,13 @@ new_local_repository( build_file = "@//third_party/tensorrt/local:BUILD" ) -pip_install( - name = "pylinter_deps", - requirements = "//tools/linter:requirements.txt", +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "devtools_deps", + requirements = "//:requirements-dev.txt", ) + +load("@devtools_deps//:requirements.bzl", "install_deps") + +install_deps() diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64 b/toolchains/ci_workspaces/WORKSPACE.x86_64 index ff95a22b2f..e73597beb4 100644 --- a/toolchains/ci_workspaces/WORKSPACE.x86_64 +++ b/toolchains/ci_workspaces/WORKSPACE.x86_64 @@ -1,22 +1,24 @@ workspace(name = "Torch-TensorRT") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") http_archive( name = "rules_python", - sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz", + sha256 = "863ba0fa944319f7e3d695711427d9ad80ba92c6edd0b7c7443b84e904689539", + strip_prefix = 
"rules_python-0.22.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.22.0/rules_python-0.22.0.tar.gz", ) -load("@rules_python//python:pip.bzl", "pip_install") +load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() http_archive( name = "rules_pkg", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", ], ) @@ -24,17 +26,17 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") rules_pkg_dependencies() -git_repository( +http_archive( name = "googletest", - commit = "703bd9caab50b139428cea1aaff9974ebee5742e", - remote = "https://github.com/google/googletest", - shallow_since = "1570114335 -0400", + sha256 = "755f9a39bc7205f5a0c428e920ddad092c33c8a1b46997def3f1d4a82aded6e1", + strip_prefix = "googletest-5ab508a01f9eb089207ee87fd547d290da39d015", + urls = ["https://github.com/google/googletest/archive/5ab508a01f9eb089207ee87fd547d290da39d015.zip"], ) # External dependency for torch_tensorrt if you already have precompiled binaries. 
local_repository( name = "torch_tensorrt", - path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch_tensorrt" + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch_tensorrt" ) # CUDA should be installed on the system locally @@ -63,13 +65,13 @@ new_local_repository( new_local_repository( name = "libtorch", - path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch", + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch", build_file = "third_party/libtorch/BUILD" ) new_local_repository( name = "libtorch_pre_cxx11_abi", - path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch", + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch", build_file = "third_party/libtorch/BUILD" ) @@ -85,7 +87,13 @@ new_local_repository( build_file = "@//third_party/tensorrt/local:BUILD" ) -pip_install( - name = "pylinter_deps", - requirements = "//tools/linter:requirements.txt", +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "devtools_deps", + requirements = "//:requirements-dev.txt", ) + +load("@devtools_deps//:requirements.bzl", "install_deps") + +install_deps() \ No newline at end of file diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel index b25d9b492e..8d6ff234e8 100644 --- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel +++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel @@ -1,22 +1,24 @@ workspace(name = "Torch-TensorRT") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") http_archive( name = "rules_python", - sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz", + sha256 = 
"863ba0fa944319f7e3d695711427d9ad80ba92c6edd0b7c7443b84e904689539", + strip_prefix = "rules_python-0.22.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.22.0/rules_python-0.22.0.tar.gz", ) -load("@rules_python//python:pip.bzl", "pip_install") +load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() http_archive( name = "rules_pkg", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", ], ) @@ -24,17 +26,17 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") rules_pkg_dependencies() -git_repository( +http_archive( name = "googletest", - commit = "703bd9caab50b139428cea1aaff9974ebee5742e", - remote = "https://github.com/google/googletest", - shallow_since = "1570114335 -0400", + sha256 = "755f9a39bc7205f5a0c428e920ddad092c33c8a1b46997def3f1d4a82aded6e1", + strip_prefix = "googletest-5ab508a01f9eb089207ee87fd547d290da39d015", + urls = ["https://github.com/google/googletest/archive/5ab508a01f9eb089207ee87fd547d290da39d015.zip"], ) # External dependency for torch_tensorrt if you already have precompiled binaries. 
local_repository( name = "torch_tensorrt", - path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch_tensorrt" + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch_tensorrt" ) # CUDA should be installed on the system locally @@ -89,7 +91,13 @@ new_local_repository( # # Testing Dependencies (optional - comment out on aarch64) # ######################################################################### -pip_install( - name = "pylinter_deps", - requirements = "//tools/linter:requirements.txt", +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "devtools_deps", + requirements = "//:requirements-dev.txt", ) + +load("@devtools_deps//:requirements.bzl", "install_deps") + +install_deps() diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu index b25d9b492e..60e29f2b60 100644 --- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu +++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu @@ -1,22 +1,24 @@ workspace(name = "Torch-TensorRT") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") http_archive( name = "rules_python", - sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz", + sha256 = "863ba0fa944319f7e3d695711427d9ad80ba92c6edd0b7c7443b84e904689539", + strip_prefix = "rules_python-0.22.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.22.0/rules_python-0.22.0.tar.gz", ) -load("@rules_python//python:pip.bzl", "pip_install") +load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() http_archive( name = "rules_pkg", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + sha256 = 
"8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", ], ) @@ -24,17 +26,17 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") rules_pkg_dependencies() -git_repository( +http_archive( name = "googletest", - commit = "703bd9caab50b139428cea1aaff9974ebee5742e", - remote = "https://github.com/google/googletest", - shallow_since = "1570114335 -0400", -) + sha256 = "755f9a39bc7205f5a0c428e920ddad092c33c8a1b46997def3f1d4a82aded6e1", + strip_prefix = "googletest-5ab508a01f9eb089207ee87fd547d290da39d015", + urls = ["https://github.com/google/googletest/archive/5ab508a01f9eb089207ee87fd547d290da39d015.zip"], +) shallow_since = "1570114335 -0400", # External dependency for torch_tensorrt if you already have precompiled binaries. 
local_repository( name = "torch_tensorrt", - path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch_tensorrt" + path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch_tensorrt" ) # CUDA should be installed on the system locally @@ -89,7 +91,13 @@ new_local_repository( # # Testing Dependencies (optional - comment out on aarch64) # ######################################################################### -pip_install( - name = "pylinter_deps", - requirements = "//tools/linter:requirements.txt", +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "devtools_deps", + requirements = "//:requirements-dev.txt", ) + +load("@devtools_deps//:requirements.bzl", "install_deps") + +install_deps() \ No newline at end of file diff --git a/toolchains/jp_workspaces/WORKSPACE.jp50 b/toolchains/jp_workspaces/WORKSPACE.jp50 index a5c054dd23..3038c06117 100644 --- a/toolchains/jp_workspaces/WORKSPACE.jp50 +++ b/toolchains/jp_workspaces/WORKSPACE.jp50 @@ -3,20 +3,23 @@ workspace(name = "Torch-TensorRT") load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") http_archive( name = "rules_python", - sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz", + sha256 = "863ba0fa944319f7e3d695711427d9ad80ba92c6edd0b7c7443b84e904689539", + strip_prefix = "rules_python-0.22.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.22.0/rules_python-0.22.0.tar.gz", ) -load("@rules_python//python:pip.bzl", "pip_install") +load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() http_archive( name = "rules_pkg", - sha256 = 
"038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", ], ) @@ -24,11 +31,11 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") rules_pkg_dependencies() -git_repository( +http_archive( name = "googletest", - commit = "703bd9caab50b139428cea1aaff9974ebee5742e", - remote = "https://github.com/google/googletest", - shallow_since = "1570114335 -0400", + sha256 = "755f9a39bc7205f5a0c428e920ddad092c33c8a1b46997def3f1d4a82aded6e1", + strip_prefix = "googletest-5ab508a01f9eb089207ee87fd547d290da39d015", + urls = ["https://github.com/google/googletest/archive/5ab508a01f9eb089207ee87fd547d290da39d015.zip"], ) # External dependency for torch_tensorrt if you already have precompiled binaries. 
diff --git a/versions.py b/versions.py new file mode 100644 index 0000000000..f038aabb24 --- /dev/null +++ b/versions.py @@ -0,0 +1,38 @@ +import yaml + +__version__: str = "0.0.0" +__cuda_version__: str = "0.0" +__cudnn_version__: str = "0.0" +__tensorrt_version__: str = "0.0" + + +def load_version_info(): + global __version__ + global __cuda_version__ + global __cudnn_version__ + global __tensorrt_version__ + with open("versions.yml", "r") as stream: + versions = yaml.safe_load(stream) + __version__ = versions["__version__"] + __cuda_version__ = versions["__cuda_version__"] + __cudnn_version__ = versions["__cudnn_version__"] + __tensorrt_version__ = versions["__tensorrt_version__"] + + +load_version_info() + + +def torch_tensorrt_version(): + print(__version__) + + +def cuda_version(): + print(__cuda_version__) + + +def cudnn_version(): + print(__cudnn_version__) + + +def tensorrt_version(): + print(__tensorrt_version__) diff --git a/versions.yml b/versions.yml new file mode 100644 index 0000000000..8a8202ec04 --- /dev/null +++ b/versions.yml @@ -0,0 +1,4 @@ +__version__: "2.0.0.dev0" +__cuda_version__: "12.1" +__cudnn_version__: "8.8" +__tensorrt_version__: "8.6"