diff --git a/.github/scripts/install-torch-tensorrt-windows.sh b/.github/scripts/install-torch-tensorrt-windows.sh
deleted file mode 100644
index 534eb3fcba..0000000000
--- a/.github/scripts/install-torch-tensorrt-windows.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-set -eou pipefail
-# Source conda so it's available to the script environment
-source ${BUILD_ENV_FILE}
-export EXTRA_INDEX_URL="https://download.pytorch.org/whl/nightly/${CU_VERSION}"
-
-# Install all the dependencies required for Torch-TensorRT
-${CONDA_RUN} pip install --pre -r ${PWD}/tests/py/requirements.txt --use-deprecated=legacy-resolver --extra-index-url=${EXTRA_INDEX_URL}
-
-# Install Torch-TensorRT
-${CONDA_RUN} pip install ${RUNNER_ARTIFACT_DIR}/torch_tensorrt*.whl
-
-echo -e "Running test script";
\ No newline at end of file
diff --git a/.github/scripts/install-torch-tensorrt.sh b/.github/scripts/install-torch-tensorrt.sh
index 428d45f5d9..9a6b2a8b8b 100644
--- a/.github/scripts/install-torch-tensorrt.sh
+++ b/.github/scripts/install-torch-tensorrt.sh
@@ -1,13 +1,18 @@
-#!/usr/bin/env bash
 set -eou pipefail
-# Source conda so it's available to the script environment
-source ${BUILD_ENV_FILE}
-export EXTRA_INDEX_URL="https://download.pytorch.org/whl/nightly/${CU_VERSION}"
+
+TORCH_TORCHVISION=$(grep "^torch" ${PWD}/py/requirements.txt)
+INDEX_URL=https://download.pytorch.org/whl/${CHANNEL}/${CU_VERSION}
+PLATFORM=$(python -c "import sys; print(sys.platform)")
 
 # Install all the dependencies required for Torch-TensorRT
-${CONDA_RUN} pip install --pre -r ${PWD}/tests/py/requirements.txt --use-deprecated=legacy-resolver --extra-index-url=${EXTRA_INDEX_URL}
+pip install --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL}
+pip install --pre -r ${PWD}/tests/py/requirements.txt --use-deprecated legacy-resolver
 
 # Install Torch-TensorRT
-${CONDA_RUN} pip install /opt/torch-tensorrt-builds/torch_tensorrt*.whl
+if [[ ${PLATFORM} == win32 ]]; then
+    pip install ${RUNNER_ARTIFACT_DIR}/torch_tensorrt*.whl
+else
+    pip install /opt/torch-tensorrt-builds/torch_tensorrt*.whl
+fi
 
-echo -e "Running test script";
\ No newline at end of file
+echo -e "Running test script";
diff --git a/.github/workflows/build-test-linux.yml b/.github/workflows/build-test-linux.yml
index ac15abb23f..0bd570155e 100644
--- a/.github/workflows/build-test-linux.yml
+++ b/.github/workflows/build-test-linux.yml
@@ -80,13 +80,13 @@ jobs:
         export LD_LIBRARY_PATH=/usr/lib64:$LD_LIBRARY_PATH
         pushd .
         cd tests/modules
-        ${CONDA_RUN} python hub.py
+        python hub.py
         popd
         pushd .
         cd tests/py/ts
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_api_test_results.xml api/
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_models_test_results.xml models/
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_integrations_test_results.xml integrations/
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_api_test_results.xml api/
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_models_test_results.xml models/
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_integrations_test_results.xml integrations/
         popd
 
   tests-py-dynamo-converters:
@@ -114,7 +114,7 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml -n 10 conversion/
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml -n 10 conversion/
         popd
 
   tests-py-dynamo-fe:
@@ -142,8 +142,8 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_fe_test_results.xml --ir dynamo models/test_models_export.py
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dyn_models_export.xml --ir dynamo models/test_dyn_models.py
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_fe_test_results.xml --ir dynamo models/test_models_export.py
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dyn_models_export.xml --ir dynamo models/test_dyn_models.py
         popd
 
   tests-py-dynamo-serde:
@@ -171,7 +171,7 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/export_serde_test_results.xml --ir dynamo models/test_export_serde.py
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/export_serde_test_results.xml --ir dynamo models/test_export_serde.py
         popd
 
   tests-py-torch-compile-be:
@@ -199,9 +199,9 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest -n 10 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_be_test_results.xml backend/
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_comple_be_e2e_test_results.xml --ir torch_compile models/test_models.py
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_dyn_models_export.xml --ir torch_compile models/test_dyn_models.py
+        python -m pytest -n 10 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_be_test_results.xml backend/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_be_e2e_test_results.xml --ir torch_compile models/test_models.py
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_dyn_models_export.xml --ir torch_compile models/test_dyn_models.py
         popd
 
   tests-py-dynamo-core:
@@ -229,9 +229,9 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_runtime_test_results.xml runtime/
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_partitioning_test_results.xml partitioning/
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_lowering_test_results.xml lowering/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_runtime_test_results.xml runtime/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_partitioning_test_results.xml partitioning/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_lowering_test_results.xml lowering/
         popd
 
   tests-py-core:
@@ -259,7 +259,7 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/core
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_core_test_results.xml .
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_core_test_results.xml .
         popd
 
 concurrency:
diff --git a/.github/workflows/build-test-windows.yml b/.github/workflows/build-test-windows.yml
index 7aae60c5fa..1bdb52ae8a 100644
--- a/.github/workflows/build-test-windows.yml
+++ b/.github/workflows/build-test-windows.yml
@@ -24,8 +24,19 @@ jobs:
       with-rocm: false
       with-cpu: false
 
-  build:
+  substitute-runner:
     needs: generate-matrix
+    outputs:
+      matrix: ${{ steps.substitute.outputs.matrix }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Substitute runner
+        id: substitute
+        run: |
+          echo matrix="$(echo '${{ needs.generate-matrix.outputs.matrix }}' | sed -e 's/windows.8xlarge.nvidia.gpu/windows.g5.4xlarge.nvidia.gpu/g')" >> ${GITHUB_OUTPUT}
+
+  build:
+    needs: substitute-runner
     permissions:
       id-token: write
       contents: read
@@ -35,6 +46,7 @@ jobs:
         include:
           - repository: pytorch/tensorrt
             pre-script: packaging/pre_build_script_windows.sh
+            env-script: packaging/vc_env_helper.bat
             smoke-test-script: packaging/smoke_test_windows.py
             package-name: torch_tensorrt
     name: Build torch-tensorrt whl package
@@ -44,12 +56,44 @@ jobs:
       ref: ""
       test-infra-repository: pytorch/test-infra
       test-infra-ref: main
-      build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
+      build-matrix: ${{ needs.substitute-runner.outputs.matrix }}
       pre-script: ${{ matrix.pre-script }}
+      env-script: ${{ matrix.env-script }}
       smoke-test-script: ${{ matrix.smoke-test-script }}
       package-name: ${{ matrix.package-name }}
       trigger-event: ${{ github.event_name }}
 
+  tests-py-torchscript-fe:
+    name: Test torchscript frontend [Python]
+    needs: [generate-matrix, build]
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - repository: pytorch/tensorrt
+            package-name: torch_tensorrt
+    uses: ./.github/workflows/windows-test.yml
+    with:
+      job-name: tests-py-torchscript-fe
+      repository: ${{ matrix.repository }}
+      ref: ""
+      test-infra-repository: pytorch/test-infra
+      test-infra-ref: main
+      build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
+      pre-script: packaging/driver_upgrade.bat
+      script: |
+        export USE_HOST_DEPS=1
+        pushd .
+        cd tests/modules
+        python hub.py
+        popd
+        pushd .
+        cd tests/py/ts
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_api_test_results.xml api/
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_models_test_results.xml models/
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_integrations_test_results.xml integrations/
+        popd
+
   tests-py-dynamo-converters:
     name: Test dynamo converters [Python]
     needs: [generate-matrix, build]
@@ -62,7 +106,7 @@ jobs:
     uses: ./.github/workflows/windows-test.yml
     with:
       job-name: tests-py-dynamo-converters
-      repository: "pytorch/tensorrt"
+      repository: ${{ matrix.repository }}
       ref: ""
       test-infra-repository: pytorch/test-infra
       test-infra-ref: main
@@ -72,7 +116,7 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml -n 10 conversion/
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml -n 10 conversion/
         popd
 
   tests-py-dynamo-fe:
@@ -87,7 +131,33 @@ jobs:
     uses: ./.github/workflows/windows-test.yml
     with:
       job-name: tests-py-dynamo-fe
-      repository: "pytorch/tensorrt"
+      repository: ${{ matrix.repository }}
+      ref: ""
+      test-infra-repository: pytorch/test-infra
+      test-infra-ref: main
+      build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
+      pre-script: packaging/driver_upgrade.bat
+      script: |
+        export USE_HOST_DEPS=1
+        pushd .
+        cd tests/py/dynamo
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_fe_test_results.xml --ir dynamo models/test_models_export.py
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dyn_models_export.xml --ir dynamo models/test_dyn_models.py
+        popd
+
+  tests-py-dynamo-serde:
+    name: Test dynamo export serde [Python]
+    needs: [generate-matrix, build]
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - repository: pytorch/tensorrt
+            package-name: torch_tensorrt
+    uses: ./.github/workflows/windows-test.yml
+    with:
+      job-name: tests-py-dynamo-serde
+      repository: ${{ matrix.repository }}
       ref: ""
       test-infra-repository: pytorch/test-infra
       test-infra-ref: main
@@ -97,8 +167,7 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_fe_test_results.xml --ir dynamo models/test_models_export.py
-        ${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/dyn_models_export.xml --ir dynamo models/test_dyn_models.py
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/export_serde_test_results.xml --ir dynamo models/test_export_serde.py
         popd
 
   tests-py-torch-compile-be:
@@ -117,14 +186,15 @@ jobs:
       ref: ""
       test-infra-repository: pytorch/test-infra
       test-infra-ref: main
-      pre-script: packaging/driver_upgrade.bat
       build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
+      pre-script: packaging/driver_upgrade.bat
       script: |
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest -n 10 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_be_test_results.xml backend/
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_comple_be_e2e_test_results.xml --ir torch_compile models/test_models.py
+        python -m pytest -n 10 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_be_test_results.xml backend/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_be_e2e_test_results.xml --ir torch_compile models/test_models.py
+        python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_dyn_models_export.xml --ir torch_compile models/test_dyn_models.py
         popd
 
   tests-py-dynamo-core:
@@ -139,7 +209,7 @@ jobs:
     uses: ./.github/workflows/windows-test.yml
     with:
       job-name: tests-py-dynamo-core
-      repository: "pytorch/tensorrt"
+      repository: ${{ matrix.repository }}
       ref: ""
       test-infra-repository: pytorch/test-infra
       test-infra-ref: main
@@ -149,11 +219,36 @@ jobs:
         export USE_HOST_DEPS=1
         pushd .
         cd tests/py/dynamo
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_runtime_test_results.xml runtime/
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_partitioning_test_results.xml partitioning/
-        ${CONDA_RUN} python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_lowering_test_results.xml lowering/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_runtime_test_results.xml runtime/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_partitioning_test_results.xml partitioning/
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_lowering_test_results.xml lowering/
+        popd
+
+  tests-py-core:
+    name: Test core [Python]
+    needs: [generate-matrix, build]
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - repository: pytorch/tensorrt
+            package-name: torch_tensorrt
+    uses: ./.github/workflows/windows-test.yml
+    with:
+      job-name: tests-py-core
+      repository: ${{ matrix.repository }}
+      ref: ""
+      test-infra-repository: pytorch/test-infra
+      test-infra-ref: main
+      build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
+      pre-script: packaging/driver_upgrade.bat
+      script: |
+        export USE_HOST_DEPS=1
+        pushd .
+        cd tests/py/core
+        python -m pytest -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_core_test_results.xml .
         popd
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ inputs.repository }}-${{ github.event_name == 'workflow_dispatch' }}-${{ inputs.job-name }}
-  cancel-in-progress: true
\ No newline at end of file
+  cancel-in-progress: true
diff --git a/.github/workflows/windows-test.yml b/.github/workflows/windows-test.yml
index aa7d461e14..7b8548ae78 100644
--- a/.github/workflows/windows-test.yml
+++ b/.github/workflows/windows-test.yml
@@ -118,7 +118,7 @@ jobs:
           {
             echo "${SCRIPT}";
           } > "user_script"
-          cat .github/scripts/install-torch-tensorrt-windows.sh user_script > exec_script
+          cat .github/scripts/install-torch-tensorrt.sh user_script > exec_script
       - name: Run script
         uses: ./test-infra/.github/actions/run-script-with-cache
         with:
diff --git a/packaging/pre_build_script_windows.sh b/packaging/pre_build_script_windows.sh
index dd768b8662..9724e18327 100644
--- a/packaging/pre_build_script_windows.sh
+++ b/packaging/pre_build_script_windows.sh
@@ -1,12 +1,18 @@
-python -m pip install -U numpy packaging pyyaml setuptools wheel
+set -eou pipefail
 
-# Install TRT from PyPi
-TRT_VERSION=$(${CONDA_RUN} python -c "import yaml; print(yaml.safe_load(open('dev_dep_versions.yml', 'r'))['__tensorrt_version__'])")
+pip install -U numpy packaging pyyaml setuptools wheel
 
-python -m pip install tensorrt==${TRT_VERSION} tensorrt-${CU_VERSION::4}==${TRT_VERSION} tensorrt-${CU_VERSION::4}-bindings==${TRT_VERSION} tensorrt-${CU_VERSION::4}-libs==${TRT_VERSION} --extra-index-url https://pypi.nvidia.com
+# Install TRT from PyPI
+TRT_VERSION=$(python -c "import yaml; print(yaml.safe_load(open('dev_dep_versions.yml', 'r'))['__tensorrt_version__'])")
+pip install tensorrt==${TRT_VERSION} tensorrt-${CU_VERSION::4}-bindings==${TRT_VERSION} tensorrt-${CU_VERSION::4}-libs==${TRT_VERSION} --extra-index-url https://pypi.nvidia.com
 
 choco install bazelisk -y
 
+curl -Lo TensorRT.zip https://developer.download.nvidia.com/compute/machine-learning/tensorrt/10.0.1/zip/TensorRT-10.0.1.6.Windows10.win10.cuda-12.4.zip
+unzip -o TensorRT.zip -d C:/
+
+export CUDA_HOME="$(echo ${CUDA_PATH} | sed -e 's#\\#\/#g')"
+
 cat toolchains/ci_workspaces/WORKSPACE.win.release.tmpl | envsubst > WORKSPACE
 
 echo "RELEASE=1" >> ${GITHUB_ENV}
diff --git a/packaging/vc_env_helper.bat b/packaging/vc_env_helper.bat
new file mode 100644
index 0000000000..33605856b7
--- /dev/null
+++ b/packaging/vc_env_helper.bat
@@ -0,0 +1,40 @@
+@echo on
+
+set VC_VERSION_LOWER=17
+set VC_VERSION_UPPER=18
+
+for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do (
+    if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
+        set "BAZEL_VC=%%i\VC"
+        set "VS15INSTALLDIR=%%i"
+        set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat"
+        goto vswhere
+    )
+)
+
+:vswhere
+if "%VSDEVCMD_ARGS%" == "" (
+    call "%VS15VCVARSALL%" x64 || exit /b 1
+) else (
+    call "%VS15VCVARSALL%" x64 %VSDEVCMD_ARGS% || exit /b 1
+)
+
+@echo on
+
+set DISTUTILS_USE_SDK=1
+
+set args=%1
+shift
+:start
+if [%1] == [] goto done
+set args=%args% %1
+shift
+goto start
+
+:done
+if "%args%" == "" (
+    echo Usage: vc_env_helper.bat [command] [args]
+    echo e.g. vc_env_helper.bat cl /c test.cpp
+)
+
+%args% || exit /b 1
diff --git a/py/torch_tensorrt/_features.py b/py/torch_tensorrt/_features.py
index dde99cbaf6..02e2108591 100644
--- a/py/torch_tensorrt/_features.py
+++ b/py/torch_tensorrt/_features.py
@@ -1,4 +1,5 @@
 import os
+import sys
 from collections import namedtuple
 
 from torch_tensorrt._utils import sanitized_torch_version
@@ -15,10 +16,23 @@
     ],
 )
 
-_TS_FE_AVAIL = os.path.isfile(os.path.dirname(__file__) + "/lib/libtorchtrt.so")
-_TORCHTRT_RT_AVAIL = _TS_FE_AVAIL or os.path.isfile(
-    os.path.dirname(__file__) + "/lib/libtorchtrt_runtime.so"
+trtorch_dir = os.path.dirname(__file__)
+linked_file = os.path.join(
+    "lib", "torchtrt.dll" if sys.platform.startswith("win") else "libtorchtrt.so"
 )
+linked_file_runtime = os.path.join(
+    "lib",
+    (
+        "torchtrt_runtime.dll"
+        if sys.platform.startswith("win")
+        else "libtorchtrt_runtime.so"
+    ),
+)
+linked_file_full_path = os.path.join(trtorch_dir, linked_file)
+linked_file_runtime_full_path = os.path.join(trtorch_dir, linked_file_runtime)
+
+_TS_FE_AVAIL = os.path.isfile(linked_file_full_path)
+_TORCHTRT_RT_AVAIL = _TS_FE_AVAIL or os.path.isfile(linked_file_runtime_full_path)
 _DYNAMO_FE_AVAIL = version.parse(sanitized_torch_version()) >= version.parse("2.1.dev")
 _FX_FE_AVAIL = True
 
diff --git a/pyproject.toml b/pyproject.toml
index 4313383431..e8e16d5f3d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,6 +43,8 @@ keywords = ["pytorch", "torch", "tensorrt", "trt", "ai", "artificial intelligenc
 dependencies = [
     "torch >=2.4.0.dev,<2.5.0",
     "tensorrt==10.0.1",
+    "tensorrt-cu12_bindings==10.0.1",
+    "tensorrt-cu12_libs==10.0.1",
     "packaging>=23",
     "numpy",
     "typing-extensions>=4.7.0",
diff --git a/setup.py b/setup.py
index 7e30847480..d1ec6c2f0b 100644
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,7 @@
 from typing import List
 
 import setuptools
+import torch
 import yaml
 from setuptools import Extension, find_namespace_packages, setup
 from setuptools.command.build_ext import build_ext
@@ -79,8 +80,7 @@ def load_dep_info():
 
 CXX11_ABI = False
 JETPACK_VERSION = None
-# TODO: Remove once C++ Runtime is integrated in Windows
-PY_ONLY = IS_WINDOWS
+PY_ONLY = False
 NO_TS = False
 LEGACY = False
 RELEASE = False
@@ -238,7 +238,7 @@ def copy_libtorchtrt(multilinux=False, rt_only=False):
     if IS_WINDOWS:
         copyfile(
             dir_path + "/../bazel-bin/cpp/lib/torchtrt.dll",
-            dir_path + "/torch_tensorrt/torchtrt.dll",
+            dir_path + "/torch_tensorrt/lib/torchtrt.dll",
         )
         copyfile(
             dir_path + "/../bazel-bin/cpp/lib/torchtrt.dll.if.lib",
@@ -379,7 +379,6 @@ class CleanCommand(Command):
     ]
     PY_CLEAN_FILES = [
         os.path.join(".", "torch_tensorrt", "*.so"),
-        os.path.join(".", "torch_tensorrt", "*.dll"),
         os.path.join(".", "torch_tensorrt", "_version.py"),
         os.path.join(".", "torch_tensorrt", "BUILD"),
         os.path.join(".", "torch_tensorrt", "WORKSPACE"),
@@ -506,6 +505,7 @@ def run(self):
             ],
             extra_compile_args=(
                 [
+                    f'/DPYBIND11_BUILD_ABI=\\"{torch._C._PYBIND11_BUILD_ABI}\\"',
                     "/GS-",
                     "/permissive-",
                 ]
@@ -584,7 +584,6 @@ def run(self):
                 "include/torch_tensorrt/core/util/logging/*.h",
                 "bin/*",
                 "lib/*",
-                "*.dll",
             ]
         }
     )
diff --git a/tests/py/dynamo/models/test_export_serde.py b/tests/py/dynamo/models/test_export_serde.py
index c13f43987f..58e0115886 100644
--- a/tests/py/dynamo/models/test_export_serde.py
+++ b/tests/py/dynamo/models/test_export_serde.py
@@ -1,3 +1,5 @@
+import os
+import tempfile
 import unittest
 
 import pytest
@@ -8,6 +10,8 @@
 
 assertions = unittest.TestCase()
 
+trt_ep_path = os.path.join(tempfile.gettempdir(), "trt.ep")
+
 
 @pytest.mark.unit
 def test_base_full_compile(ir):
@@ -42,9 +46,9 @@ def forward(self, x):
 
     exp_program = torchtrt.dynamo.trace(model, **compile_spec)
     trt_module = torchtrt.dynamo.compile(exp_program, **compile_spec)
-    torchtrt.save(trt_module, "/tmp/trt.ep", inputs=[input])
+    torchtrt.save(trt_module, trt_ep_path, inputs=[input])
     # TODO: Enable this serialization issues are fixed
-    # deser_trt_module = torchtrt.load("/tmp/trt.ep").module()
+    # deser_trt_module = torchtrt.load(trt_ep_path).module()
     # Check Pyt and TRT exported program outputs
     cos_sim = cosine_similarity(model(input), trt_module(input)[0])
     assertions.assertTrue(
@@ -94,9 +98,9 @@ def forward(self, x):
 
     exp_program = torchtrt.dynamo.trace(model, **compile_spec)
     trt_module = torchtrt.dynamo.compile(exp_program, **compile_spec)
-    torchtrt.save(trt_module, "./trt.ep", inputs=[input])
+    torchtrt.save(trt_module, trt_ep_path, inputs=[input])
     # TODO: Enable this serialization issues are fixed
-    # deser_trt_module = torchtrt.load("./trt.ep").module()
+    # deser_trt_module = torchtrt.load(trt_ep_path).module()
     # Check Pyt and TRT exported program outputs
     outputs_pyt = model(input)
     outputs_trt = trt_module(input)
@@ -151,9 +155,9 @@ def forward(self, x):
 
     exp_program = torchtrt.dynamo.trace(model, **compile_spec)
     trt_module = torchtrt.dynamo.compile(exp_program, **compile_spec)
-    torchtrt.save(trt_module, "./trt.ep", inputs=[input])
+    torchtrt.save(trt_module, trt_ep_path, inputs=[input])
     # TODO: Enable this serialization issues are fixed
-    # deser_trt_module = torchtrt.load("./trt.ep").module()
+    # deser_trt_module = torchtrt.load(trt_ep_path).module()
     # Check Pyt and TRT exported program outputs
     outputs_pyt = model(input)
     outputs_trt = trt_module(input)
@@ -211,9 +215,9 @@ def forward(self, x):
 
     exp_program = torchtrt.dynamo.trace(model, **compile_spec)
     trt_module = torchtrt.dynamo.compile(exp_program, **compile_spec)
-    torchtrt.save(trt_module, "./trt.ep", inputs=[input])
+    torchtrt.save(trt_module, trt_ep_path, inputs=[input])
     # TODO: Enable this serialization issues are fixed
-    # deser_trt_module = torchtrt.load("./trt.ep").module()
+    # deser_trt_module = torchtrt.load(trt_ep_path).module()
     outputs_pyt = model(input)
     outputs_trt = trt_module(input)
     for idx in range(len(outputs_pyt)):
@@ -253,9 +257,9 @@ def test_resnet18(ir):
 
     exp_program = torchtrt.dynamo.trace(model, **compile_spec)
     trt_module = torchtrt.dynamo.compile(exp_program, **compile_spec)
-    torchtrt.save(trt_module, "./trt.ep", inputs=[input])
+    torchtrt.save(trt_module, trt_ep_path, inputs=[input])
     # TODO: Enable this serialization issues are fixed
-    # deser_trt_module = torchtrt.load("./trt.ep").module()
+    # deser_trt_module = torchtrt.load(trt_ep_path).module()
     outputs_pyt = model(input)
     outputs_trt = trt_module(input)
     cos_sim = cosine_similarity(outputs_pyt, outputs_trt[0])
@@ -309,9 +313,9 @@ def forward(self, x):
     exp_program = torchtrt.dynamo.trace(model, **compile_spec)
     trt_module = torchtrt.dynamo.compile(exp_program, **compile_spec)
 
-    torchtrt.save(trt_module, "./trt.ep", inputs=[input])
+    torchtrt.save(trt_module, trt_ep_path, inputs=[input])
     # TODO: Enable this serialization issues are fixed
-    # deser_trt_module = torchtrt.load("./trt.ep").module()
+    # deser_trt_module = torchtrt.load(trt_ep_path).module()
     outputs_pyt = model(input)
     outputs_trt = trt_module(input)
 
diff --git a/tests/py/dynamo/partitioning/test_dynamic_partitioning.py b/tests/py/dynamo/partitioning/test_dynamic_partitioning.py
index 9b18c1fc2f..0c9ee14b12 100644
--- a/tests/py/dynamo/partitioning/test_dynamic_partitioning.py
+++ b/tests/py/dynamo/partitioning/test_dynamic_partitioning.py
@@ -48,12 +48,12 @@ def forward(self, x):
             elif "_run_on_gpu" in submod[0]:
                 pyt_segments += 1
 
-        self.assertEquals(
+        self.assertEqual(
             trt_segments,
             1,
             f"Number of TRT segments should be 1 but got {trt_segments}",
         )
-        self.assertEquals(
+        self.assertEqual(
             pyt_segments,
             1,
             f"Number of PyTorch segments should be 1 but got {pyt_segments}",
@@ -93,12 +93,12 @@ def forward(self, x):
             elif "_run_on_gpu" in submod[0]:
                 pyt_segments += 1
 
-        self.assertEquals(
+        self.assertEqual(
             trt_segments,
             1,
             f"Number of TRT segments should be 2 but got {trt_segments}",
         )
-        self.assertEquals(
+        self.assertEqual(
             pyt_segments,
             0,
             f"Number of PyTorch segments should be 0 but got {pyt_segments}",
diff --git a/tests/py/requirements.txt b/tests/py/requirements.txt
index 09a9264f4f..bdae578713 100644
--- a/tests/py/requirements.txt
+++ b/tests/py/requirements.txt
@@ -1,15 +1,13 @@
 # This file is specifically to install correct version of libraries during CI testing.
-# The index url for torch & torchvision libs is configured in install-torch-tensorrt.sh based on CUDA version
 # networkx library issue: https://discuss.pytorch.org/t/installing-pytorch-under-python-3-8-question-about-networkx-version/196740
+expecttest==0.1.6
+networkx==2.8.8
+numpy<2.0.0
+parameterized>=0.2.0
 pytest>=8.2.1
 pytest-xdist>=3.6.1
-networkx==2.8.8
-torch>=2.4.0.dev,<2.5.0
-torchvision>=0.19.0.dev,<0.20.0
---extra-index-url https://pypi.ngc.nvidia.com
 pyyaml
 tensorrt==10.0.1
 timm>=1.0.3
 transformers==4.39.3
-parameterized>=0.2.0
-expecttest==0.1.6
\ No newline at end of file
+--extra-index-url https://pypi.nvidia.com
diff --git a/third_party/tensorrt/local/BUILD b/third_party/tensorrt/local/BUILD
index ef9ff7b956..a755ecf862 100644
--- a/third_party/tensorrt/local/BUILD
+++ b/third_party/tensorrt/local/BUILD
@@ -80,7 +80,7 @@ cc_import(
     static_library = select({
         ":aarch64_linux": "lib/aarch64-linux-gnu/libnvinfer_static.a",
         ":ci_rhel_x86_64_linux": "lib64/libnvinfer_static.a",
-        ":windows": "lib/nvinfer.lib",
+        ":windows": "lib/nvinfer_10.lib",
         "//conditions:default": "lib/x86_64-linux-gnu/libnvinfer_static.a",
     }),
     visibility = ["//visibility:private"],
@@ -91,7 +91,7 @@ cc_import(
     shared_library = select({
         ":aarch64_linux": "lib/aarch64-linux-gnu/libnvinfer.so",
         ":ci_rhel_x86_64_linux": "lib64/libnvinfer.so",
-        ":windows": "lib/nvinfer.dll",
+        ":windows": "lib/nvinfer_10.dll",
         "//conditions:default": "lib/x86_64-linux-gnu/libnvinfer.so",
     }),
     visibility = ["//visibility:private"],
@@ -104,7 +104,10 @@ cc_library(
         "nvinfer_headers",
         "nvinfer_lib",
         "@cuda//:cudart",
-    ],
+    ] + select({
+        ":windows": ["nvinfer_static_lib"],
+        "//conditions:default": [],
+    }),
 )
 
 ####################################################################################
@@ -330,7 +333,7 @@ cc_library(
     srcs = select({
         ":aarch64_linux": ["lib/aarch64-linux-gnu/libnvinfer_plugin.so"],
         ":ci_rhel_x86_64_linux": ["lib64/libnvinfer_plugin.so"],
-        ":windows": ["lib/nvinfer_plugin.lib"],
+        ":windows": ["lib/nvinfer_plugin_10.lib"],
         "//conditions:default": ["lib/x86_64-linux-gnu/libnvinfer_plugin.so"],
     }),
     hdrs = select({
diff --git a/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl b/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
index 01bbd2130e..40ebf12494 100644
--- a/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
+++ b/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
@@ -36,7 +36,7 @@ http_archive(
 # External dependency for torch_tensorrt if you already have precompiled binaries.
 local_repository(
     name = "torch_tensorrt",
-    path = "/opt/circleci/.pyenv/versions/3.10.9/lib/python3.10/site-packages/torch_tensorrt"
+    path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
 )
 
 # CUDA should be installed on the system locally
@@ -46,11 +46,6 @@ new_local_repository(
     path = "${CUDA_HOME}",
 )
 
-new_local_repository(
-    name = "cublas",
-    build_file = "@//third_party/cublas:BUILD",
-    path = "C:/",
-)
 #############################################################################################################
 # Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
 #############################################################################################################
@@ -59,14 +54,14 @@ http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
+    urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
 )
 
 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-latest.zip"],
+    urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
 )
 
 ####################################################################################
@@ -75,13 +70,13 @@ http_archive(
 
 new_local_repository(
    name = "tensorrt",
-   path = "C:/",
+   path = "C:/TensorRT-10.0.1.6",
    build_file = "@//third_party/tensorrt/local:BUILD"
 )
 
-# #########################################################################
-# # Testing Dependencies (optional - comment out on aarch64)
-# #########################################################################
+#########################################################################
+# Development Dependencies (optional - comment out on aarch64)
+#########################################################################
 
 load("@rules_python//python:pip.bzl", "pip_parse")