@@ -10,33 +10,7 @@
 from torch_tensorrt._Device import Device
 from torch_tensorrt._enums import EngineCapability, dtype
 from torch_tensorrt._Input import Input
-from torch_tensorrt.dynamo import partitioning
-from torch_tensorrt.dynamo._defaults import (
-    DEBUG,
-    DEVICE,
-    DISABLE_TF32,
-    DLA_GLOBAL_DRAM_SIZE,
-    DLA_LOCAL_DRAM_SIZE,
-    DLA_SRAM_SIZE,
-    DRYRUN,
-    ENABLE_EXPERIMENTAL_DECOMPOSITIONS,
-    ENGINE_CAPABILITY,
-    HARDWARE_COMPATIBLE,
-    MAX_AUX_STREAMS,
-    MIN_BLOCK_SIZE,
-    NUM_AVG_TIMING_ITERS,
-    OPTIMIZATION_LEVEL,
-    PASS_THROUGH_BUILD_FAILURES,
-    PRECISION,
-    REFIT,
-    REQUIRE_FULL_COMPILATION,
-    SPARSE_WEIGHTS,
-    TRUNCATE_LONG_AND_DOUBLE,
-    USE_FAST_PARTITIONER,
-    USE_PYTHON_RUNTIME,
-    VERSION_COMPATIBLE,
-    WORKSPACE_SIZE,
-)
+from torch_tensorrt.dynamo import _defaults, partitioning
 from torch_tensorrt.dynamo._DryRunTracker import (
     DryRunTracker,
     PerSubgraphData,
@@ -89,15 +63,15 @@ def compile(
     min_block_size: int = _defaults.MIN_BLOCK_SIZE,
     torch_executed_ops: Optional[Collection[Target]] = None,
     torch_executed_modules: Optional[List[str]] = None,
-    pass_through_build_failures: bool = PASS_THROUGH_BUILD_FAILURES,
-    max_aux_streams: Optional[int] = MAX_AUX_STREAMS,
-    version_compatible: bool = VERSION_COMPATIBLE,
-    optimization_level: Optional[int] = OPTIMIZATION_LEVEL,
-    use_python_runtime: bool = USE_PYTHON_RUNTIME,
-    use_fast_partitioner: bool = USE_FAST_PARTITIONER,
-    enable_experimental_decompositions: bool = ENABLE_EXPERIMENTAL_DECOMPOSITIONS,
-    dryrun: bool = DRYRUN,
-    hardware_compatible: bool = HARDWARE_COMPATIBLE,
+    pass_through_build_failures: bool = _defaults.PASS_THROUGH_BUILD_FAILURES,
+    max_aux_streams: Optional[int] = _defaults.MAX_AUX_STREAMS,
+    version_compatible: bool = _defaults.VERSION_COMPATIBLE,
+    optimization_level: Optional[int] = _defaults.OPTIMIZATION_LEVEL,
+    use_python_runtime: bool = _defaults.USE_PYTHON_RUNTIME,
+    use_fast_partitioner: bool = _defaults.USE_FAST_PARTITIONER,
+    enable_experimental_decompositions: bool = _defaults.ENABLE_EXPERIMENTAL_DECOMPOSITIONS,
+    dryrun: bool = _defaults.DRYRUN,
+    hardware_compatible: bool = _defaults.HARDWARE_COMPATIBLE,
     **kwargs: Any,
 ) -> torch.fx.GraphModule:
     """Compile a TorchScript module for NVIDIA GPUs using TensorRT