Skip to content

Commit 794b276

Browse files
Authored by xin3he
migrate export to 2x and 3x from deprecated (#1845)
Signed-off-by: xin3he <[email protected]>
1 parent 0eced14 commit 794b276

File tree

15 files changed

+657
-8
lines changed

15 files changed

+657
-8
lines changed

neural_compressor/experimental/export/qlinear2qdq.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1515
# See the License for the specific language governing permissions and
1616
# limitations under the License.
17+
18+
# pragma: no cover
1719
"""Helper functions to export onnx model from QLinearops to QDQ."""
1820
from deprecated import deprecated
1921

neural_compressor/experimental/export/tf2onnx.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1515
# See the License for the specific language governing permissions and
1616
# limitations under the License.
17+
18+
# pragma: no cover
1719
"""Helper functions to export model from TensorFlow to ONNX."""
1820

1921
import re

neural_compressor/experimental/export/torch2onnx.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1515
# See the License for the specific language governing permissions and
1616
# limitations under the License.
17+
18+
# pragma: no cover
1719
"""Helper functions to export model from PyTorch/TensorFlow to ONNX."""
1820

1921
import os

neural_compressor/model/onnx_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -827,7 +827,7 @@ def find_ffn_matmul(self, attention_index, attention_matmul_list, block_len):
827827
def export(self, save_path, conf):
828828
"""Export Qlinear to QDQ model."""
829829
from neural_compressor.config import ONNXQlinear2QDQConfig
830-
from neural_compressor.experimental.export import onnx_qlinear_to_qdq
830+
from neural_compressor.utils.export import onnx_qlinear_to_qdq
831831

832832
if isinstance(conf, ONNXQlinear2QDQConfig):
833833
add_nodes, remove_nodes, inits = onnx_qlinear_to_qdq(self._model, self._input_name_to_nodes)

neural_compressor/model/tensorflow_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1009,7 +1009,7 @@ def export(self, save_path, conf):
10091009
+ "we reset opset_version={} here".format(conf.opset_version)
10101010
)
10111011

1012-
from neural_compressor.experimental.export import tf_to_fp32_onnx, tf_to_int8_onnx
1012+
from neural_compressor.utils.export import tf_to_fp32_onnx, tf_to_int8_onnx
10131013

10141014
inputs_as_nchw = conf.kwargs.get("inputs_as_nchw", None)
10151015
if conf.dtype == "int8":

neural_compressor/model/torch_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -418,7 +418,7 @@ def export(
418418
"but the torch version found is {}".format(Version("1.12.0"), version)
419419
)
420420

421-
from neural_compressor.experimental.export import torch_to_fp32_onnx, torch_to_int8_onnx
421+
from neural_compressor.utils.export import torch_to_fp32_onnx, torch_to_int8_onnx
422422

423423
if conf.dtype == "int8":
424424
torch_to_int8_onnx(

neural_compressor/onnxrt/utils/onnx_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -648,7 +648,7 @@ def find_ffn_matmul(self, attention_index, attention_matmul_list, block_len):
648648
def export(self, save_path, conf):
649649
"""Export Qlinear to QDQ model."""
650650
from neural_compressor.config import ONNXQlinear2QDQConfig
651-
from neural_compressor.experimental.export import onnx_qlinear_to_qdq
651+
from neural_compressor.utils.export import onnx_qlinear_to_qdq
652652

653653
if isinstance(conf, ONNXQlinear2QDQConfig):
654654
if len(self._input_name_to_nodes) == 0:

neural_compressor/torch/export/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from neural_compressor.torch.export._export import export_model_for_pt2e_quant, export
15+
from neural_compressor.torch.export.pt2e_export import export_model_for_pt2e_quant, export
File renamed without changes.

neural_compressor/torch/utils/utility.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,7 @@
1818
import torch
1919
from typing_extensions import TypeAlias
2020

21-
from neural_compressor.common import logger
22-
from neural_compressor.common.utils import Mode
21+
from neural_compressor.common.utils import LazyImport, Mode, logger
2322

2423
OP_NAME_AND_TYPE_TUPLE_TYPE: TypeAlias = Tuple[str, Union[torch.nn.Module, Callable]]
2524

0 commit comments

Comments (0)