Skip to content

Commit 46b8796

Browse files
authored
Add back mistakenly deleted QAT BC import test (#1417)
Summary: The unused imports in this test were mistakenly deleted in #1359. This commit adds them back. Test Plan: python test/quantization/test_qat.py
1 parent a555734 commit 46b8796

File tree

1 file changed

+47
-0
lines changed

1 file changed

+47
-0
lines changed

test/quantization/test_qat.py

Lines changed: 47 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1108,6 +1108,53 @@ def test_qat_prototype_bc(self):
         Just to make sure we can import all the old prototype paths.
         We will remove this test in the near future when we actually break BC.
         """
+        from torchao.quantization.prototype.qat import (  # noqa: F401, F811, I001
+            disable_4w_fake_quant,
+            disable_8da4w_fake_quant,
+            enable_4w_fake_quant,
+            enable_8da4w_fake_quant,
+            ComposableQATQuantizer,
+            Int8DynActInt4WeightQATLinear,
+            Int4WeightOnlyEmbeddingQATQuantizer,
+            Int4WeightOnlyQATQuantizer,
+            Int8DynActInt4WeightQATQuantizer,
+        )
+        from torchao.quantization.prototype.qat._module_swap_api import (  # noqa: F401, F811
+            disable_4w_fake_quant_module_swap,
+            enable_4w_fake_quant_module_swap,
+            disable_8da4w_fake_quant_module_swap,
+            enable_8da4w_fake_quant_module_swap,
+            Int4WeightOnlyQATQuantizerModuleSwap,
+            Int8DynActInt4WeightQATQuantizerModuleSwap,
+        )
+        from torchao.quantization.prototype.qat.affine_fake_quantized_tensor import (  # noqa: F401, F811
+            AffineFakeQuantizedTensor,
+            to_affine_fake_quantized,
+        )
+        from torchao.quantization.prototype.qat.api import (  # noqa: F401, F811
+            ComposableQATQuantizer,
+            FakeQuantizeConfig,
+        )
+        from torchao.quantization.prototype.qat.embedding import (  # noqa: F401, F811
+            FakeQuantizedEmbedding,
+            Int4WeightOnlyEmbeddingQATQuantizer,
+            Int4WeightOnlyEmbedding,
+            Int4WeightOnlyQATEmbedding,
+        )
+        from torchao.quantization.prototype.qat.fake_quantizer import (  # noqa: F401, F811
+            FakeQuantizer,
+        )
+        from torchao.quantization.prototype.qat.linear import (  # noqa: F401, F811
+            disable_4w_fake_quant,
+            disable_8da4w_fake_quant,
+            enable_4w_fake_quant,
+            enable_8da4w_fake_quant,
+            FakeQuantizedLinear,
+            Int4WeightOnlyQATLinear,
+            Int4WeightOnlyQATQuantizer,
+            Int8DynActInt4WeightQATLinear,
+            Int8DynActInt4WeightQATQuantizer,
+        )


 if __name__ == "__main__":

0 commit comments

Comments
 (0)