1 parent 79703b2 commit d07da41
torchvision/models/quantization/mobilenetv3.py
@@ -140,6 +140,10 @@ def _mobilenet_v3_model(
     _replace_relu(model)
 
     if quantize:
+        # Instead of quantizing the model and then loading the quantized weights we take a different approach.
+        # We prepare the QAT model, load the QAT weights from training and then convert it.
+        # This is done to avoid extremely low accuracies observed on the specific model. This is rather a workaround
+        # for an unresolved bug on the eager quantization API detailed at: https://github.com/pytorch/vision/issues/5890
         model.fuse_model(is_qat=True)
         model.qconfig = torch.ao.quantization.get_default_qat_qconfig(backend)
         torch.ao.quantization.prepare_qat(model, inplace=True)
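
The sequence described in the comment above (prepare the QAT model first, load the QAT weights into it, and only then convert) can be illustrated with a minimal standalone sketch. The checkpoint path "mobilenet_v3_large_qat.pth" and the choice of the "qnnpack" backend are assumptions for illustration; the actual torchvision builder downloads its own weights and performs these steps internally inside _mobilenet_v3_model.

import torch
from torchvision.models.quantization import mobilenet_v3_large

backend = "qnnpack"  # assumed backend; "fbgemm" is the usual choice on x86
torch.backends.quantized.engine = backend

# Build the float quantizable model. It is left in training mode because
# prepare_qat expects a model in training mode.
model = mobilenet_v3_large(quantize=False)
model.fuse_model(is_qat=True)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(backend)
torch.ao.quantization.prepare_qat(model, inplace=True)

# Load the QAT weights into the *prepared* (not yet converted) model.
# "mobilenet_v3_large_qat.pth" is a hypothetical local checkpoint path.
state_dict = torch.load("mobilenet_v3_large_qat.pth", map_location="cpu")
model.load_state_dict(state_dict)

# Only after the weights are in place is the model converted to its
# final quantized form and switched to eval mode for inference.
torch.ao.quantization.convert(model, inplace=True)
model.eval()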