
Commit ba3abac

Fix comparison of module_type and MulLinear (#1671)
Signed-off-by: Kaihui-intel <[email protected]>
1 parent 047560f

File tree

1 file changed: +1 −2 lines changed

neural_compressor/utils/pytorch.py

Lines changed: 1 addition & 2 deletions

@@ -210,7 +210,6 @@ def load_weight_only(checkpoint_dir, model, layer_wise=False):
     Returns:
         (object): quantized model
     """
-    import neural_compressor  # for eval(config['module_type'])
     from neural_compressor.adaptor.torch_utils.model_wrapper import MulLinear
 
     weights_file = os.path.join(os.path.abspath(os.path.expanduser(checkpoint_dir)), "best_model.pt")
@@ -221,7 +220,7 @@ def load_weight_only(checkpoint_dir, model, layer_wise=False):
     for op_name, config in weight_only_config.items():
         if config["dtype"] == "fp32":
             continue
-        if eval(config["module_type"]) == MulLinear:
+        if config["module_type"] == MulLinear.__module__ + "." + MulLinear.__name__:
             # op should be replaced by MulLinear
             module = util.fetch_module(model, op_name)
             new_module = MulLinear(module)
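Why the change: the old check ran eval() on the module_type string stored in the checkpoint config, which only works if the named module is importable in eval's namespace and executes arbitrary text taken from the checkpoint. The new check builds MulLinear's fully qualified name and compares plain strings. Below is a minimal sketch of both approaches; it uses a hypothetical stand-in class so it runs standalone, rather than the real neural_compressor.adaptor.torch_utils.model_wrapper.MulLinear.

# Minimal sketch of the comparison fix; MulLinear here is a hypothetical
# stand-in for the real wrapper class in neural_compressor.

class MulLinear:  # stand-in for the real wrapper class
    pass

# Checkpoint configs store the wrapper type as a fully qualified string,
# e.g. "neural_compressor.adaptor.torch_utils.model_wrapper.MulLinear".
config = {"module_type": MulLinear.__module__ + "." + MulLinear.__name__}

# Old check: eval() turns the string back into a class object. This fails
# unless the named module is importable where eval runs, and it executes
# arbitrary text read from the checkpoint file:
#   if eval(config["module_type"]) == MulLinear: ...

# New check: build the expected qualified name and compare plain strings.
expected = MulLinear.__module__ + "." + MulLinear.__name__
if config["module_type"] == expected:
    print("op will be wrapped with MulLinear")

The string comparison is both safer (no code execution on checkpoint contents) and independent of what happens to be imported at load time, which is why the now-unneeded "import neural_compressor" line is also removed.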
