From b3cd7f0722f82b3ac3134aa6d9f7cf826fd57b08 Mon Sep 17 00:00:00 2001
From: xin3he
Date: Fri, 14 Jun 2024 15:51:58 +0800
Subject: [PATCH] change model.prepared attribute to model.is_prepared

Signed-off-by: xin3he
---
 neural_compressor/torch/quantization/quantize.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/neural_compressor/torch/quantization/quantize.py b/neural_compressor/torch/quantization/quantize.py
index 77bfaeb1d52..ff8298dad88 100644
--- a/neural_compressor/torch/quantization/quantize.py
+++ b/neural_compressor/torch/quantization/quantize.py
@@ -178,12 +178,12 @@ def convert(
     q_model = model if inplace else copy.deepcopy(model)
 
     # TODO: Optimize the check for prepared flag after adding HQT FP8 Quant
-    assert getattr(model, "prepared", False), "Please run prepare function before convert."
+    assert getattr(model, "is_prepared", False), "Please run prepare function before convert."
 
-    if getattr(model, "prepared", False):
+    if getattr(model, "is_prepared", False):
         if quant_config is None:
             quant_config = model.quant_config
-    example_inputs = model.example_inputs if getattr(model, "prepared", False) else None
+    example_inputs = model.example_inputs if getattr(model, "is_prepared", False) else None
 
     registered_configs = config_registry.get_cls_configs()
     if isinstance(quant_config, dict):
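
For context on the rename: prepare() stamps the model with this flag and
convert() asserts on it, as the hunk above shows. Below is a minimal Python
sketch of that contract; the function bodies are simplified assumptions, not
neural_compressor's actual implementation. Only the is_prepared flag
handling (and the quant_config/example_inputs stashing it implies) mirrors
the diff.

    import copy

    import torch


    def prepare(model: torch.nn.Module, quant_config, example_inputs=None, inplace=True):
        """Illustrative stand-in for the real prepare(); the body is assumed."""
        prepared_model = model if inplace else copy.deepcopy(model)
        # ... observer/calibration insertion would happen here ...
        # Stash what convert() later reads, and mark the model as prepared.
        prepared_model.quant_config = quant_config
        prepared_model.example_inputs = example_inputs
        prepared_model.is_prepared = True  # the attribute renamed from model.prepared
        return prepared_model


    def convert(model: torch.nn.Module, quant_config=None, inplace=True):
        """Illustrative stand-in for the real convert(); the body is assumed."""
        q_model = model if inplace else copy.deepcopy(model)
        # The renamed flag gates conversion, as in the patched code.
        assert getattr(model, "is_prepared", False), "Please run prepare function before convert."
        if quant_config is None:
            quant_config = model.quant_config
        # ... the actual quantization conversion would happen here ...
        return q_model


    # Usage: convert() only succeeds on a model that went through prepare().
    model = prepare(torch.nn.Linear(4, 4), quant_config={"dtype": "int8"})
    q_model = convert(model)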