Skip to content

Commit

Permalink
change model.prepared attribute to model.is_prepared
Browse files Browse the repository at this point in the history
Signed-off-by: xin3he <[email protected]>
  • Loading branch information
xin3he committed Jun 14, 2024
1 parent f579eec commit b3cd7f0
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions neural_compressor/torch/quantization/quantize.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,12 +178,12 @@ def convert(
q_model = model if inplace else copy.deepcopy(model)

# TODO: Optimize the check for prepared flag after adding HQT FP8 Quant
assert getattr(model, "prepared", False), "Please run prepare function before convert."
assert getattr(model, "is_prepared", False), "Please run prepare function before convert."

if getattr(model, "prepared", False):
if getattr(model, "is_prepared", False):
if quant_config is None:
quant_config = model.quant_config
example_inputs = model.example_inputs if getattr(model, "prepared", False) else None
example_inputs = model.example_inputs if getattr(model, "is_prepared", False) else None

registered_configs = config_registry.get_cls_configs()
if isinstance(quant_config, dict):
Expand Down

0 comments on commit b3cd7f0

Please sign in to comment.