diff --git a/src/transformers/quantizers/quantizer_hqq.py b/src/transformers/quantizers/quantizer_hqq.py
index 41cc32c143c2f5..775fea8f4901e6 100755
--- a/src/transformers/quantizers/quantizer_hqq.py
+++ b/src/transformers/quantizers/quantizer_hqq.py
@@ -289,7 +289,7 @@ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs
         return model
 
     def is_serializable(self, safe_serialization=None):
-        return False
+        return True
 
     @property
     def is_trainable(self) -> bool:
diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py
index 9f56157df9e609..8be0bb672e51b8 100755
--- a/src/transformers/utils/quantization_config.py
+++ b/src/transformers/utils/quantization_config.py
@@ -221,7 +221,9 @@ def __init__(
 
         for deprecated_key in ["quant_zero", "quant_scale", "offload_meta"]:
             if deprecated_key in kwargs:
-                logger.info(deprecated_key + " is deprecated. This parameter will be ignored in quantization settings.")
+                logger.info(
+                    deprecated_key + " is deprecated. This parameter will be ignored in quantization settings."
+                )
 
         if axis is None:
             axis = 1
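
The functional part of this diff is the `is_serializable` flip from `False` to `True`, which lets `save_pretrained` write HQQ-quantized checkpoints instead of refusing to serialize them (the `logger.info` hunk is only a line-length reflow). A minimal sketch of the resulting workflow is below; the model id and output directory are illustrative assumptions, not part of the diff.

```python
# Sketch only: save/reload flow enabled by is_serializable() returning True.
# "meta-llama/Llama-3.2-1B" and "llama-1b-hqq" are placeholder names.
from transformers import AutoModelForCausalLM, HqqConfig

quant_config = HqqConfig(nbits=4, group_size=64)  # axis defaults to 1, per the config

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B",
    quantization_config=quant_config,
    device_map="auto",
)

# With the quantizer reporting itself as serializable, the quantized weights
# can be written to disk and reloaded later.
model.save_pretrained("llama-1b-hqq")
reloaded = AutoModelForCausalLM.from_pretrained("llama-1b-hqq", device_map="auto")
```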