drop ValueError as this was from when 4-bit required GPTQ
winglian committed Aug 22, 2024
1 parent fefa95e commit 5b15816
Showing 1 changed file with 0 additions and 6 deletions.
src/axolotl/utils/models.py: 0 additions & 6 deletions
@@ -96,12 +96,6 @@ def check_model_config(cfg: DictDefault, model_config: Union[AutoConfig, DictDef
             "Please make sure to point to a GPTQ model."
         )
 
-    if not cfg.gptq and quant_config_exists and not cfg.load_in_4bit:
-        raise ValueError(
-            "model_config.quantization_config is set but `gptq` flag is not. "
-            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
-        )
-
     lora_modules_to_save = get_linear_embedding_layers(model_config.model_type)
     if (
         cfg.adapter
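
For context, a minimal standalone sketch of the guard this commit deletes; legacy_quant_check and the SimpleNamespace configs are hypothetical stand-ins, not axolotl code. The old check rejected any model whose config carried a quantization_config unless the `gptq` or `load_in_4bit` flag was set, a constraint from the era when 4-bit loading implied GPTQ; after this commit the check simply no longer runs.

from types import SimpleNamespace

def legacy_quant_check(cfg, model_config) -> None:
    # Mirrors the deleted guard from check_model_config: reject a model whose
    # config ships a quantization_config when neither `gptq` nor `load_in_4bit`
    # is set. (Hypothetical stand-in, not the axolotl function itself.)
    quant_config_exists = getattr(model_config, "quantization_config", None) is not None
    if not cfg.gptq and quant_config_exists and not cfg.load_in_4bit:
        raise ValueError(
            "model_config.quantization_config is set but `gptq` flag is not. "
            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
        )

# Example: a pre-quantized, non-GPTQ checkpoint (e.g. a bitsandbytes-quantized
# model) with neither flag set. The old guard rejected it; after this commit it passes.
cfg = SimpleNamespace(gptq=False, load_in_4bit=False)
model_config = SimpleNamespace(quantization_config={"quant_method": "bitsandbytes"})

try:
    legacy_quant_check(cfg, model_config)
except ValueError as err:
    print(f"pre-5b15816 behaviour: {err}")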
