Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implements SPPO Alignment Algorithm #1735

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
74 changes: 74 additions & 0 deletions examples/llama-3/sppo-qlora-8b.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
# QLoRA SPPO (Self-Play Preference Optimization) alignment config for
# Meta-Llama-3-8B-Instruct, iteration 1.
base_model: meta-llama/Meta-Llama-3-8B-Instruct
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

# RL alignment method and its KL/regularization strength.
rl: sppo
rl_beta: 0.1
datasets:
  - path: orion-research/Aura-SPPO-Iter1_score
    type: chatml.sppo_argilla_chat
# NOTE: this key was previously declared twice (once empty, once as
# last_run_prepared). Duplicate mapping keys are invalid YAML and rejected by
# strict loaders; only the effective value is kept.
dataset_prepared_path: last_run_prepared
val_set_size: 0

output_dir: ./outputs/out/Meta-Llama-3-8B-Instruct-SPPO-Iter1

adapter: qlora
lora_model_dir:

sequence_len: 1024
sample_packing: false
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_dora: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 1
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 2.0e-4

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

save_safetensors: true
warmup_steps: 50
# NOTE(review): val_set_size is 0, so there is no eval split for
# evals_per_epoch / eval_max_new_tokens to act on — confirm whether
# evaluation is actually intended here.
evals_per_epoch: 1
eval_max_new_tokens: 128
eval_table_size:
save_steps: 100
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  pad_token: "<|end_of_text|>"
76 changes: 75 additions & 1 deletion src/axolotl/core/trainer_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
)
from transformers.trainer_utils import seed_worker
from transformers.utils import is_sagemaker_mp_enabled
from axolotl.custom.trainers.SPPOTrainer import SPPOTrainer
from trl import DPOConfig, DPOTrainer, KTOConfig, KTOTrainer, ORPOConfig, ORPOTrainer
from trl.trainer.utils import pad_to_length

Expand Down Expand Up @@ -244,6 +245,11 @@ class AxolotlDPOConfig(AxolotlTrainingMixins, DPOConfig):
DPO config for DPO training
"""

@dataclass
class AxolotlSPPOConfig(AxolotlTrainingMixins, DPOConfig):
    """
    SPPO config for SPPO training. Inherits trl's DPOConfig, since SPPO
    shares the DPO trainer's configuration surface.
    """

@dataclass
class AxolotlORPOConfig(AxolotlTrainingMixins, ORPOConfig):
Expand Down Expand Up @@ -897,6 +903,65 @@ def tokenize_row(
res[key] = res[key][1:]
return res

class AxolotlSPPOTrainer(SPPOTrainer):
    """
    Extend the base SPPOTrainer for axolotl helpers: hub tagging and
    optional LoRA+ optimizer creation.
    """

    # Tags force-added to the model card when pushing to the HF Hub.
    tag_names = ["axolotl", "sppo"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reset so create_optimizer() below can build the (possibly LoRA+)
        # optimizer lazily rather than keeping whatever the parent created.
        self.optimizer = None

    def create_optimizer(self):
        """
        Create the optimizer, substituting a LoRA+ optimizer (separate
        learning rates for LoRA adapter matrices) when
        ``args.loraplus_lr_ratio`` is set; otherwise defer to the parent.

        NOTE(review): duplicated from AxolotlTrainer — candidate for a shared
        AxolotlCreateOptimizerMixin.
        """
        # No LoRA+ requested: stock trl/transformers optimizer.
        if self.args.loraplus_lr_ratio is None:
            return super().create_optimizer()

        # Under SageMaker model parallelism the wrapped model owns the params.
        opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
        if self.optimizer is None:  # pylint: disable=access-member-before-definition
            optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(
                self.args,
                opt_model,
            )

            loraplus_lr_ratio = getattr(self.args, "loraplus_lr_ratio", None)
            if loraplus_lr_ratio:
                print("Using lora+")
            loraplus_lr_embedding = getattr(self.args, "loraplus_lr_embedding", None)
            # Build the LoRA+ optimizer around the base optimizer class/kwargs.
            self.optimizer = create_loraplus_optimizer(  # pylint: disable=attribute-defined-outside-init
                opt_model,
                optimizer_cls,
                optimizer_kwargs,
                loraplus_lr_ratio,
                loraplus_lr_embedding,
            )

        if is_sagemaker_mp_enabled():
            self.optimizer = smp.DistributedOptimizer(  # pylint: disable=attribute-defined-outside-init
                self.optimizer
            )

        return self.optimizer
Comment on lines +917 to +945
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if it might be worth extracting this as a AxolotlCreateOptimizerMixin and then including it in both here and the AxolotlTrainer


@wraps(DPOTrainer.push_to_hub)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is DPOTrainer correct for this?

def push_to_hub(self, *args, **kwargs) -> str:
"""
Overwrite the `push_to_hub` method in order to force-add the tags when pushing the
model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
"""
kwargs = _sanitize_kwargs_for_tagging(tag_names=self.tag_names, kwargs=kwargs)

return super().push_to_hub(*args, **kwargs)

    def tokenize_row(
        self, feature, model: Optional[Union[PreTrainedModel, torch.nn.Module]] = None
    ) -> Dict:
        """
        Tokenize a single preference row via the parent, then strip a
        spurious leading token when the tokenizer defines no BOS token.

        NOTE(review): duplicated from AxolotlDPOTrainer — candidate for a
        shared mixin.
        """
        res = super().tokenize_row(feature, model=model)
        # Presumably the parent reserves a leading slot for the BOS token even
        # when the tokenizer has none, leaving a None at index 0; in that case
        # drop the first element of every sequence field. TODO confirm against
        # the upstream tokenize_row implementation.
        if self.tokenizer.bos_token_id is None and res["prompt_input_ids"][0] is None:
            for key in res.keys():
                res[key] = res[key][1:]
        return res
Comment on lines +948 to +964
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

more duplicated code that makes me think we should be extracting this into a Mixin.


class AxolotlORPOTrainer(ORPOTrainer):
"""
Expand Down Expand Up @@ -1521,7 +1586,7 @@ def build_collator(

class HFRLTrainerBuilder(TrainerBuilderBase):
"""
Trainer factory class for DPO Trainer
Trainer factory class for DPO/SPPO Trainer
"""

def get_callbacks(self):
Expand Down Expand Up @@ -1690,6 +1755,15 @@ def build(self, total_num_steps):
dpo_trainer_kwargs["generate_during_eval"] = True
if self.cfg.rl == "dpo":
dpo_trainer_kwargs["dataset_num_proc"] = self.cfg.dataset_processes
if self.cfg.rl in ["sppo"]:
trainer_cls = AxolotlSPPOTrainer
dpo_trainer_kwargs["beta"] = self.cfg.rl_beta or 0.1
trainer_cls_args = [self.model, self.model_ref]
dpo_trainer_kwargs["max_length"] = self.cfg.sequence_len
dpo_trainer_kwargs["max_target_length"] = None
dpo_trainer_kwargs["max_prompt_length"] = self.cfg.sequence_len
dpo_trainer_kwargs["generate_during_eval"] = True
dpo_trainer_kwargs["dataset_num_proc"] = self.cfg.dataset_processes
elif self.cfg.rl == "orpo":
trainer_cls = AxolotlORPOTrainer
trainer_cls_args = [self.model]
Expand Down
Loading