
Refactoring reinforce #23

Open
wants to merge 14 commits into main
2 changes: 1 addition & 1 deletion configs/exp/train/dpo/dpo.json
@@ -100,7 +100,7 @@
"bos_token": "<|begin_of_text|>",
"eos_token": "<|end_of_text|>"
},
"trainer_settings": {
"trainer_settings": {
"evaluation_strategy": "steps",
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 1,
214 changes: 214 additions & 0 deletions configs/exp/train/reinforce/reinforce.json
@@ -0,0 +1,214 @@
{
"train_dataset_settings": {
"sources": [
{
"name": "train_hh",
"records_path": "/from_s3/datasets/train_chat_filtered.jsonl",
"sample_rate": 1.0
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 1024,
"only_answer_loss": true,
"keep_end": true,
"random_cut": false,
"check_max_len_enforced": true
},
"val_dataset_settings": {
"sources": [
{
"name": "val_hh",
"records_path": "/from_s3/datasets/val_chat_filtered.jsonl",
"sample_rate": 0.2
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 1024,
"only_answer_loss": true,
"keep_end": true,
"random_cut": false,
"check_max_len_enforced": true
},

"cherry_pick_settings": {
"generator_transformers_settings": {
"num_beams": 1,
"do_sample": true,
"num_return_sequences": 3,
"max_new_tokens": 1024,
"temperature": 0.7,
"top_p": 1.0,
"top_k": 0,
"repetition_penalty": 1.0
},
"custom_generation_settings": {
"generation_eos_token": "<|eot_id|>",
"remove_prompt": true,
"skip_special_tokens": true
},
"dataset_settings": {
"sources": [
{
"name": "cp_hh",
"records_path": "/from_s3/datasets/val_chat_filtered.jsonl",
"num_samples": 10
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 1024,
"random_cut": false,
"keep_end": true,
"only_answer_loss": true,
"check_max_len_enforced": true
},
"metric_settings": [
{
"type": "length",
"parameters": {
"need_average": [true, false]
}
},
{
"type": "self_bleu",
"parameters": {
"need_average": [true, false]
}
},
{
"type": "dist_n",
"parameters": {
"need_average": [true, false]
}
},
{
"type": "diversity",
"parameters": {
"need_average": [true, false]
}
},
{
"type": "perplexity",
"parameters": {
"need_average": [true, false]
}
}
]
},
"reward_model_settings": {
"model_path": "/from_s3/models/rm",
"model_type": "llama_reward_value",
"model_kwargs": {
"num_labels": 1,
"attn_implementation": "flash_attention_2"
},
"transformers_settings": {},
"check_checkpoint_is_compatible": true
},
"model_settings": {
"model_path": "/from_s3/models/policy/",
"model_type": "causal",
"transformers_settings": {},
"model_kwargs": {
"attn_implementation": "flash_attention_2"
},
"check_checkpoint_is_compatible": true
},
"tokenizer_settings": {
"use_fast": true,
"padding_side": "left"
},
"peft_settings": {
"inference_mode": false,
"r": 64,
"lora_alpha": 64,
"lora_dropout": 0.0,
"use_rslora": false,
"init_lora_weights": true,
"task_type": "CAUSAL_LM"
},
"trainer_settings": {
"evaluation_strategy": "steps",
"per_device_train_batch_size": 8,
"per_device_eval_batch_size": 8,
"gradient_accumulation_steps": 10,
"adam_beta1": 0.9,
"adam_beta2": 0.95,
"adam_epsilon": 1e-5,
"eval_steps": 50,
"save_steps": 100,
"save_strategy": "steps",
"load_best_model_at_end": false,
"logging_steps": 1,
"learning_rate": 1e-5,
"max_steps": 1001,
"lr_scheduler_type": "constant_with_warmup",
"warmup_ratio": 0.03,
"fp16": false,
"bf16": true,
"optim": "adamw_torch",
"weight_decay": 0.0,
"max_grad_norm": 2,
"save_total_limit": 11,
"dataloader_num_workers": 12,
"deepspeed": "configs/exp/deepspeed/ds_config_stage_2_reinforce.json",

"max_response_length": 1024,
"stop_token": "<|eot_id|>",
"temperature": 0.7,
"non_eos_penalty": true,
"penalty_reward_value": -1,
"clip_rewards_min": -1e8,
"clip_rewards_max": 1e8,
"whiten_rewards": false,
"kl_coef": 0.05,
"risk_coef": 0.00,
"mean_baseline_coef": 0.95,

"init_kl_coef": null,
"target_kl": null,
"adaptive_kl_k": null,
"adaptive_kl_clip_value": null,
"min_kl_coef": null,
"max_kl_coef": null,

"num_samples_for_reward_stats": 1000

},
"wandb_settings": {
"project_name": "...",
"group": "hh-llama3-8b",
"job_type": "reinforce",
"run_name": "rvm-kl_0.05",
"entity": "...",
"tags": ["reinforce", "llama3-8b", "hh", "rvm"]
},
"log_path": "train_output/hh/llama3-8b/reinforce/rvm-kl_0.05-0.01/"
}
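The `trainer_settings` block above parameterizes the REINFORCE-specific reward shaping: a KL penalty against the frozen reference policy (`kl_coef`), a fixed penalty for responses that never emit `stop_token` (`non_eos_penalty` / `penalty_reward_value`), reward clipping and optional whitening, and an exponential-moving-average baseline (`mean_baseline_coef`) seeded using `num_samples_for_reward_stats` generations; the adaptive-KL fields are all `null`, i.e. disabled, in this config. Below is a minimal sketch of what those knobs imply, assuming sequence-level log-probs are already gathered for the sampled responses; the function and class names are illustrative, not the trainer's actual API:

```python
import torch


def shape_rewards(
    rm_scores: torch.Tensor,       # (batch,) raw reward-model scores
    logprobs: torch.Tensor,        # (batch,) sum of log pi_policy(response | prompt)
    ref_logprobs: torch.Tensor,    # (batch,) sum of log pi_ref(response | prompt)
    ends_with_stop: torch.Tensor,  # (batch,) bool: response emitted the stop token
    kl_coef: float = 0.05,               # "kl_coef"
    penalty_reward_value: float = -1.0,  # "penalty_reward_value"
    clip_min: float = -1e8,              # "clip_rewards_min"
    clip_max: float = 1e8,               # "clip_rewards_max"
    whiten: bool = False,                # "whiten_rewards"
) -> torch.Tensor:
    rewards = rm_scores.clone()
    # "non_eos_penalty": responses truncated without the stop token get a fixed penalty
    rewards = torch.where(
        ends_with_stop, rewards, torch.full_like(rewards, penalty_reward_value)
    )
    rewards = rewards.clamp(clip_min, clip_max)
    if whiten:
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
    # sequence-level KL penalty against the frozen reference policy
    return rewards - kl_coef * (logprobs - ref_logprobs)


class EmaBaseline:
    """Running reward baseline controlled by "mean_baseline_coef" (0.95 above)."""

    def __init__(self, coef: float = 0.95, init_value: float = 0.0):
        # init_value would plausibly be seeded from the rewards of the
        # "num_samples_for_reward_stats" pre-collected generations
        self.coef = coef
        self.value = init_value

    def update(self, rewards: torch.Tensor) -> torch.Tensor:
        advantages = rewards - self.value
        self.value = self.coef * self.value + (1.0 - self.coef) * rewards.mean().item()
        return advantages


def reinforce_loss(advantages: torch.Tensor, logprobs: torch.Tensor) -> torch.Tensor:
    # REINFORCE objective for one sampled batch: -E[(R - b) * log pi(response | prompt)]
    return -(advantages.detach() * logprobs).mean()
```

Whether the KL term is applied per token or per sequence, and at which point the baseline is refreshed, depends on the trainer implementation; the sketch only mirrors the knobs exposed in the config.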

175 changes: 175 additions & 0 deletions tests/fixtures/configs/train/reinforce/base.json
@@ -0,0 +1,175 @@
{
"train_dataset_settings": {
"sources": [
{
"name": "train_chat",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"sample_rate": 1.0
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 2000,
"only_answer_loss": true
},
"val_dataset_settings": {
"sources": [
{
"name": "val_chat",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"sample_rate": 1.0
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 2000,
"only_answer_loss": true
},
"cherry_pick_settings": {
"generator_transformers_settings": {
"num_beams": 1,
"do_sample": false,
"stop_strings": "</RS>",
"max_new_tokens": 8
},
"custom_generation_settings": {
"skip_special_tokens": false
},
"dataset_settings": {
"sources": [
{
"name": "chat_test",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"num_samples": 2
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 150,
"only_answer_loss": true
},
"metric_settings": [
{
"type": "length",
"parameters": {
"need_average": [
true
]
}
},
{
"type": "kl",
"parameters": {
"need_average": [
true
],
"ref_logits_type": "sft"
}
},
{
"type": "kl",
"parameters": {
"need_average": [
true
],
"ref_logits_type": "reference"
}
}
]
},
"reward_model_settings": {
"model_path": "tests/fixtures/models/llama2_tiny",
"model_type": "seq_cls",
"model_kwargs": {
"num_labels": 1
},
"transformers_settings": {}
},
"model_settings": {
"model_path": "tests/fixtures/models/llama2_tiny",
"model_type": "causal",
"transformers_settings": {},
"embeddings_initialization_strategy": {
"<|start_header_id|>": "<s>",
"<|end_header_id|>": "</s>",
"<|eot_id|>": "</s>"
}
},
"tokenizer_settings": {
"tokenizer_kwargs": {
"padding_side": "left"
}
},
"trainer_settings": {
"evaluation_strategy": "steps",
"per_device_train_batch_size": 8,
"per_device_eval_batch_size": 8,
"gradient_accumulation_steps": 10,
"adam_beta1": 0.9,
"adam_beta2": 0.95,
"adam_epsilon": 0.00001,
"eval_steps": 50,
"save_steps": 100,
"save_strategy": "steps",
"load_best_model_at_end": false,
"logging_steps": 1,
"learning_rate": 0.00001,
"max_steps": 1001,
"lr_scheduler_type": "constant_with_warmup",
"warmup_ratio": 0.03,
"fp16": false,
"bf16": false,
"optim": "adamw_torch",
"weight_decay": 0.0,
"max_grad_norm": 2,
"save_total_limit": 11,
"dataloader_num_workers": 12,
"stop_token": "<|eot_id|>",
"temperature": 0.7,
"non_eos_penalty": true,
"penalty_reward_value": -1,
"clip_rewards_min": -1e+8,
"clip_rewards_max": 1e+8,
"whiten_rewards": false,
"kl_coef": 0.05,
"mean_baseline_coef": 0.95,
"num_samples_for_reward_stats": 1000,
"no_cuda": true
},
"special_tokens_settings": {
"bos_token": "<s>",
"eos_token": "</s>",
"pad_token": "<unk>"
},
"logging_settings": {
"project_name": "alignment",
"run_name": "sft",
"entity": "turbo-alignment"
},
"seed": 0,
"log_path": "train_output"
}
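This fixture mirrors the experiment config but swaps in the tiny llama2 test model, disables CUDA and mixed precision, and shortens generation so the trainer can be exercised in CI. A minimal sketch of how a test might consume it, assuming plain `json` loading; the test name and assertions are illustrative, not the repository's actual test code:

```python
import json
from pathlib import Path

CONFIG_PATH = Path("tests/fixtures/configs/train/reinforce/base.json")


def test_reinforce_fixture_is_consistent() -> None:
    settings = json.loads(CONFIG_PATH.read_text())
    trainer = settings["trainer_settings"]

    # CPU-only, full-precision fixture keeps CI runs cheap and deterministic
    assert trainer["no_cuda"] is True
    assert trainer["fp16"] is False and trainer["bf16"] is False

    # the reward model head must be a single-value scorer
    assert settings["reward_model_settings"]["model_kwargs"]["num_labels"] == 1

    # generation stops on the same token the trainer treats as end-of-sequence
    assert trainer["stop_token"] == "<|eot_id|>"
```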