Release v0.1.6
hiyouga committed Aug 11, 2023
1 parent 156710a commit a48cb0d
Showing 18 changed files with 127 additions and 41 deletions.
3 changes: 3 additions & 0 deletions README.md
@@ -55,6 +55,7 @@
| [InternLM](https://github.com/InternLM/InternLM) | 7B | q_proj,v_proj | intern |
| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B | c_attn | chatml |
| [XVERSE](https://github.com/xverse-ai/XVERSE-13B) | 13B | q_proj,v_proj | - |
| [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) | 6B | query_key_value | chatglm2 |

- **Default module** is used for the `--lora_target` argument. Please use `python src/train_bash.py -h` to see all available options.
- For the "base" models, the `--template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the corresponding template for the "chat" models.
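For reference, a hedged sketch of how these options map onto a programmatic call, assuming `run_exp` from `src/llmtuner/tuner/tune.py` (shown later in this commit); the dataset name and output directory are illustrative and not taken from this page:

from llmtuner.tuner.tune import run_exp  # defined in src/llmtuner/tuner/tune.py

# Values follow the table above: Qwen's default module is c_attn, its template is chatml.
run_exp(dict(
    stage="sft",
    model_name_or_path="Qwen/Qwen-7B",
    do_train=True,
    dataset="alpaca_gpt4_en",          # placeholder dataset name
    template="chatml",
    finetuning_type="lora",
    lora_target="c_attn",
    output_dir="output/qwen-7b-sft-lora",  # placeholder path
))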
@@ -408,6 +409,8 @@ Please follow the model licenses to use the corresponding model weights:
- [Baichuan](https://huggingface.co/baichuan-inc/baichuan-7B/resolve/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf)
- [InternLM](https://github.com/InternLM/InternLM#open-source-license)
- [Qwen](https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/LICENSE)
- [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf)
- [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B/blob/main/MODEL_LICENSE)

## Citation

2 changes: 1 addition & 1 deletion src/llmtuner/__init__.py
@@ -6,4 +6,4 @@
from llmtuner.webui import create_ui, create_web_demo


__version__ = "0.1.5"
__version__ = "0.1.6"
6 changes: 4 additions & 2 deletions src/llmtuner/dsets/loader.py
@@ -93,11 +93,13 @@ def get_dataset(
dataset = dataset.rename_column(getattr(dataset_attr, column_name), column_name)

if dataset_attr.source_prefix: # add prefix
features = None
if data_args.streaming:
features = dataset.features
features["prefix"] = Value(dtype="string", id=None)
dataset = dataset.map(lambda _: {"prefix": dataset_attr.source_prefix}, features=features)
dataset = dataset.map(lambda _: {"prefix": dataset_attr.source_prefix}, features=features)
else:
prefix_data = [dataset_attr.source_prefix] * len(dataset)
dataset = dataset.add_column("prefix", prefix_data)

all_datasets.append(dataset)

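For context, a minimal sketch of the behaviour the hunk above relies on (assuming Hugging Face datasets >= 2.11, which this commit does not pin): a streaming IterableDataset needs the new column declared via features, while a regular Dataset can add it directly with add_column.

from datasets import Dataset, Value

ds = Dataset.from_dict({"instruction": ["hello", "world"]})

# Non-streaming path: materialize the prefix column in one call.
ds_with_prefix = ds.add_column("prefix", ["PREFIX"] * len(ds))

# Streaming path: the dataset is lazy, so declare the new column's type up front
# and let map() fill it in on the fly.
stream = ds.to_iterable_dataset()
features = stream.features
features["prefix"] = Value(dtype="string", id=None)
stream = stream.map(lambda _: {"prefix": "PREFIX"}, features=features)

print(ds_with_prefix[0])   # {'instruction': 'hello', 'prefix': 'PREFIX'}
print(next(iter(stream)))  # the same record, produced lazily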
3 changes: 2 additions & 1 deletion src/llmtuner/dsets/utils.py
@@ -19,7 +19,8 @@ def split_dataset(
dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=training_args.seed)
return {"train_dataset": train_set, "eval_dataset": val_set}
else:
dataset = dataset.train_test_split(test_size=data_args.val_size, seed=training_args.seed)
val_size = int(data_args.val_size) if data_args.val_size > 1 else data_args.val_size
dataset = dataset.train_test_split(test_size=val_size, seed=training_args.seed)
return {"train_dataset": dataset["train"], "eval_dataset": dataset["test"]}
else:
if data_args.streaming:
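A small sketch of the convention the change above enables (assuming Hugging Face datasets): train_test_split accepts a float in (0, 1) as a fraction and an int as an absolute sample count, so val_size only needs casting when it denotes a count. The helper name is made up for illustration.

from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(100))})

def resolve_val_size(val_size: float):
    # mirrors the patched logic: cast to int only when the value is a sample count
    return int(val_size) if val_size > 1 else val_size

print(ds.train_test_split(test_size=resolve_val_size(0.1), seed=42)["test"].num_rows)   # 10
print(ds.train_test_split(test_size=resolve_val_size(20.0), seed=42)["test"].num_rows)  # 20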
8 changes: 6 additions & 2 deletions src/llmtuner/extras/constants.py
@@ -37,7 +37,9 @@
"InternLM-7B": "internlm/internlm-7b",
"InternLM-7B-Chat": "internlm/internlm-chat-7b",
"Qwen-7B": "Qwen/Qwen-7B",
"Qwen-7B-Chat": "Qwen/Qwen-7B-Chat"
"Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
"XVERSE-13B": "xverse/XVERSE-13B",
"ChatGLM2-6B": "THUDM/chatglm2-6b"
}

DEFAULT_MODULE = {
@@ -48,5 +50,7 @@
"Falcon": "query_key_value",
"Baichuan": "W_pack",
"InternLM": "q_proj,v_proj",
"Qwen": "c_attn"
"Qwen": "c_attn",
"XVERSE": "q_proj,v_proj",
"ChatGLM2": "query_key_value"
}
19 changes: 18 additions & 1 deletion src/llmtuner/extras/template.py
@@ -178,7 +178,7 @@ def register_template(
stop_words: List[str],
use_history: bool
) -> None:
template_class = Llama2Template if name == "llama2" else Template
template_class = Llama2Template if "llama2" in name else Template
templates[name] = template_class(
prefix=prefix,
prompt=prompt,
@@ -272,6 +272,23 @@ def get_template_and_fix_tokenizer(
)


r"""
Supports: https://github.com/ymcui/Chinese-LLaMA-Alpaca-2
"""
register_template(
name="llama2_zh",
prefix=[
"<<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n"
],
prompt=[
"[INST] {{query}} [/INST] "
],
sep=[],
stop_words=[],
use_history=True
)


r"""
Supports: https://huggingface.co/tatsu-lab/alpaca-7b-wdiff
https://github.com/ymcui/Chinese-LLaMA-Alpaca
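Purely illustrative: the single-turn string the llama2_zh template registered above is aiming for, written out by hand. This assumes the standard Llama-2 chat layout, where the system block is folded into the first [INST] segment; how Llama2Template actually assembles it is defined elsewhere in template.py.

# Hand-built approximation of one llama2_zh turn (assumption: Llama-2 chat layout).
prefix = "<<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n"
query = "Introduce yourself in one sentence."
prompt = "[INST] " + prefix + query + " [/INST] "
print(prompt)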
4 changes: 4 additions & 0 deletions src/llmtuner/hparams/finetuning_args.py
@@ -57,6 +57,10 @@ class FinetuningArguments:
Qwen choices: [\"c_attn\", \"attn.c_proj\", \"w1\", \"w2\", \"mlp.c_proj\"], \
LLaMA-2, InternLM, XVERSE choices: the same as LLaMA."}
)
resume_lora_training: Optional[bool] = field(
default=True,
metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
)
dpo_beta: Optional[float] = field(
default=0.1,
metadata={"help": "The beta parameter for the DPO loss."}
4 changes: 0 additions & 4 deletions src/llmtuner/hparams/model_args.py
@@ -55,10 +55,6 @@ class ModelArguments:
default=None,
metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
)
resume_lora_training: Optional[bool] = field(
default=True,
metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
)
plot_loss: Optional[bool] = field(
default=False,
metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
2 changes: 1 addition & 1 deletion src/llmtuner/tuner/core/adapter.py
@@ -65,7 +65,7 @@ def init_adapter(
assert os.path.exists(os.path.join(model_args.checkpoint_dir[0], CONFIG_NAME)), \
"The given checkpoint may be not a LoRA checkpoint, please specify `--finetuning_type full/freeze` instead."

if (is_trainable and model_args.resume_lora_training) or (not is_mergeable): # continually train on the lora weights
if (is_trainable and finetuning_args.resume_lora_training) or (not is_mergeable): # continually fine-tuning
checkpoints_to_merge, latest_checkpoint = model_args.checkpoint_dir[:-1], model_args.checkpoint_dir[-1]
else:
checkpoints_to_merge = model_args.checkpoint_dir
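A minimal, stand-alone restatement of the branch above, useful for seeing what resume_lora_training (now a finetuning argument) changes; the helper name is invented for illustration.

def partition_checkpoints(checkpoint_dir, resume_lora_training, is_trainable=True, is_mergeable=True):
    # Mirrors init_adapter: keep training on the newest LoRA checkpoint and merge the
    # older ones, unless resuming is disabled (then merge everything and start new weights).
    if (is_trainable and resume_lora_training) or (not is_mergeable):
        return checkpoint_dir[:-1], checkpoint_dir[-1]
    return checkpoint_dir, None

print(partition_checkpoints(["ckpt-100", "ckpt-200"], resume_lora_training=True))   # (['ckpt-100'], 'ckpt-200')
print(partition_checkpoints(["ckpt-100", "ckpt-200"], resume_lora_training=False))  # (['ckpt-100', 'ckpt-200'], None)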
2 changes: 1 addition & 1 deletion src/llmtuner/tuner/tune.py
@@ -18,7 +18,7 @@

def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["TrainerCallback"]] = None):
model_args, data_args, training_args, finetuning_args, generating_args, general_args = get_train_args(args)
callbacks = [LogCallback()] if callbacks is None else callbacks + [LogCallback()]
callbacks = [LogCallback()] if callbacks is None else callbacks

if general_args.stage == "pt":
run_pt(model_args, data_args, training_args, finetuning_args, callbacks)
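With this change, callbacks passed by the caller are used as-is instead of always having LogCallback appended. A hedged usage sketch (import path taken from this file's location; the callback class is illustrative):

from transformers import TrainerCallback

from llmtuner.tuner.tune import run_exp  # src/llmtuner/tuner/tune.py

class PrintLossCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs:
            print(f"step {state.global_step}: {logs}")

# Passing callbacks now replaces the default LogCallback rather than adding to it.
# run_exp(args={...}, callbacks=[PrintLossCallback()])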
2 changes: 1 addition & 1 deletion src/llmtuner/webui/components/data.py
@@ -16,6 +16,6 @@ def create_preview_box() -> Tuple["Block", "Component", "Component", "Component"

close_btn = gr.Button()

close_btn.click(lambda: gr.update(visible=False), outputs=[preview_box])
close_btn.click(lambda: gr.update(visible=False), outputs=[preview_box], queue=False)

return preview_box, preview_count, preview_samples, close_btn
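The queue=False additions in this and the following web UI files share one idea: quick, purely cosmetic updates (such as hiding a preview box) bypass the request queue and apply immediately, while long-running jobs stay queued. A rough sketch under Gradio 3.x assumptions:

import gradio as gr

with gr.Blocks() as demo:
    preview_box = gr.Box(visible=False)
    with preview_box:
        gr.Markdown("preview contents")
    open_btn = gr.Button("Open preview")
    close_btn = gr.Button("Close preview")

    # Cosmetic visibility toggles skip the queue; training/evaluation events would not.
    open_btn.click(lambda: gr.update(visible=True), outputs=[preview_box], queue=False)
    close_btn.click(lambda: gr.update(visible=False), outputs=[preview_box], queue=False)

# demo.queue().launch()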
15 changes: 13 additions & 2 deletions src/llmtuner/webui/components/eval.py
@@ -20,7 +20,12 @@ def create_eval_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict

dataset_dir.change(list_dataset, [dataset_dir], [dataset])
dataset.change(can_preview, [dataset_dir, dataset], [preview_btn])
preview_btn.click(get_preview, [dataset_dir, dataset], [preview_count, preview_samples, preview_box])
preview_btn.click(
get_preview,
[dataset_dir, dataset],
[preview_count, preview_samples, preview_box],
queue=False
)

with gr.Row():
max_source_length = gr.Slider(value=512, minimum=4, maximum=4096, step=1)
@@ -33,6 +38,9 @@ def create_eval_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict
start_btn = gr.Button()
stop_btn = gr.Button()

with gr.Row():
process_bar = gr.Slider(visible=False, interactive=False)

with gr.Box():
output_box = gr.Markdown()

@@ -54,7 +62,10 @@ def create_eval_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict
batch_size,
predict
],
[output_box]
[
output_box,
process_bar
]
)
stop_btn.click(runner.set_abort, queue=False)

28 changes: 23 additions & 5 deletions src/llmtuner/webui/components/sft.py
@@ -22,7 +22,12 @@ def create_sft_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict[

dataset_dir.change(list_dataset, [dataset_dir], [dataset])
dataset.change(can_preview, [dataset_dir, dataset], [preview_btn])
preview_btn.click(get_preview, [dataset_dir, dataset], [preview_count, preview_samples, preview_box])
preview_btn.click(
get_preview,
[dataset_dir, dataset],
[preview_count, preview_samples, preview_box],
queue=False
)

with gr.Row():
max_source_length = gr.Slider(value=512, minimum=4, maximum=4096, step=1)
@@ -46,20 +51,26 @@ def create_sft_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict[
save_steps = gr.Slider(value=100, minimum=10, maximum=5000, step=10)
warmup_steps = gr.Slider(value=0, minimum=0, maximum=5000, step=1)
compute_type = gr.Radio(choices=["fp16", "bf16"], value="fp16")
padding_side = gr.Radio(choices=["left", "right"], value="left")

with gr.Accordion(label="LoRA config", open=False) as lora_tab:
with gr.Row():
lora_rank = gr.Slider(value=8, minimum=1, maximum=1024, step=1, scale=1)
lora_dropout = gr.Slider(value=0, minimum=0, maximum=1, step=0.01, scale=1)
lora_dropout = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1)
lora_target = gr.Textbox(scale=2)
resume_lora_training = gr.Checkbox(value=True, scale=1)

with gr.Row():
start_btn = gr.Button()
stop_btn = gr.Button()

with gr.Row():
with gr.Column(scale=3):
output_dir = gr.Textbox()
with gr.Row():
output_dir = gr.Textbox()

with gr.Row():
process_bar = gr.Slider(visible=False, interactive=False)

with gr.Box():
output_box = gr.Markdown()
@@ -93,16 +104,21 @@ def create_sft_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict[
save_steps,
warmup_steps,
compute_type,
padding_side,
lora_rank,
lora_dropout,
lora_target,
resume_lora_training,
output_dir
],
[output_box]
[
output_box,
process_bar
]
)
stop_btn.click(runner.set_abort, queue=False)

output_box.change(
process_bar.change(
gen_plot, [top_elems["model_name"], top_elems["finetuning_type"], output_dir], loss_viewer, queue=False
)

@@ -128,10 +144,12 @@ def create_sft_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict[
save_steps=save_steps,
warmup_steps=warmup_steps,
compute_type=compute_type,
padding_side=padding_side,
lora_tab=lora_tab,
lora_rank=lora_rank,
lora_dropout=lora_dropout,
lora_target=lora_target,
resume_lora_training=resume_lora_training,
start_btn=start_btn,
stop_btn=stop_btn,
output_dir=output_dir,
2 changes: 1 addition & 1 deletion src/llmtuner/webui/components/top.py
@@ -43,7 +43,7 @@ def create_top() -> Dict[str, "Component"]:
can_quantize, [finetuning_type], [quantization_bit]
)

refresh_btn.click(list_checkpoint, [model_name, finetuning_type], [checkpoints])
refresh_btn.click(list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False)

return dict(
lang=lang,
2 changes: 1 addition & 1 deletion src/llmtuner/webui/interface.py
@@ -67,7 +67,7 @@ def create_web_demo() -> gr.Blocks:

demo.load(manager.gen_label, [lang], [lang] + list(chat_elems.values()))

lang.change(manager.gen_label, [lang], [lang] + list(chat_elems.values()))
lang.change(manager.gen_label, [lang], [lang] + list(chat_elems.values()), queue=False)

return demo

20 changes: 20 additions & 0 deletions src/llmtuner/webui/locales.py
@@ -277,6 +277,16 @@
"info": "是否启用 FP16 或 BF16 混合精度训练。"
}
},
"padding_side": {
"en": {
"label": "Padding side",
"info": "The side on which the model should have padding applied."
},
"zh": {
"label": "填充位置",
"info": "使用左填充或右填充。"
}
},
"lora_tab": {
"en": {
"label": "LoRA configurations"
@@ -315,6 +325,16 @@
"info": "应用 LoRA 的线性层名称。使用英文逗号分隔多个名称。"
}
},
"resume_lora_training": {
"en": {
"label": "Resume LoRA training",
"info": "Whether to resume training from the last LoRA weights or create new lora weights."
},
"zh": {
"label": "继续上次的训练",
"info": "接着上次的 LoRA 权重训练或创建一个新的 LoRA 权重。"
}
},
"start_btn": {
"en": {
"value": "Start"