Commit 646578e
Parent: 4995158
Message: vllm configs and added question to reference in prediction outputs
18 changed files with 450 additions and 47 deletions.
Changed: the Vicuna model config (imported downstream as ..local_models.lmsys.vicuna).

@@ -1,10 +1,15 @@
 from mmengine.config import read_base
 with read_base():
-    from ..model_template import get_default_model
+    from ..model_template import get_default_model, get_vllm_model
     from ...paths import ROOT_DIR

 vicuna_7b_v1_5 = get_default_model(abbr="vicuna-7b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-7b-v1.5")
 vicuna_13b_v1_5 = get_default_model(abbr="vicuna-13b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-13b-v1.5")

 vicuna_bases = [vicuna_7b_v1_5, vicuna_13b_v1_5]
+
+vicuna_7b_v1_5_vllm = get_vllm_model(abbr="vicuna-7b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-7b-v1.5")
+vicuna_13b_v1_5_vllm = get_vllm_model(abbr="vicuna-13b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-13b-v1.5")
+
+vicuna_bases_vllm = [vicuna_7b_v1_5_vllm, vicuna_13b_v1_5_vllm]
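The get_vllm_model helper lives in model_template.py, which is not part of this diff. For orientation, a minimal sketch of what such a helper plausibly returns, assuming OpenCompass's VLLM wrapper; the defaults (max_out_len, batch_size) are illustrative, not taken from this commit:

from opencompass.models import VLLM

# Hypothetical sketch of the helper added alongside get_default_model;
# the real model_template.py is not shown in this commit.
def get_vllm_model(abbr, path, meta_template=None, num_gpus=1):
    return dict(
        type=VLLM,                    # OpenCompass model class wrapping a vLLM engine
        abbr=abbr,                    # short name used in result tables
        path=path,                    # local checkpoint directory
        meta_template=meta_template,  # chat prompt template, if any
        max_out_len=400,              # illustrative default
        batch_size=32,                # illustrative default
        run_cfg=dict(num_gpus=num_gpus, num_procs=1),
    )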
Changed: the Mistral model config (imported downstream as ..local_models.mistral.mistral).

@@ -1,6 +1,8 @@
 from mmengine.config import read_base
 with read_base():
-    from ..model_template import get_default_model
+    from ..model_template import get_default_model, get_vllm_model
     from ...paths import ROOT_DIR

 mistral_7b = get_default_model(abbr="mistral-7b", path=f"{ROOT_DIR}models/mistralai/Mistral-7B-v0.1")
+
+mistral_7b_vllm = get_vllm_model(abbr="mistral-7b", path=f"{ROOT_DIR}models/mistralai/Mistral-7B-v0.1")
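These module-level objects are plain config values; downstream configs pick them up through mmengine's read_base, exactly as the new evaluation config later in this commit does. A minimal consumer mirroring that pattern:

from mmengine.config import read_base

with read_base():
    # read_base executes the imported config module and inlines its symbols
    from ..local_models.mistral.mistral import mistral_7b_vllm

models = [mistral_7b_vllm]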
One changed file was deleted (contents not shown).
Changed: the Yi model config (imported downstream as ..local_models.yi.yi).

@@ -1,17 +1,42 @@
 from mmengine.config import read_base
 with read_base():
     from ...paths import ROOT_DIR
-    from ..model_template import get_default_model
+    from ..model_template import get_default_model, get_vllm_model

+yi_meta_template = dict(
+    round=[
+        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>'),
+        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+    ],
+)
+
 yi_6b = get_default_model(abbr="yi-6b", path=f"{ROOT_DIR}models/01-ai/Yi-6B")
 yi_9b = get_default_model(abbr="yi-9b", path=f"{ROOT_DIR}models/01-ai/Yi-9B")
-yi_34b = get_default_model(abbr="yi-34b", path=f"{ROOT_DIR}models/01-ai/Yi-34B")
+yi_34b = get_default_model(abbr="yi-34b", path=f"{ROOT_DIR}models/01-ai/Yi-34B", num_gpus=2)

-yi_6b_chat = get_default_model(abbr="yi-6b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-6B-Chat")
-yi_9b_chat = get_default_model(abbr="yi-9b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-9B-Chat")
-yi_34b_chat = get_default_model(abbr="yi-34b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-34B-Chat")
+yi_6b_chat = get_default_model(abbr="yi-6b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-6B-Chat", meta_template=yi_meta_template)
+# yi_9b_chat = get_default_model(abbr="yi-9b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-9B-Chat", meta_template=yi_meta_template)
+yi_34b_chat = get_default_model(abbr="yi-34b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-34B-Chat", meta_template=yi_meta_template, num_gpus=4)

 yi_bases = [yi_6b, yi_9b, yi_34b]
-yi_chats = [yi_6b_chat, yi_9b_chat, yi_34b_chat]
-yi_all = yi_bases + yi_chats
+yi_chats = [yi_6b_chat,
+            # yi_9b_chat,
+            yi_34b_chat]
+yi_all = yi_bases + yi_chats
+
+
+yi_6b_vllm = get_vllm_model(abbr="yi-6b", path=f"{ROOT_DIR}models/01-ai/Yi-6B")
+yi_9b_vllm = get_vllm_model(abbr="yi-9b", path=f"{ROOT_DIR}models/01-ai/Yi-9B")
+yi_34b_vllm = get_vllm_model(abbr="yi-34b", path=f"{ROOT_DIR}models/01-ai/Yi-34B", num_gpus=2)
+
+yi_6b_chat_vllm = get_vllm_model(abbr="yi-6b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-6B-Chat", meta_template=yi_meta_template)
+# yi_9b_chat_vllm = get_vllm_model(abbr="yi-9b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-9B-Chat", meta_template=yi_meta_template)
+yi_34b_chat_vllm = get_vllm_model(abbr="yi-34b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-34B-Chat", meta_template=yi_meta_template, num_gpus=4)
+
+yi_bases_vllm = [yi_6b_vllm, yi_9b_vllm, yi_34b_vllm]
+yi_chats_vllm = [yi_6b_chat_vllm,
+                 # yi_9b_chat_vllm,
+                 yi_34b_chat_vllm]
+yi_all_vllm = yi_bases_vllm + yi_chats_vllm
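yi_meta_template encodes the ChatML-style markers the Yi chat models are trained on. Given its begin/end strings, OpenCompass would render a single-turn exchange roughly like this (the question text is a placeholder):

# Approximate rendered prompt for one HUMAN round followed by the BOT round;
# generate=True means the model's completion starts after the assistant tag.
prompt = (
    "<|im_start|>user\n"
    "How do I check disk usage on Linux?<|im_end|>"
    "<|im_start|>assistant\n"
)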
Added: a new vLLM evaluation config.

@@ -0,0 +1,69 @@
+from mmengine.config import read_base
+from opencompass.partitioners import SizePartitioner, NaivePartitioner
+from opencompass.runners import LocalRunner
+from opencompass.tasks import OpenICLInferTask, OpenICLEvalTask
+
+with read_base():
+    # Datasets
+    from ..datasets.opseval.datasets import all_gen_qa
+    # Models
+    from ..local_models.baichuan.baichuan import baichuan2_chats_vllm
+    from ..local_models.google.gemma import gemma_vllm
+    from ..local_models.internlm.internlm import internlm2_chats_vllm
+    from ..local_models.lmsys.vicuna import vicuna_bases_vllm
+    from ..local_models.mistral.mistral import mistral_7b_vllm
+    from ..local_models.qwen.qwen import qwen1_5_chat_vllm_models
+    from ..local_models.yi.yi import yi_all_vllm
+    from ..models.gpt_3dot5_turbo_peiqi import models as peiqi_models
+
+    from ..paths import ROOT_DIR
+
+
+datasets = [
+    *all_gen_qa
+]
+
+# datasets = [datasets[0]]
+
+models = [
+    *peiqi_models,
+    *baichuan2_chats_vllm,
+    *gemma_vllm,
+    *internlm2_chats_vllm,
+    *vicuna_bases_vllm,
+    *yi_all_vllm,
+    mistral_7b_vllm,
+    *qwen1_5_chat_vllm_models,
+]
+
+for dataset in datasets:
+    dataset['sample_setting'] = dict()
+    dataset['infer_cfg']['inferencer']['save_every'] = 8
+    dataset['infer_cfg']['inferencer']['sc_size'] = 1
+    dataset['infer_cfg']['inferencer']['max_token_len'] = 200
+    dataset['eval_cfg']['sc_size'] = 1
+    # dataset['sample_setting'] = dict(sample_size=2)  # !!!WARNING: Use for testing only!!!
+
+
+infer = dict(
+    partitioner=dict(
+        # type=SizePartitioner,
+        # max_task_size=100,
+        # gen_task_coef=1,
+        type=NaivePartitioner
+    ),
+    runner=dict(
+        type=LocalRunner,
+        max_num_workers=16,
+        max_workers_per_gpu=1,
+        task=dict(type=OpenICLInferTask),
+    ),
+)
+
+eval = dict(
+    partitioner=dict(type=NaivePartitioner),
+    runner=dict(
+        type=LocalRunner,
+        max_num_workers=32,
+        task=dict(type=OpenICLEvalTask)),
+)
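The per-dataset loop above assumes each entry in all_gen_qa is a nested config dict; the dataset definitions themselves are not in this excerpt. A hypothetical entry, reduced to the keys the loop touches:

# Illustrative shape only; field names beyond those set in the loop are guesses.
example_dataset = dict(
    abbr='opseval-gen-qa',         # placeholder dataset name
    infer_cfg=dict(
        inferencer=dict(
            save_every=8,          # flush predictions to disk every 8 samples
            sc_size=1,             # one self-consistency sample at inference
            max_token_len=200,     # cap on generated tokens per answer
        ),
    ),
    eval_cfg=dict(sc_size=1),      # matching self-consistency size at evaluation
    sample_setting=dict(),         # empty dict: run the full split
)

Leaving sample_setting empty evaluates the whole split; the commented-out sample_size=2 variant subsamples it for quick smoke tests, as the inline warning notes.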