merge remote

NickLennonLiu committed Apr 2, 2024
2 parents ad9cf3e + 646578e commit 8e0760a
Showing 21 changed files with 886 additions and 32 deletions.
23 changes: 19 additions & 4 deletions configs/datasets/opseval/datasets.py
@@ -74,6 +74,13 @@ def get_selected_datasets(setting_func, name, path, langs, qtypes):
('zjyd', f'{ROOT_DIR}data/opseval/zjyd/splitted_v2', ['zh'], ['single', 'multiple']),
]

company_qa_list = [
('bosc', f'{ROOT_DIR}data/opseval/bosc/splitted_v2', ['zh'], None),
('rzy', f'{ROOT_DIR}data/opseval/rzy/splitted_v2', ['zh'], None),
('zabbix', f'{ROOT_DIR}data/opseval/zabbix/splitted_v2', ['zh'], None),
('zjyd', f'{ROOT_DIR}data/opseval/zjyd/splitted_v2', ['zh'], None),
]

company_mc_ppl = sum([
get_selected_datasets([get_mc_ppl_datasets], name, path, langs, qtypes) for name, path, langs, qtypes in company_mc_list
], [])
@@ -82,16 +89,24 @@ def get_selected_datasets(setting_func, name, path, langs, qtypes):
], [])
company_mc = company_mc_ppl+company_mc_gen

company_qa_ppl = sum([
get_selected_datasets([get_qa_ppl_datasets], name, path, langs, qtypes) for name, path, langs, qtypes in company_qa_list
], [])
company_qa_gen = sum([
get_selected_datasets([get_qa_gen_datasets], name, path, langs, qtypes) for name, path, langs, qtypes in company_qa_list
], [])
company_qa = company_qa_ppl+company_qa_gen


-rzy_qa_ppl = get_selected_datasets([get_qa_ppl_datasets], 'rzy', f'{ROOT_DIR}data/opseval/rzy/splitted', langs=['zh'], qtypes=None)
-rzy_qa_gen = get_selected_datasets([get_qa_gen_datasets], 'rzy', f'{ROOT_DIR}data/opseval/rzy/splitted', langs=['zh'], qtypes=None)
+rzy_qa_ppl = get_selected_datasets([get_qa_ppl_datasets], 'rzy', f'{ROOT_DIR}data/opseval/rzy/splitted_v2', langs=['zh'], qtypes=None)
+rzy_qa_gen = get_selected_datasets([get_qa_gen_datasets], 'rzy', f'{ROOT_DIR}data/opseval/rzy/splitted_v2', langs=['zh'], qtypes=None)
rzy_qa = rzy_qa_ppl + rzy_qa_gen

all_ppl_mc = zte_mc_ppl + oracle_mc_ppl + owl_mc_ppl + network_mc_ppl + company_mc_ppl
all_gen_mc = zte_mc_gen + oracle_mc_gen + owl_mc_gen + network_mc_gen + company_mc_gen

-all_ppl_qa = owl_qa_ppl + rzy_qa_ppl
-all_gen_qa = owl_qa_gen + rzy_qa_gen
+all_ppl_qa = owl_qa_ppl + company_qa_ppl
+all_gen_qa = owl_qa_gen + company_qa_gen

ceval_mc_ppl = get_selected_datasets([get_mc_ppl_datasets], 'ceval', f'{ROOT_DIR}data/opseval/ceval/splitted_dev', langs=['zh'], qtypes=['single'])
ceval_mc_gen = get_selected_datasets([get_mc_gen_datasets], 'ceval', f'{ROOT_DIR}data/opseval/ceval/splitted_dev', langs=['zh'], qtypes=['single'])
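A note on the sum([...], []) pattern used throughout datasets.py above: giving sum an empty list as its start value makes it concatenate the inner lists rather than add numbers, flattening one level of nesting. A minimal illustration (the dataset names here are made up for the example):

# sum(list_of_lists, []) concatenates the inner lists left to right
nested = [["bosc-zh"], ["rzy-zh"], ["zabbix-zh", "zjyd-zh"]]
flat = sum(nested, [])  # ["bosc-zh", "rzy-zh", "zabbix-zh", "zjyd-zh"]
assert flat == [x for xs in nested for x in xs]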
12 changes: 11 additions & 1 deletion configs/local_models/baichuan/baichuan.py
@@ -3,7 +3,7 @@
from mmengine import read_base
with read_base():
from ...paths import ROOT_DIR
-from ..model_template import get_default_model
+from ..model_template import get_default_model, get_vllm_model
from ...api_key import baichuan_key

_meta_template = dict(
@@ -23,6 +23,16 @@
baichuan2_chats = [baichuan2_7b_chat, baichuan2_13b_chat]


baichuan2_7b_base_vllm = get_vllm_model(abbr="baichuan2-7b", path=f"{ROOT_DIR}models/baichuan-inc/Baichuan2-7B-Base")
baichuan2_7b_chat_vllm = get_vllm_model(abbr="baichuan2-7b-chat", path=f"{ROOT_DIR}models/baichuan-inc/Baichuan2-7B-Chat", meta_template=_meta_template)

baichuan2_13b_base_vllm = get_vllm_model(abbr="baichuan2-13b", path=f"{ROOT_DIR}models/baichuan-inc/Baichuan2-13B-Base")
baichuan2_13b_chat_vllm = get_vllm_model(abbr="baichuan2-13b-chat", path=f"{ROOT_DIR}models/baichuan-inc/Baichuan2-13B-Chat", meta_template=_meta_template, num_gpus=1)

baichuan2_bases_vllm = [baichuan2_7b_base_vllm, baichuan2_13b_base_vllm]
baichuan2_chats_vllm = [baichuan2_7b_chat_vllm, baichuan2_13b_chat_vllm]


baichuan2_turbo = dict(abbr='Baichuan2-Turbo',
type=BaichuanAPI, path='Baichuan2-Turbo', key=baichuan_key,
max_out_len=100, max_seq_len=2048, batch_size=1)
18 changes: 18 additions & 0 deletions configs/local_models/bert/bert.py
@@ -0,0 +1,18 @@
from opencompass.models import Bert

from mmengine.config import read_base
with read_base():
from ...paths import ROOT_DIR

bert_large_cased = dict(
type=Bert,
abbr='bert_large_cased',
path=ROOT_DIR+"models/google-bert/bert-large-cased",
tokenizer_path=ROOT_DIR+"models/google-bert/bert-large-cased",
tokenizer_kwargs=dict(trust_remote_code=True),
max_out_len=400,
max_seq_len=2048,
batch_size=8,
model_kwargs=dict(trust_remote_code=True),
run_cfg=dict(num_gpus=1, num_procs=1),
)
8 changes: 7 additions & 1 deletion configs/local_models/google/gemma.py
@@ -3,6 +3,7 @@
from mmengine.config import read_base
with read_base():
from ...paths import ROOT_DIR
from ..model_template import get_default_model, get_vllm_model

gemma_2b = dict(
type=HuggingFaceCausalLM,
@@ -28,4 +29,9 @@
batch_size=8,
model_kwargs=dict(device_map='auto', trust_remote_code=True),
run_cfg=dict(num_gpus=1, num_procs=1),
)

gemma_2b_vllm = get_vllm_model(abbr="gemma-2b", path=ROOT_DIR+"models/google/gemma-2b")
gemma_7b_vllm = get_vllm_model(abbr="gemma-7b", path=ROOT_DIR+"models/google/gemma-7b")

gemma_vllm = [gemma_2b_vllm, gemma_7b_vllm]
13 changes: 11 additions & 2 deletions configs/local_models/internlm/internlm.py
@@ -3,7 +3,7 @@
from mmengine.config import read_base
with read_base():
from ...paths import ROOT_DIR
-from ..model_template import get_default_model
+from ..model_template import get_default_model, get_vllm_model

internlm2_chat_7b = dict(
type=HuggingFaceCausalLM,
@@ -35,4 +35,13 @@
internlm2_20b = get_default_model(abbr="internlm2-20b", path=ROOT_DIR+"models/Shanghai_AI_Laboratory/internlm2-20b", num_gpus=2)

internlm2_bases = [internlm2_7b, internlm2_20b]
internlm2_chats = [internlm2_chat_20b, internlm2_chat_7b]

internlm2_7b_vllm = get_vllm_model(abbr="internlm2-7b", path=ROOT_DIR+"models/Shanghai_AI_Laboratory/internlm2-7b")
internlm2_20b_vllm = get_vllm_model(abbr="internlm2-20b", path=ROOT_DIR+"models/Shanghai_AI_Laboratory/internlm2-20b", num_gpus=4)

internlm2_chat_7b_vllm = get_vllm_model(abbr="internlm2-chat-7b", path=ROOT_DIR+"models/Shanghai_AI_Laboratory/internlm2-chat-7b")
internlm2_chat_20b_vllm = get_vllm_model(abbr="internlm2-chat-20b", path=ROOT_DIR+"models/Shanghai_AI_Laboratory/internlm2-chat-20b", num_gpus=4)

internlm2_bases_vllm = [internlm2_7b_vllm, internlm2_20b_vllm]
internlm2_chats_vllm = [internlm2_chat_20b_vllm, internlm2_chat_7b_vllm]
9 changes: 7 additions & 2 deletions configs/local_models/lmsys/vicuna.py
@@ -1,10 +1,15 @@
from mmengine.config import read_base
with read_base():
-from ..model_template import get_default_model
+from ..model_template import get_default_model, get_vllm_model
from ...paths import ROOT_DIR


vicuna_7b_v1_5 = get_default_model(abbr="vicuna-7b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-7b-v1.5")
vicuna_13b_v1_5 = get_default_model(abbr="vicuna-13b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-13b-v1.5")

vicuna_bases = [vicuna_7b_v1_5, vicuna_13b_v1_5]

vicuna_7b_v1_5_vllm = get_vllm_model(abbr="vicuna-7b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-7b-v1.5")
vicuna_13b_v1_5_vllm = get_vllm_model(abbr="vicuna-13b-v1.5", path=f"{ROOT_DIR}models/lmsys/vicuna-13b-v1.5")

vicuna_bases_vllm = [vicuna_7b_v1_5_vllm, vicuna_13b_v1_5_vllm]
6 changes: 4 additions & 2 deletions configs/local_models/mistral/mistral.py
@@ -1,6 +1,8 @@
from mmengine.config import read_base
with read_base():
-from ..model_template import get_default_model
+from ..model_template import get_default_model, get_vllm_model
from ...paths import ROOT_DIR

mistral_7b = get_default_model(abbr="mistral-7b", path=f"{ROOT_DIR}models/mistralai/Mistral-7B-v0.1")

mistral_7b_vllm = get_vllm_model(abbr="mistral-7b", path=f"{ROOT_DIR}models/mistralai/Mistral-7B-v0.1")
18 changes: 18 additions & 0 deletions configs/local_models/model_template.py
@@ -1,4 +1,5 @@
from opencompass.models.huggingface import HuggingFaceCausalLM
from opencompass.models import VLLM


def get_default_model(abbr, path, num_gpus=1, meta_template=None):
@@ -17,4 +18,21 @@ def get_default_model(abbr, path, num_gpus=1, meta_template=None):
batch_size=8,
model_kwargs=dict(device_map='auto', trust_remote_code=True),
run_cfg=dict(num_gpus=num_gpus, num_procs=1),
)

def get_vllm_model(abbr, path, num_gpus=1, meta_template=None, generation_kwargs=dict()):
return dict(
type=VLLM,
abbr=abbr,
path=path,
max_seq_len=2048,
model_kwargs=dict(trust_remote_code=True, max_model_len=2000, tensor_parallel_size=num_gpus, gpu_memory_utilization=0.95),
generation_kwargs=generation_kwargs,
meta_template=meta_template,
max_out_len=400,
mode='none',
batch_size=1,
use_fastchat_template=False,
run_cfg=dict(num_gpus=num_gpus, num_procs=1),
end_str=None,
)
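For orientation, a minimal sketch of how these two factory helpers are consumed by the configs in this commit; the abbr and path values below are placeholders, not files from the repository:

from mmengine.config import read_base

with read_base():
    # relative imports resolve against the config file's location under configs/
    from ..local_models.model_template import get_default_model, get_vllm_model
    from ..paths import ROOT_DIR

# Both helpers return plain config dicts that OpenCompass instantiates later:
# one HuggingFace-backed entry and one vLLM-backed entry for the same checkpoint.
demo_hf = get_default_model(abbr="demo-7b", path=f"{ROOT_DIR}models/org/Demo-7B")
demo_vllm = get_vllm_model(abbr="demo-7b", path=f"{ROOT_DIR}models/org/Demo-7B", num_gpus=1)

models = [demo_hf, demo_vllm]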
21 changes: 19 additions & 2 deletions configs/local_models/qwen/qwen.py
@@ -2,7 +2,7 @@
from mmengine.config import read_base
with read_base():
from ...paths import ROOT_DIR
-from ..model_template import get_default_model
+from ..model_template import get_default_model, get_vllm_model

qwen_meta_template = dict(
round=[
@@ -126,4 +126,21 @@
batch_size=8,
model_kwargs=dict(device_map='auto', trust_remote_code=True),
run_cfg=dict(num_gpus=1, num_procs=1),
)

qwen1_5_base_vllm_models = [
get_vllm_model(
abbr=f"qwen1.5-{quant}b-base",
path=f"{ROOT_DIR}models/Qwen/Qwen1.5-{quant}B",
num_gpus=1 if "72" not in quant else 4)
for quant in ["0.5", "1.8", "4", "7", "14", "72"]
]

qwen1_5_chat_vllm_models = [
get_vllm_model(
abbr=f"qwen1.5-{quant}b-chat",
meta_template=qwen_meta_template,
num_gpus=1 if "72" not in quant else 4,
path=f"{ROOT_DIR}models/Qwen/Qwen1.5-{quant}B-Chat")
for quant in ["0.5", "1.8", "4", "7", "14", "72"]
]
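Note that quant in the two comprehensions above iterates over model sizes in billions of parameters (0.5B through 72B), not quantization levels; each iteration yields one model config, and only the 72B variants are assigned four GPUs.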
39 changes: 32 additions & 7 deletions configs/local_models/yi/yi.py
@@ -1,17 +1,42 @@
from mmengine.config import read_base
with read_base():
from ...paths import ROOT_DIR
-from ..model_template import get_default_model
+from ..model_template import get_default_model, get_vllm_model

yi_meta_template = dict(
round=[
dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>'),
dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>', generate=True),
],
)

yi_6b = get_default_model(abbr="yi-6b", path=f"{ROOT_DIR}models/01-ai/Yi-6B")
yi_9b = get_default_model(abbr="yi-9b", path=f"{ROOT_DIR}models/01-ai/Yi-9B")
-yi_34b = get_default_model(abbr="yi-34b", path=f"{ROOT_DIR}models/01-ai/Yi-34B")
+yi_34b = get_default_model(abbr="yi-34b", path=f"{ROOT_DIR}models/01-ai/Yi-34B", num_gpus=2)

-yi_6b_chat = get_default_model(abbr="yi-6b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-6B-Chat")
-yi_9b_chat = get_default_model(abbr="yi-9b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-9B-Chat")
-yi_34b_chat = get_default_model(abbr="yi-34b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-34B-Chat")
+yi_6b_chat = get_default_model(abbr="yi-6b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-6B-Chat", meta_template=yi_meta_template)
+# yi_9b_chat = get_default_model(abbr="yi-9b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-9B-Chat", meta_template=yi_meta_template)
+yi_34b_chat = get_default_model(abbr="yi-34b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-34B-Chat", meta_template=yi_meta_template, num_gpus=4)

yi_bases = [yi_6b, yi_9b, yi_34b]
-yi_chats = [yi_6b_chat, yi_9b_chat, yi_34b_chat]
-yi_all = yi_bases + yi_chats
+yi_chats = [yi_6b_chat,
+            # yi_9b_chat,
+            yi_34b_chat]
+yi_all = yi_bases + yi_chats




yi_6b_vllm = get_vllm_model(abbr="yi-6b", path=f"{ROOT_DIR}models/01-ai/Yi-6B")
yi_9b_vllm = get_vllm_model(abbr="yi-9b", path=f"{ROOT_DIR}models/01-ai/Yi-9B")
yi_34b_vllm = get_vllm_model(abbr="yi-34b", path=f"{ROOT_DIR}models/01-ai/Yi-34B", num_gpus=2)

yi_6b_chat_vllm = get_vllm_model(abbr="yi-6b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-6B-Chat", meta_template=yi_meta_template)
# yi_9b_chat_vllm = get_vllm_model(abbr="yi-9b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-9B-Chat", meta_template=yi_meta_template)
yi_34b_chat_vllm = get_vllm_model(abbr="yi-34b-chat", path=f"{ROOT_DIR}models/01-ai/Yi-34B-Chat", meta_template=yi_meta_template, num_gpus=4)

yi_bases_vllm = [yi_6b_vllm, yi_9b_vllm, yi_34b_vllm]
yi_chats_vllm = [yi_6b_chat_vllm,
# yi_9b_chat_vllm,
yi_34b_chat_vllm]
yi_all_vllm = yi_bases_vllm + yi_chats_vllm
Empty file added configs/lyh/t5_all.py
69 changes: 69 additions & 0 deletions configs/lyh/vllm_qa.py
@@ -0,0 +1,69 @@
from mmengine.config import read_base
from opencompass.partitioners import SizePartitioner, NaivePartitioner
from opencompass.runners import LocalRunner
from opencompass.tasks import OpenICLInferTask, OpenICLEvalTask

with read_base():
# Datasets
from ..datasets.opseval.datasets import all_gen_qa
# Models
from ..local_models.baichuan.baichuan import baichuan2_chats_vllm
from ..local_models.google.gemma import gemma_vllm
from ..local_models.internlm.internlm import internlm2_chats_vllm
from ..local_models.lmsys.vicuna import vicuna_bases_vllm
from ..local_models.mistral.mistral import mistral_7b_vllm
from ..local_models.qwen.qwen import qwen1_5_chat_vllm_models
from ..local_models.yi.yi import yi_all_vllm
from ..models.gpt_3dot5_turbo_peiqi import models as peiqi_models

from ..paths import ROOT_DIR


datasets = [
*all_gen_qa
]

# datasets = [datasets[0]]

models = [
*peiqi_models,
*baichuan2_chats_vllm,
*gemma_vllm,
*internlm2_chats_vllm,
*vicuna_bases_vllm,
*yi_all_vllm,
mistral_7b_vllm,
*qwen1_5_chat_vllm_models,
]

for dataset in datasets:
dataset['sample_setting'] = dict()
dataset['infer_cfg']['inferencer']['save_every'] = 8
dataset['infer_cfg']['inferencer']['sc_size'] = 1
dataset['infer_cfg']['inferencer']['max_token_len'] = 200
dataset['eval_cfg']['sc_size'] = 1
# dataset['sample_setting'] = dict(sample_size=2) # !!!WARNING: Use for testing only!!!


infer = dict(
partitioner=dict(
# type=SizePartitioner,
# max_task_size=100,
# gen_task_coef=1,
type=NaivePartitioner
),
runner=dict(
type=LocalRunner,
max_num_workers=16,
max_workers_per_gpu=1,
task=dict(type=OpenICLInferTask),
),
)

eval = dict(
partitioner=dict(type=NaivePartitioner),
runner=dict(
type=LocalRunner,
max_num_workers=32,
task=dict(type=OpenICLEvalTask)),
)
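Assuming this fork keeps OpenCompass's standard entry point, the config above would be launched with something like python run.py configs/lyh/vllm_qa.py: the NaivePartitioner turns each model-dataset pair into its own task, and LocalRunner schedules up to 16 inference workers (at most one per GPU) and 32 evaluation workers on the local machine.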