eval_codet5.py (forked from abacaj/code-eval)
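"""Evaluate Salesforce/codet5p-770m with the code-eval harness.

Samples multiple completions per task via `generate_batch_completion` and
writes the generations to results/codet5p_770/eval.jsonl for scoring.
"""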
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    PreTrainedModel,
    PreTrainedTokenizer,
)

from core import run_eval, filter_code, fix_indents, standard_prompt
import os
import torch

# TODO: move to python-dotenv
# add hugging face access token here
TOKEN = ""


@torch.inference_mode()
def generate_batch_completion(
    model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prompt, batch_size
) -> list[str]:
    # Apply the standard prompt template and repeat it so every row of the
    # batch samples a completion for the same task.
    prompt_input = standard_prompt(prompt)
    input_batch = [prompt_input for _ in range(batch_size)]
    inputs = tokenizer(input_batch, return_tensors="pt").to(model.device)

    generated_ids = model.generate(
        **inputs,
        use_cache=True,
        max_new_tokens=512,
        temperature=0.2,
        top_p=0.95,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )

    batch_completions = tokenizer.batch_decode(
        generated_ids,
        skip_special_tokens=True,
    )

    # Post-process each decoded completion with the shared core helpers.
    return [filter_code(fix_indents(completion)) for completion in batch_completions]
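
# Illustrative usage sketch, assuming `model` and `tokenizer` are loaded as in
# the __main__ block below and `task_prompt` holds a single HumanEval-style
# prompt string:
#
#   completions = generate_batch_completion(model, tokenizer, task_prompt, 10)
#   # -> list of 10 post-processed completion strings for that one task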


if __name__ == "__main__":
    # adjust for n = 10 etc
    num_samples_per_task = 10
    out_path = "results/codet5p_770/eval.jsonl"
    os.makedirs("results/codet5p_770", exist_ok=True)

    tokenizer = AutoTokenizer.from_pretrained(
        "Salesforce/codet5p-770m",
        trust_remote_code=True,
        use_auth_token=TOKEN,
    )

    # Load the 770M CodeT5+ checkpoint in bfloat16, move it to the chosen GPU
    # (adjust the device index for your machine), and wrap it with torch.compile.
    # Note: use_auth_token is deprecated in newer transformers releases (use token=).
    model = torch.compile(
        AutoModelForSeq2SeqLM.from_pretrained(
            "Salesforce/codet5p-770m",
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            use_auth_token=TOKEN,
        )
        .eval()
        .to("cuda:5")
    )

    run_eval(
        model,
        tokenizer,
        num_samples_per_task,
        out_path,
        generate_batch_completion,
        True,
    )