sft.py
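"""Supervised fine-tuning (SFT) of a GPT-2 "adversary" on the toxic subset of
RealToxicityPrompts, using the HF Trainer with Accelerate state checkpointing
and Weights & Biases tracking."""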
import argparse
from datasets import load_dataset, Dataset
# from trl import SFTConfig, SFTTrainer # Had to install trl from source to get this to run
import json
import random
from accelerate import Accelerator
import torch
from lm import LanguageModel
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
from transformers import DataCollatorForLanguageModeling
from torch.optim.lr_scheduler import LambdaLR
from torch.optim import AdamW
import os
import wandb
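# Route all runs to the "ast_sft" W&B project; Accelerate handles logging
# and checkpoint state under sft_out/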
os.environ["WANDB_PROJECT"] = "ast_sft"
accelerator = Accelerator(project_dir="sft_out/", log_with="wandb")
R = random.Random(24)
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(DEVICE)
transformers.logging.set_verbosity_info()
# Train on toxic comments
# dataset = load_dataset("csv", data_files={"train":"toxic_comments.csv"})
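# Tokenization helpers. formatting_prompts_func and preprocess_function are
# only needed for the commented-out TRL SFTTrainer path; the Trainer pipeline
# below uses preprocess_rtp.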
def formatting_prompts_func(example):
    formatted_texts = []
    for text in example["text"]:
        formatted_texts.append(text)
    return formatted_texts

def preprocess_function(example, tokenizer):
    x = tokenizer.encode(example["text"], add_special_tokens=True, max_length=1024, truncation=True)
    return {"input_ids": x}

def preprocess_rtp(example, tokenizer):
    x = tokenizer.encode(example["prompt"], add_special_tokens=True, max_length=1024, truncation=True)
    return {"input_ids": x}
# Train on RTP: keep only prompts whose continuation is rated toxic
# (toxicity > 0.5), randomly lowercasing the first character of each prompt
# and continuation as light augmentation.
with open("prompts.jsonl", 'r') as df:
    data = [json.loads(line) for line in df]
prompts_rtp = [{"prompt": R.choice([i["prompt"]["text"][0].lower(),
                                    i["prompt"]["text"][0]]) + i["prompt"]["text"][1:],
                "continuation": R.choice([i["continuation"]["text"][0].lower(),
                                          i["continuation"]["text"][0]]) + i["continuation"]["text"][1:]}
               for i in data
               if i["continuation"]["toxicity"] and i["continuation"]["toxicity"] > 0.5]
dataset = Dataset.from_list(prompts_rtp)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='SFT Trainer')
    parser.add_argument('--weights', type=str, help='which model shall we evaluate?',
                        default="openai-community/gpt2")
    parser.add_argument('--defense', type=str, help='what weights should we use for defense?',
                        default="openai-community/gpt2")
    parser.add_argument('--warmup_steps', type=int, default=150,
                        help='number of warmup steps')
    args = parser.parse_args()
    # Set up tracking
    accelerator.init_trackers(project_name="ast_sft")
    # SFT model will be the adversary
    adversary = LanguageModel(dont_init=True)
    adversary.model = AutoModelForCausalLM.from_pretrained(args.weights)
    adversary.tokenizer = AutoTokenizer.from_pretrained(args.weights)
    # Init defender
    defender = LanguageModel(dont_init=True)
    defender.model = AutoModelForCausalLM.from_pretrained(args.defense)
    defender.tokenizer = AutoTokenizer.from_pretrained(args.defense)
    defender.model.eval()
    # GPT-2 doesn't have a padding token, so reuse EOS for padding
    adversary.tokenizer.pad_token = adversary.tokenizer.eos_token
    defender.tokenizer.pad_token = defender.tokenizer.eos_token
    adversary.tokenizer.pad_token_id = adversary.tokenizer.eos_token_id
    defender.tokenizer.pad_token_id = defender.tokenizer.eos_token_id
    # Optimizer; default LR from HF SFTConfig
    optimizer = AdamW(adversary.model.parameters(), lr=5e-05)
    # Linear warmup from ~0 to the base LR over warmup_steps, then constant
    scheduler = LambdaLR(optimizer, lr_lambda=lambda step: min(1.0, (step + 1) / (args.warmup_steps + 1)))
    # Prepare the models; the optimizer and scheduler are handed to the
    # Trainer below, which prepares them itself
    (adversary.model, defender.model) = accelerator.prepare(adversary.model, defender.model)
    # Register the LR scheduler so it is included in accelerator checkpoints
    accelerator.register_for_checkpointing(scheduler)
    # Save the starting state
    accelerator.save_state("sft/")
    print("cuda?", torch.cuda.is_available())
    # Watch gradients/parameters in W&B
    wandb.watch(adversary.model)
    # Causal-LM collator: labels are the input_ids (shifted internally), no masking
    data_collator = DataCollatorForLanguageModeling(tokenizer=adversary.tokenizer, mlm=False)
    tokenized_dataset = dataset.map(
        lambda x: preprocess_rtp(x, adversary.tokenizer),
    )
    training_args = TrainingArguments(
        output_dir="sft_out/",
        learning_rate=5e-5,
        logging_first_step=True,
        report_to="wandb",
        save_steps=10000,
        num_train_epochs=20,
        max_steps=10000,  # takes precedence over num_train_epochs
        per_device_train_batch_size=8,
        # NOTE: load_best_model_at_end would require an eval dataset and a
        # matching eval/save strategy, neither of which is configured here
    )
    trainer = Trainer(
        model=adversary.model,
        args=training_args,
        train_dataset=tokenized_dataset,
        data_collator=data_collator,
        # Without this, the Trainer silently builds its own optimizer and
        # scheduler, and the warmup schedule above is never used
        optimizers=(optimizer, scheduler),
    )
    trainer.train()
    trainer.save_model("best")
    accelerator.end_training()
# TODO: save best weights; save every 2000 steps
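# Example invocation (assumes a RealToxicityPrompts prompts.jsonl in the
# working directory):
#   accelerate launch sft.py --weights openai-community/gpt2 --warmup_steps 150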