diff --git a/README.md b/README.md
index 6d04f0c9e..277f25fb8 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
*Visual instruction tuning towards large language and vision models with GPT-4 level capabilities.*
-[[Project Page](https://llava-vl.github.io/)] [[Demo](https://llava.hliu.cc/)] [[Data](https://github.com/haotian-liu/LLaVA/blob/main/docs/Data.md)] [[Model Zoo](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)]
+[📢 [LLaVA-1.6 Blog](https://llava-vl.github.io/blog/2024-01-30-llava-1-6/)] [[Project Page](https://llava-vl.github.io/)] [[Demo](https://llava.hliu.cc/)] [[Data](https://github.com/haotian-liu/LLaVA/blob/main/docs/Data.md)] [[Model Zoo](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)]
🤝Community Contributions: [[llama.cpp](https://github.com/ggerganov/llama.cpp/pull/3436)] [[Colab](https://github.com/camenduru/LLaVA-colab)] [[🤗Space](https://huggingface.co/spaces/badayvedat/LLaVA)] [[Replicate](https://replicate.com/yorickvp/llava-13b)] [[AutoGen](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_lmm_llava.ipynb)] [[BakLLaVA](https://github.com/SkunkworksAI/BakLLaVA)]
@@ -19,6 +19,7 @@
## Release
+- [1/30] 🔥 LLaVA-1.6 is out! With additional scaling to LLaVA-1.5, LLaVA-1.6-34B outperforms Gemini Pro. It can now process 4x more pixels and perform more tasks/applications than before. Check out the [blog post](https://llava-vl.github.io/blog/2024-01-30-llava-1-6/), and explore the [demo](https://llava.hliu.cc/)! Models are available in [Model Zoo](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md). Training/eval data and scripts coming soon.
- [11/10] [LLaVA-Plus](https://llava-vl.github.io/llava-plus/) is released: Learning to Use Tools for Creating Multimodal Agents, with LLaVA-Plus (LLaVA that Plug and Learn to Use Skills). [[Project Page](https://llava-vl.github.io/llava-plus/)] [[Demo](https://llavaplus.ngrok.io/)] [[Code](https://github.com/LLaVA-VL/LLaVA-Plus-Codebase)] [[Paper](https://arxiv.org/abs/2311.05437)]
- [11/2] [LLaVA-Interactive](https://llava-vl.github.io/llava-interactive/) is released: Experience the future of human-AI multimodal interaction with an all-in-one demo for Image Chat, Segmentation, Generation and Editing. [[Project Page](https://llava-vl.github.io/llava-interactive/)] [[Demo](https://llavainteractive.ngrok.io/)] [[Code](https://github.com/LLaVA-VL/LLaVA-Interactive-Demo)] [[Paper](https://arxiv.org/abs/2311.00571)]
- [10/26] 🔥 LLaVA-1.5 with LoRA achieves comparable performance as full-model finetuning, with a reduced GPU RAM requirement ([ckpts](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md#llava-v15), [script](https://github.com/haotian-liu/LLaVA#train)). We also provide a [doc](https://github.com/haotian-liu/LLaVA/blob/main/docs/Finetune_Custom_Data.md) on how to finetune LLaVA-1.5 on your own dataset with LoRA.
diff --git a/docs/MODEL_ZOO.md b/docs/MODEL_ZOO.md
index 2ab48d58a..577218226 100644
--- a/docs/MODEL_ZOO.md
+++ b/docs/MODEL_ZOO.md
@@ -4,7 +4,19 @@
If you are interested in including any other details in Model Zoo, please open an issue :)
-The model weights below are *merged* weights. You do not need to apply delta. The usage of LLaVA checkpoints should comply with the base LLM's model license: [Llama 2](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md).
+The model weights below are *merged* weights. You do not need to apply delta. The usage of LLaVA checkpoints should comply with the base LLM's model license.
+
+## LLaVA-v1.6
+
+| Version | LLM | Schedule | Checkpoint | MMMU | MathVista | VQAv2 | GQA | VizWiz | SQA | TextVQA | POPE | MME | MM-Bench | MM-Bench-CN | SEED-IMG | LLaVA-Bench-Wild | MM-Vet |
+|----------|----------|-----------|-----------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
+| LLaVA-1.6 | Vicuna-7B | full_ft-1e | [liuhaotian/llava-v1.6-vicuna-7b](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b) | 35.8 | 34.6 | 81.8 | 64.2 | 57.6 | 70.1 | 64.9 | 86.5 | 1519/332 | 67.4 | 60.6 | 70.2 | 81.6 | 43.9 |
+| LLaVA-1.6 | Vicuna-13B | full_ft-1e | [liuhaotian/llava-v1.6-vicuna-13b](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-13b) | 36.2 | 35.3 | 82.8 | 65.4 | 60.5 | 73.6 | 67.1 | 86.2 | 1575/326 | 70 | 64.4 | 71.9 | 87.3 | 48.4 |
+| LLaVA-1.6 | Mistral-7B | full_ft-1e | [liuhaotian/llava-v1.6-mistral-7b](https://huggingface.co/liuhaotian/llava-v1.6-mistral-7b) | 35.3 | 37.7 | 82.2 | 64.8 | 60.0 | 72.8 | 65.7 | 86.7 | 1498/321 | 68.7 | 61.2 | 72.2 | 83.2 | 47.3 |
+| LLaVA-1.6 | Hermes-Yi-34B | full_ft-1e | [liuhaotian/llava-v1.6-34b](https://huggingface.co/liuhaotian/llava-v1.6-34b) | 51.1 | 46.5 | 83.7 | 67.1 | 63.8 | 81.8 | 69.5 | 87.7 | 1631/397 | 79.3 | 79 | 75.9 | 89.6 | 57.4 |
+
+*LLaVA-1.6-34B outperforms Gemini Pro on benchmarks like MMMU and MathVista.*
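+
+As a quick sanity check, here is a minimal loading sketch for one of the checkpoints above (it mirrors how the eval scripts call `load_pretrained_model`; the checkpoint name is taken from the table, everything else follows the repo's builder API):
+
+```python
+from llava.model.builder import load_pretrained_model
+from llava.mm_utils import get_model_name_from_path
+
+model_path = "liuhaotian/llava-v1.6-vicuna-7b"
+tokenizer, model, image_processor, context_len = load_pretrained_model(
+    model_path, None, get_model_name_from_path(model_path)
+)
+```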
+
## LLaVA-v1.5
diff --git a/llava/conversation.py b/llava/conversation.py
index 0025f5b11..65ee05d5e 100644
--- a/llava/conversation.py
+++ b/llava/conversation.py
@@ -68,7 +68,7 @@ def get_prompt(self):
else:
ret += role
elif self.sep_style == SeparatorStyle.LLAMA_2:
- wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n"
+ wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
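+ # With an empty system message (as in the mistral_instruct template added
+ # below), this now yields plain "[INST] {user} [/INST]" prompts instead of
+ # inserting empty <<SYS>> ... <</SYS>> tags.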
wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
ret = ""
@@ -357,6 +357,28 @@ def dict(self):
version="v1_mmtag",
)
+conv_mistral_instruct = Conversation(
+ system="",
+ roles=("USER", "ASSISTANT"),
+ version="llama_v2",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.LLAMA_2,
+ sep="",
+ sep2="",
+)
+
+conv_chatml_direct = Conversation(
+ system="""<|im_start|>system
+Answer the questions.""",
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
+ version="mpt",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.MPT,
+ sep="<|im_end|>",
+)
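+
+# Rendered prompt sketches for the two new templates:
+#   chatml_direct    -> "<|im_start|>system\nAnswer the questions.<|im_end|>"
+#                       "<|im_start|>user\n{question}<|im_end|><|im_start|>assistant\n"
+#   mistral_instruct -> "[INST] {question} [/INST]" (LLAMA_2 formatting with an
+#                       empty system message, see wrap_sys above)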
+
default_conversation = conv_vicuna_v1
conv_templates = {
"default": conv_vicuna_v0,
@@ -364,6 +386,9 @@ def dict(self):
"v1": conv_vicuna_v1,
"vicuna_v1": conv_vicuna_v1,
"llama_2": conv_llama_2,
+ "mistral_instruct": conv_mistral_instruct,
+ "chatml_direct": conv_chatml_direct,
+ "mistral_direct": conv_chatml_direct,
"plain": conv_llava_plain,
"v0_plain": conv_llava_plain,
diff --git a/llava/eval/model_vqa.py b/llava/eval/model_vqa.py
index 59dca734c..938706438 100644
--- a/llava/eval/model_vqa.py
+++ b/llava/eval/model_vqa.py
@@ -9,7 +9,7 @@
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
-from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
+from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
from PIL import Image
import math
@@ -55,17 +55,14 @@ def eval_model(args):
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
- image = Image.open(os.path.join(args.image_folder, image_file))
- image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
-
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
- keywords = [stop_str]
- stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
+ image = Image.open(os.path.join(args.image_folder, image_file)).convert('RGB')
+ image_tensor = process_images([image], image_processor, model.config)[0]
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=image_tensor.unsqueeze(0).half().cuda(),
+ image_sizes=[image.size],
do_sample=True if args.temperature > 0 else False,
temperature=args.temperature,
top_p=args.top_p,
@@ -74,15 +71,7 @@ def eval_model(args):
max_new_tokens=1024,
use_cache=True)
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[:-len(stop_str)]
- outputs = outputs.strip()
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
ans_id = shortuuid.uuid()
ans_file.write(json.dumps({"question_id": idx,
diff --git a/llava/eval/model_vqa_loader.py b/llava/eval/model_vqa_loader.py
index a50944db6..d435b7d83 100644
--- a/llava/eval/model_vqa_loader.py
+++ b/llava/eval/model_vqa_loader.py
@@ -55,17 +55,24 @@ def __getitem__(self, index):
input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
- return input_ids, image_tensor
+ return input_ids, image_tensor, image.size
def __len__(self):
return len(self.questions)
+def collate_fn(batch):
+ input_ids, image_tensors, image_sizes = zip(*batch)
+ input_ids = torch.stack(input_ids, dim=0)
+ image_tensors = torch.stack(image_tensors, dim=0)
+ return input_ids, image_tensors, image_sizes
+
+
# DataLoader
def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4):
assert batch_size == 1, "batch_size must be 1"
dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config)
- data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
+ data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False, collate_fn=collate_fn)
return data_loader
@@ -88,7 +95,7 @@ def eval_model(args):
data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config)
- for (input_ids, image_tensor), line in tqdm(zip(data_loader, questions), total=len(questions)):
+ for (input_ids, image_tensor, image_sizes), line in tqdm(zip(data_loader, questions), total=len(questions)):
idx = line["question_id"]
cur_prompt = line["text"]
@@ -98,6 +105,7 @@ def eval_model(args):
output_ids = model.generate(
input_ids,
images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True),
+ image_sizes=image_sizes,
do_sample=True if args.temperature > 0 else False,
temperature=args.temperature,
top_p=args.top_p,
@@ -105,12 +113,7 @@ def eval_model(args):
max_new_tokens=args.max_new_tokens,
use_cache=True)
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
ans_id = shortuuid.uuid()
ans_file.write(json.dumps({"question_id": idx,
diff --git a/llava/eval/model_vqa_mmbench.py b/llava/eval/model_vqa_mmbench.py
index 2ffec1b59..bd7a4c808 100644
--- a/llava/eval/model_vqa_mmbench.py
+++ b/llava/eval/model_vqa_mmbench.py
@@ -106,14 +106,12 @@ def eval_model(args):
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
image_tensor = process_images([image], image_processor, model.config)[0]
- # image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
-
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=image_tensor.unsqueeze(0).half().cuda(),
+ image_sizes=[image.size],
do_sample=True if args.temperature > 0 else False,
temperature=args.temperature,
top_p=args.top_p,
@@ -122,15 +120,7 @@ def eval_model(args):
max_new_tokens=1024,
use_cache=True)
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[:-len(stop_str)]
- outputs = outputs.strip()
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
ans_id = shortuuid.uuid()
ans_file.write(json.dumps({"question_id": idx,
diff --git a/llava/eval/model_vqa_qbench.py b/llava/eval/model_vqa_qbench.py
deleted file mode 100644
index f3ca8177c..000000000
--- a/llava/eval/model_vqa_qbench.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import argparse
-import torch
-from tqdm import tqdm
-import json
-
-from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
-from llava.conversation import conv_templates, SeparatorStyle
-from llava.model.builder import load_pretrained_model
-from llava.utils import disable_torch_init
-from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
-
-from PIL import Image
-
-import requests
-from PIL import Image
-from io import BytesIO
-
-
-def load_image(image_file):
- if image_file.startswith('http') or image_file.startswith('https'):
- response = requests.get(image_file)
- image = Image.open(BytesIO(response.content)).convert('RGB')
- else:
- image = Image.open(image_file).convert('RGB')
- return image
-
-
-def eval_model(args):
- # Model
- disable_torch_init()
-
- model_name = get_model_name_from_path(args.model_path)
- tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, True)
-
-
-
-
- with open(args.questions_file) as f:
- llvqa_data = json.load(f)
-
- for i, llddata in enumerate(tqdm(llvqa_data)):
- filename = llddata["img_path"]
- if args.lang == "en":
- message = llddata["question"] + "\nChoose between one of the options as follows:\n"
- elif args.lang == "zh":
- message = llddata["question"] + "\在下列选项中选择一个:\n"
- else:
- raise NotImplementedError("Q-Bench does not support languages other than English (en) and Chinese (zh) yet. Contact us (https://github.com/VQAssessment/Q-Bench/) to convert Q-Bench into more languages.")
- for choice, ans in zip(["A.", "B.", "C.", "D."], llddata["candidates"]):
- message += f"{choice} {ans}\n"
- qs = message
-
- if model.config.mm_use_im_start_end:
- qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
- else:
- qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
-
- if 'llama-2' in model_name.lower():
- conv_mode = "llava_llama_2"
- elif "v1" in model_name.lower():
- conv_mode = "llava_v1"
- elif "mpt" in model_name.lower():
- conv_mode = "mpt"
- else:
- conv_mode = "llava_v0"
-
- if args.conv_mode is not None and conv_mode != args.conv_mode:
- print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
- else:
- args.conv_mode = conv_mode
-
- conv = conv_templates[args.conv_mode].copy()
- conv.append_message(conv.roles[0], qs)
- conv.append_message(conv.roles[1], None)
- prompt = conv.get_prompt()
-
- image = load_image(args.image_folder + filename)
- image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda()
-
- input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
-
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
- keywords = [stop_str]
- stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
-
-
- with torch.inference_mode():
- output_ids = model.generate(
- input_ids,
- images=image_tensor,
- num_beams=1,
- do_sample=False,
- temperature=0,
- max_new_tokens=1024,
- use_cache=True,
- stopping_criteria=[stopping_criteria])
-
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[:-len(stop_str)]
- outputs = outputs.strip()
- llddata["response"] = outputs
- with open(args.answers_file, "a") as wf:
- json.dump(llddata, wf)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--model-path", type=str, default="llava-v1.5")
- parser.add_argument("--model-base", type=str, default=None)
- parser.add_argument("--image-folder", type=str, default="./playground/data/qbench/images_llvisionqa")
- parser.add_argument("--questions-file", type=str, default="./playground/data/qbench/llvisionqa_dev.json")
- parser.add_argument("--answers-file", type=str, default="answer.jsonl")
- parser.add_argument("--conv-mode", type=str, default="llava_v1")
- parser.add_argument("--lang", type=str, default="en")
- args = parser.parse_args()
-
- eval_model(args)
diff --git a/llava/eval/model_vqa_science.py b/llava/eval/model_vqa_science.py
index e99501f2b..90fc681a2 100644
--- a/llava/eval/model_vqa_science.py
+++ b/llava/eval/model_vqa_science.py
@@ -9,7 +9,7 @@
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
-from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
+from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
from PIL import Image
import math
@@ -47,8 +47,9 @@ def eval_model(args):
if 'image' in line:
image_file = line["image"]
image = Image.open(os.path.join(args.image_folder, image_file))
- image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
+ image_tensor = process_images([image], image_processor, model.config)[0]
images = image_tensor.unsqueeze(0).half().cuda()
+ image_sizes = [image.size]
if getattr(model.config, 'mm_use_im_start_end', False):
qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
else:
@@ -56,6 +57,7 @@ def eval_model(args):
cur_prompt = '' + '\n' + cur_prompt
else:
images = None
+ image_sizes = None
if args.single_pred_prompt:
qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
@@ -68,56 +70,18 @@ def eval_model(args):
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
- keywords = [stop_str]
- stopping_criteria = [KeywordsStoppingCriteria(keywords, tokenizer, input_ids)] if conv.version == "v0" else None
-
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=images,
+ image_sizes=image_sizes,
do_sample=True if args.temperature > 0 else False,
temperature=args.temperature,
max_new_tokens=1024,
use_cache=True,
- stopping_criteria=stopping_criteria,
)
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[:-len(stop_str)]
- outputs = outputs.strip()
-
- # prompt for answer
- if args.answer_prompter:
- outputs_reasoning = outputs
- input_ids = tokenizer_image_token(prompt + outputs_reasoning + ' ###\nANSWER:', tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
-
- with torch.inference_mode():
- output_ids = model.generate(
- input_ids,
- images=images,
- do_sample=True if args.temperature > 0 else False,
- temperature=args.temperature,
- max_new_tokens=64,
- use_cache=True,
- stopping_criteria=[stopping_criteria])
-
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[:-len(stop_str)]
- outputs = outputs.strip()
- outputs = outputs_reasoning + '\n The answer is ' + outputs
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
ans_id = shortuuid.uuid()
ans_file.write(json.dumps({"question_id": idx,
diff --git a/llava/mm_utils.py b/llava/mm_utils.py
index c4b8862ff..71092ff1d 100644
--- a/llava/mm_utils.py
+++ b/llava/mm_utils.py
@@ -1,12 +1,150 @@
from PIL import Image
from io import BytesIO
import base64
-
import torch
+import math
+import ast
+
from transformers import StoppingCriteria
from llava.constants import IMAGE_TOKEN_INDEX
+def select_best_resolution(original_size, possible_resolutions):
+ """
+ Selects the best resolution from a list of possible resolutions based on the original size.
+
+ Args:
+ original_size (tuple): The original size of the image in the format (width, height).
+ possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
+
+ Returns:
+ tuple: The best fit resolution in the format (width, height).
+ """
+ original_width, original_height = original_size
+ best_fit = None
+ max_effective_resolution = 0
+ min_wasted_resolution = float('inf')
+
+ for width, height in possible_resolutions:
+ scale = min(width / original_width, height / original_height)
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
+ wasted_resolution = (width * height) - effective_resolution
+
+ if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
+ max_effective_resolution = effective_resolution
+ min_wasted_resolution = wasted_resolution
+ best_fit = (width, height)
+
+ return best_fit
+
+
+def resize_and_pad_image(image, target_resolution):
+ """
+ Resize and pad an image to a target resolution while maintaining aspect ratio.
+
+ Args:
+ image (PIL.Image.Image): The input image.
+ target_resolution (tuple): The target resolution (width, height) of the image.
+
+ Returns:
+ PIL.Image.Image: The resized and padded image.
+ """
+ original_width, original_height = image.size
+ target_width, target_height = target_resolution
+
+ scale_w = target_width / original_width
+ scale_h = target_height / original_height
+
+ if scale_w < scale_h:
+ new_width = target_width
+ new_height = min(math.ceil(original_height * scale_w), target_height)
+ else:
+ new_height = target_height
+ new_width = min(math.ceil(original_width * scale_h), target_width)
+
+ # Resize the image
+ resized_image = image.resize((new_width, new_height))
+
+ new_image = Image.new('RGB', (target_width, target_height), (0, 0, 0))
+ paste_x = (target_width - new_width) // 2
+ paste_y = (target_height - new_height) // 2
+ new_image.paste(resized_image, (paste_x, paste_y))
+
+ return new_image
+
+
+def divide_to_patches(image, patch_size):
+ """
+ Divides an image into patches of a specified size.
+
+ Args:
+ image (PIL.Image.Image): The input image.
+ patch_size (int): The size of each patch.
+
+ Returns:
+ list: A list of PIL.Image.Image objects representing the patches.
+ """
+ patches = []
+ width, height = image.size
+ for i in range(0, height, patch_size):
+ for j in range(0, width, patch_size):
+ box = (j, i, j + patch_size, i + patch_size)
+ patch = image.crop(box)
+ patches.append(patch)
+
+ return patches
+
+
+def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
+ """
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
+
+ Args:
+ image_size (tuple): The size of the input image in the format (width, height).
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
+ patch_size (int): The size of each image patch.
+
+ Returns:
+ tuple: The shape of the image patch grid in the format (width, height).
+ """
+ if type(grid_pinpoints) is list:
+ possible_resolutions = grid_pinpoints
+ else:
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
+ width, height = select_best_resolution(image_size, possible_resolutions)
+ return width // patch_size, height // patch_size
+
+
+def process_anyres_image(image, processor, grid_pinpoints):
+ """
+ Process an image with variable resolutions.
+
+ Args:
+ image (PIL.Image.Image): The input image to be processed.
+ processor: The image processor object.
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
+
+ Returns:
+ torch.Tensor: A tensor containing the processed image patches.
+ """
+ if type(grid_pinpoints) is list:
+ possible_resolutions = grid_pinpoints
+ else:
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
+ best_resolution = select_best_resolution(image.size, possible_resolutions)
+ image_padded = resize_and_pad_image(image, best_resolution)
+
+ patches = divide_to_patches(image_padded, processor.crop_size['height'])
+
+ image_original_resize = image.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
+
+ image_patches = [image_original_resize] + patches
+ image_patches = [processor.preprocess(image_patch, return_tensors='pt')['pixel_values'][0]
+ for image_patch in image_patches]
+ return torch.stack(image_patches, dim=0)
+
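+# Worked example (hypothetical sizes, assuming a 336px CLIP-ViT-L/14 processor):
+# a 1000x600 image with grid_pinpoints "[(336, 672), (672, 336), (672, 672),
+# (1008, 336), (336, 1008)]" selects (672, 672) as the best resolution, is
+# padded into that canvas, and is cut into a 2x2 grid of 336px crops, so
+# process_anyres_image returns a (5, 3, 336, 336) tensor (global resize + 4
+# crops) and get_anyres_image_grid_shape returns (2, 2).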
+
def load_image_from_base64(image):
return Image.open(BytesIO(base64.b64decode(image)))
@@ -33,6 +171,10 @@ def process_images(images, image_processor, model_cfg):
image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))
image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
new_images.append(image)
+ elif image_aspect_ratio == "anyres":
+ for image in images:
+ image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints)
+ new_images.append(image)
else:
return image_processor(images, return_tensors='pt')['pixel_values']
if all(x.shape == new_images[0].shape for x in new_images):
diff --git a/llava/model/__init__.py b/llava/model/__init__.py
index fa7996054..dbd91789f 100644
--- a/llava/model/__init__.py
+++ b/llava/model/__init__.py
@@ -1,2 +1,6 @@
-from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig
-from .language_model.llava_mpt import LlavaMPTForCausalLM, LlavaMPTConfig
+try:
+ from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig
+ from .language_model.llava_mpt import LlavaMptForCausalLM, LlavaMptConfig
+ from .language_model.llava_mistral import LlavaMistralForCausalLM, LlavaMistralConfig
+except:
+ pass
diff --git a/llava/model/builder.py b/llava/model/builder.py
index ca9c81a7a..33edbd100 100644
--- a/llava/model/builder.py
+++ b/llava/model/builder.py
@@ -101,6 +101,14 @@ def load_from_hf(repo_id, filename, subfolder=None):
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
+ elif 'mistral' in model_name.lower():
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = LlavaMistralForCausalLM.from_pretrained(
+ model_path,
+ low_cpu_mem_usage=True,
+ use_flash_attention_2=False,
+ **kwargs
+ )
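+ # Model names containing "mistral" (e.g. liuhaotian/llava-v1.6-mistral-7b
+ # from the Model Zoo) take this path; other non-MPT names fall through to
+ # the Llama-based loader below.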
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
diff --git a/llava/model/language_model/llava_llama.py b/llava/model/language_model/llava_llama.py
index 58ccb3057..069d0d1c1 100644
--- a/llava/model/language_model/llava_llama.py
+++ b/llava/model/language_model/llava_llama.py
@@ -22,12 +22,13 @@
LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
class LlavaConfig(LlamaConfig):
- model_type = "llava"
+ model_type = "llava_llama"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
@@ -65,6 +66,7 @@ def forward(
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
+ image_sizes: Optional[List[List[int]]] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
@@ -82,7 +84,8 @@ def forward(
attention_mask,
past_key_values,
labels,
- images
+ images,
+ image_sizes
)
return super().forward(
@@ -98,14 +101,58 @@ def forward(
return_dict=return_dict
)
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ images: Optional[torch.Tensor] = None,
+ image_sizes: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> Union[GenerateOutput, torch.LongTensor]:
+ position_ids = kwargs.pop("position_ids", None)
+ attention_mask = kwargs.pop("attention_mask", None)
+ if "inputs_embeds" in kwargs:
+ raise NotImplementedError("`inputs_embeds` is not supported")
+
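+ # Multimodal inputs are folded into inputs_embeds up front, so the HF
+ # generate() call below runs on embeddings rather than input_ids.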
+ if images is not None:
+ (
+ inputs,
+ position_ids,
+ attention_mask,
+ _,
+ inputs_embeds,
+ _
+ ) = self.prepare_inputs_labels_for_multimodal(
+ inputs,
+ position_ids,
+ attention_mask,
+ None,
+ None,
+ images,
+ image_sizes=image_sizes
+ )
+ else:
+ inputs_embeds = self.get_model().embed_tokens(inputs)
+
+ return super().generate(
+ position_ids=position_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ **kwargs
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
+ inputs_embeds=None, **kwargs):
images = kwargs.pop("images", None)
- _inputs = super().prepare_inputs_for_generation(
+ image_sizes = kwargs.pop("image_sizes", None)
+ inputs = super().prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
)
if images is not None:
- _inputs['images'] = images
- return _inputs
+ inputs['images'] = images
+ if image_sizes is not None:
+ inputs['image_sizes'] = image_sizes
+ return inputs
-AutoConfig.register("llava", LlavaConfig)
+AutoConfig.register("llava_llama", LlavaConfig)
AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
diff --git a/llava/model/language_model/llava_mistral.py b/llava/model/language_model/llava_mistral.py
new file mode 100644
index 000000000..0def682ea
--- /dev/null
+++ b/llava/model/language_model/llava_mistral.py
@@ -0,0 +1,158 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+from transformers import AutoConfig, AutoModelForCausalLM, \
+ MistralConfig, MistralModel, MistralForCausalLM
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
+
+from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+
+
+class LlavaMistralConfig(MistralConfig):
+ model_type = "llava_mistral"
+
+
+class LlavaMistralModel(LlavaMetaModel, MistralModel):
+ config_class = LlavaMistralConfig
+
+ def __init__(self, config: MistralConfig):
+ super(LlavaMistralModel, self).__init__(config)
+
+
+class LlavaMistralForCausalLM(MistralForCausalLM, LlavaMetaForCausalLM):
+ config_class = LlavaMistralConfig
+
+ def __init__(self, config):
+ super(MistralForCausalLM, self).__init__(config)
+ self.model = LlavaMistralModel(config)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_model(self):
+ return self.model
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ images: Optional[torch.FloatTensor] = None,
+ image_sizes: Optional[List[List[int]]] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+ if inputs_embeds is None:
+ (
+ input_ids,
+ position_ids,
+ attention_mask,
+ past_key_values,
+ inputs_embeds,
+ labels
+ ) = self.prepare_inputs_labels_for_multimodal(
+ input_ids,
+ position_ids,
+ attention_mask,
+ past_key_values,
+ labels,
+ images,
+ image_sizes
+ )
+
+ return super().forward(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ images: Optional[torch.Tensor] = None,
+ image_sizes: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> Union[GenerateOutput, torch.LongTensor]:
+ position_ids = kwargs.pop("position_ids", None)
+ attention_mask = kwargs.pop("attention_mask", None)
+ if "inputs_embeds" in kwargs:
+ raise NotImplementedError("`inputs_embeds` is not supported")
+
+ if images is not None:
+ (
+ inputs,
+ position_ids,
+ attention_mask,
+ _,
+ inputs_embeds,
+ _
+ ) = self.prepare_inputs_labels_for_multimodal(
+ inputs,
+ position_ids,
+ attention_mask,
+ None,
+ None,
+ images,
+ image_sizes=image_sizes
+ )
+ else:
+ inputs_embeds = self.get_model().embed_tokens(inputs)
+
+ return super().generate(
+ position_ids=position_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ **kwargs
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
+ inputs_embeds=None, **kwargs):
+ images = kwargs.pop("images", None)
+ image_sizes = kwargs.pop("image_sizes", None)
+ inputs = super().prepare_inputs_for_generation(
+ input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
+ )
+ if images is not None:
+ inputs['images'] = images
+ if image_sizes is not None:
+ inputs['image_sizes'] = image_sizes
+ return inputs
+
+AutoConfig.register("llava_mistral", LlavaMistralConfig)
+AutoModelForCausalLM.register(LlavaMistralConfig, LlavaMistralForCausalLM)
diff --git a/llava/model/language_model/llava_mpt.py b/llava/model/language_model/llava_mpt.py
index 0200d1f8c..02e5237ec 100644
--- a/llava/model/language_model/llava_mpt.py
+++ b/llava/model/language_model/llava_mpt.py
@@ -13,101 +13,85 @@
# limitations under the License.
-from typing import List, Optional, Tuple
-import warnings
+from typing import Optional, Tuple
import torch
-import torch.nn.functional as F
-import math
-from transformers import AutoConfig, AutoModelForCausalLM
-from transformers.modeling_outputs import CausalLMOutputWithPast
-
-from .mpt.modeling_mpt import MPTConfig, MPTForCausalLM, MPTModel
+from transformers import AutoConfig, AutoModelForCausalLM, \
+ MptConfig, MptForCausalLM, MptModel
from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
-class LlavaMPTConfig(MPTConfig):
+class LlavaMptConfig(MptConfig):
model_type = "llava_mpt"
-class LlavaMPTModel(LlavaMetaModel, MPTModel):
- config_class = LlavaMPTConfig
+class LlavaMptModel(LlavaMetaModel, MptModel):
+ config_class = LlavaMptConfig
- def __init__(self, config: MPTConfig):
+ def __init__(self, config: MptConfig):
config.hidden_size = config.d_model
- super(LlavaMPTModel, self).__init__(config)
+ super(LlavaMptModel, self).__init__(config)
def embed_tokens(self, x):
return self.wte(x)
-class LlavaMPTForCausalLM(MPTForCausalLM, LlavaMetaForCausalLM):
- config_class = LlavaMPTConfig
+class LlavaMptForCausalLM(MptForCausalLM, LlavaMetaForCausalLM):
+ config_class = LlavaMptConfig
supports_gradient_checkpointing = True
def __init__(self, config):
- super(MPTForCausalLM, self).__init__(config)
-
- if not config.tie_word_embeddings:
- raise ValueError('MPTForCausalLM only supports tied word embeddings')
- self.transformer = LlavaMPTModel(config)
- self.logit_scale = None
- if config.logit_scale is not None:
- logit_scale = config.logit_scale
- if isinstance(logit_scale, str):
- if logit_scale == 'inv_sqrt_d_model':
- logit_scale = 1 / math.sqrt(config.d_model)
- else:
- raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
- self.logit_scale = logit_scale
+ super(MptForCausalLM, self).__init__(config)
+
+ self.transformer = LlavaMptModel(config)
+ self.lm_head = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
def get_model(self):
return self.transformer
def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, LlavaMPTModel):
+ if isinstance(module, LlavaMptModel):
module.gradient_checkpointing = value
- def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, images=None):
- return_dict = return_dict if return_dict is not None else self.config.return_dict
- use_cache = use_cache if use_cache is not None else self.config.use_cache
-
- input_ids, _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, None, attention_mask, past_key_values, labels, images)
- outputs = self.transformer(input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache)
- # FIXME: this is a hack to fix the multiple gpu inference issue in https://github.com/haotian-liu/LLaVA/issues/338
- logits = F.linear(outputs.last_hidden_state.to(self.transformer.wte.weight.device), self.transformer.wte.weight)
- if self.logit_scale is not None:
- if self.logit_scale == 0:
- warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
- logits *= self.logit_scale
- loss = None
- if labels is not None:
- labels = torch.roll(labels, shifts=-1)
- labels[:, -1] = -100
- loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1))
- return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ images=None):
+
+ input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
+
+ return super().forward(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
- if inputs_embeds is not None:
- raise NotImplementedError('inputs_embeds is not implemented for MPT yet')
- attention_mask = kwargs['attention_mask'].bool()
- if attention_mask[:, -1].sum() != attention_mask.shape[0]:
- raise NotImplementedError('MPT does not support generation with right padding.')
- if self.transformer.attn_uses_sequence_id and self.training:
- sequence_id = torch.zeros_like(input_ids[:1])
- else:
- sequence_id = None
- if past_key_values is not None:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- if self.transformer.prefix_lm:
- prefix_mask = torch.ones_like(attention_mask)
- if kwargs.get('use_cache') == False:
- raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
- else:
- prefix_mask = None
- return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True), "images": kwargs.get("images", None)}
-
-
-AutoConfig.register("llava_mpt", LlavaMPTConfig)
-AutoModelForCausalLM.register(LlavaMPTConfig, LlavaMPTForCausalLM)
+ images = kwargs.pop("images", None)
+ _inputs = super().prepare_inputs_for_generation(
+ input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
+ )
+ _inputs['images'] = images
+ return _inputs
+
+
+AutoConfig.register("llava_mpt", LlavaMptConfig)
+AutoModelForCausalLM.register(LlavaMptConfig, LlavaMptForCausalLM)
diff --git a/llava/model/language_model/mpt/adapt_tokenizer.py b/llava/model/language_model/mpt/adapt_tokenizer.py
deleted file mode 100644
index e640c157e..000000000
--- a/llava/model/language_model/mpt/adapt_tokenizer.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from typing import Union
-from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
-Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
-NUM_SENTINEL_TOKENS: int = 100
-
-def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
- """Adds sentinel tokens and padding token (if missing).
-
- Expands the tokenizer vocabulary to include sentinel tokens
- used in mixture-of-denoiser tasks as well as a padding token.
-
- All added tokens are added as special tokens. No tokens are
- added if sentinel tokens and padding token already exist.
- """
- sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
- tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
- if tokenizer.pad_token is None:
- tokenizer.add_tokens('<pad>', special_tokens=True)
- tokenizer.pad_token = '<pad>'
- assert tokenizer.pad_token_id is not None
- sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])
- _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
- tokenizer.sentinel_token_ids = _sentinel_token_ids
-
-class AutoTokenizerForMOD(AutoTokenizer):
- """AutoTokenizer + Adaptation for MOD.
-
- A simple wrapper around AutoTokenizer to make instantiating
- an MOD-adapted tokenizer a bit easier.
-
- MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
- a padding token, and a property to get the token ids of the
- sentinel tokens.
- """
-
- @classmethod
- def from_pretrained(cls, *args, **kwargs):
- """See `AutoTokenizer.from_pretrained` docstring."""
- tokenizer = super().from_pretrained(*args, **kwargs)
- adapt_tokenizer_for_denoising(tokenizer)
- return tokenizer
\ No newline at end of file
diff --git a/llava/model/language_model/mpt/attention.py b/llava/model/language_model/mpt/attention.py
deleted file mode 100644
index b5543ef21..000000000
--- a/llava/model/language_model/mpt/attention.py
+++ /dev/null
@@ -1,300 +0,0 @@
-"""Attention layers."""
-import math
-import warnings
-from typing import Optional
-import torch
-import torch.nn as nn
-from einops import rearrange
-from packaging import version
-from torch import nn
-from .norm import LPLayerNorm
-
-def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
- if original_is_causal and num_query_tokens != num_key_tokens:
- if num_query_tokens != 1:
- raise NotImplementedError('MPT does not support query and key with different number of tokens, unless number of query tokens is 1.')
- else:
- return False
- return original_is_causal
-
-def scaled_multihead_dot_product_attention(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
- q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
- kv_n_heads = 1 if multiquery else n_heads
- k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
- v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
- if past_key_value is not None:
- if len(past_key_value) != 0:
- k = torch.cat([past_key_value[0], k], dim=3)
- v = torch.cat([past_key_value[1], v], dim=2)
- past_key_value = (k, v)
- (b, _, s_q, d) = q.shape
- s_k = k.size(-1)
- if softmax_scale is None:
- softmax_scale = 1 / math.sqrt(d)
- attn_weight = q.matmul(k) * softmax_scale
- if attn_bias is not None:
- _s_q = max(0, attn_bias.size(2) - s_q)
- _s_k = max(0, attn_bias.size(3) - s_k)
- attn_bias = attn_bias[:, :, _s_q:, _s_k:]
- if attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q):
- raise RuntimeError(f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.')
- attn_weight = attn_weight + attn_bias
- min_val = torch.finfo(q.dtype).min
- if key_padding_mask is not None:
- if attn_bias is not None:
- warnings.warn('Propogating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unneccessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
- attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val)
- if is_causal and (not q.size(2) == 1):
- s = max(s_q, s_k)
- causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
- causal_mask = causal_mask.tril()
- causal_mask = causal_mask.to(torch.bool)
- causal_mask = ~causal_mask
- causal_mask = causal_mask[-s_q:, -s_k:]
- attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
- attn_weight = torch.softmax(attn_weight, dim=-1)
- if dropout_p:
- attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True)
- out = attn_weight.to(v.dtype).matmul(v)
- out = rearrange(out, 'b h s d -> b s (h d)')
- if needs_weights:
- return (out, attn_weight, past_key_value)
- return (out, None, past_key_value)
-
-def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
- for tensor in tensors:
- if tensor.dtype not in valid_dtypes:
- raise TypeError(f'tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.')
- if not tensor.is_cuda:
- raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).')
-
-def flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
- try:
- from flash_attn import bert_padding, flash_attn_interface
- except:
- raise RuntimeError('Please install flash-attn==1.0.3.post0')
- check_valid_inputs(query, key, value)
- if past_key_value is not None:
- if len(past_key_value) != 0:
- key = torch.cat([past_key_value[0], key], dim=1)
- value = torch.cat([past_key_value[1], value], dim=1)
- past_key_value = (key, value)
- if attn_bias is not None:
- _s_q = max(0, attn_bias.size(2) - query.size(1))
- _s_k = max(0, attn_bias.size(3) - key.size(1))
- attn_bias = attn_bias[:, :, _s_q:, _s_k:]
- if attn_bias is not None:
- raise NotImplementedError(f'attn_bias not implemented for flash attn.')
- (batch_size, seqlen) = query.shape[:2]
- if key_padding_mask is None:
- key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
- query_padding_mask = key_padding_mask[:, -query.size(1):]
- (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask)
- query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
- (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask)
- key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
- (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
- value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
- if multiquery:
- key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
- value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
- dropout_p = dropout_p if training else 0.0
- reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
- output_unpad = flash_attn_interface.flash_attn_unpadded_func(query_unpad, key_unpad, value_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
- output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
- return (output, None, past_key_value)
-
-def triton_flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
- try:
- from .flash_attn_triton import flash_attn_func
- except:
- _installed = False
- if version.parse(torch.__version__) < version.parse('2.0.0'):
- _installed = True
- try:
- from flash_attn.flash_attn_triton import flash_attn_func
- except:
- _installed = False
- if not _installed:
- raise RuntimeError('Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed.')
- check_valid_inputs(query, key, value)
- if past_key_value is not None:
- if len(past_key_value) != 0:
- key = torch.cat([past_key_value[0], key], dim=1)
- value = torch.cat([past_key_value[1], value], dim=1)
- past_key_value = (key, value)
- if attn_bias is not None:
- _s_q = max(0, attn_bias.size(2) - query.size(1))
- _s_k = max(0, attn_bias.size(3) - key.size(1))
- attn_bias = attn_bias[:, :, _s_q:, _s_k:]
- if dropout_p:
- raise NotImplementedError(f'Dropout not implemented for attn_impl: triton.')
- if needs_weights:
- raise NotImplementedError(f'attn_impl: triton cannot return attn weights.')
- if key_padding_mask is not None:
- warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
- (b_size, s_k) = key_padding_mask.shape[:2]
- if attn_bias is None:
- attn_bias = query.new_zeros(b_size, 1, 1, s_k)
- attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min)
- query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
- key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
- value = rearrange(value, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
- if multiquery:
- key = key.expand(*key.shape[:2], n_heads, key.size(-1))
- value = value.expand(*value.shape[:2], n_heads, value.size(-1))
- reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
- attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
- output = attn_output.view(*attn_output.shape[:2], -1)
- return (output, None, past_key_value)
-
-class MultiheadAttention(nn.Module):
- """Multi-head self attention.
-
- Using torch or triton attention implementation enables user to also use
- additive bias.
- """
-
- def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
- super().__init__()
- self.attn_impl = attn_impl
- self.clip_qkv = clip_qkv
- self.qk_ln = qk_ln
- self.d_model = d_model
- self.n_heads = n_heads
- self.softmax_scale = softmax_scale
- if self.softmax_scale is None:
- self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
- self.attn_dropout_p = attn_pdrop
- self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device)
- fuse_splits = (d_model, 2 * d_model)
- self.Wqkv._fused = (0, fuse_splits)
- if self.qk_ln:
- layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
- self.q_ln = layernorm_class(self.d_model, device=device)
- self.k_ln = layernorm_class(self.d_model, device=device)
- if self.attn_impl == 'flash':
- self.attn_fn = flash_attn_fn
- elif self.attn_impl == 'triton':
- self.attn_fn = triton_flash_attn_fn
- if verbose:
- warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` ' + 'it uses more memory. When training larger models this can trigger ' + 'alloc retries which hurts performance. If encountered, we recommend ' + 'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
- elif self.attn_impl == 'torch':
- self.attn_fn = scaled_multihead_dot_product_attention
- if torch.cuda.is_available() and verbose:
- warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or ' + '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' + 'we recommend using `attn_impl: triton`.')
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
- self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
- self.out_proj._is_residual = True
-
- def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
- qkv = self.Wqkv(x)
- if self.clip_qkv:
- qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
- (query, key, value) = qkv.chunk(3, dim=2)
- key_padding_mask = attention_mask
- if self.qk_ln:
- dtype = query.dtype
- query = self.q_ln(query).to(dtype)
- key = self.k_ln(key).to(dtype)
- (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights)
- return (self.out_proj(context), attn_weights, past_key_value)
-
-class MultiQueryAttention(nn.Module):
- """Multi-Query self attention.
-
- Using torch or triton attention implementation enables user to also use
- additive bias.
- """
-
- def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
- super().__init__()
- self.attn_impl = attn_impl
- self.clip_qkv = clip_qkv
- self.qk_ln = qk_ln
- self.d_model = d_model
- self.n_heads = n_heads
- self.head_dim = d_model // n_heads
- self.softmax_scale = softmax_scale
- if self.softmax_scale is None:
- self.softmax_scale = 1 / math.sqrt(self.head_dim)
- self.attn_dropout_p = attn_pdrop
- self.Wqkv = nn.Linear(d_model, d_model + 2 * self.head_dim, device=device)
- fuse_splits = (d_model, d_model + self.head_dim)
- self.Wqkv._fused = (0, fuse_splits)
- if self.qk_ln:
- layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
- self.q_ln = layernorm_class(d_model, device=device)
- self.k_ln = layernorm_class(self.head_dim, device=device)
- if self.attn_impl == 'flash':
- self.attn_fn = flash_attn_fn
- elif self.attn_impl == 'triton':
- self.attn_fn = triton_flash_attn_fn
- if verbose:
- warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` ' + 'it uses more memory. When training larger models this can trigger ' + 'alloc retries which hurts performance. If encountered, we recommend ' + 'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
- elif self.attn_impl == 'torch':
- self.attn_fn = scaled_multihead_dot_product_attention
- if torch.cuda.is_available() and verbose:
- warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or ' + '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' + 'we recommend using `attn_impl: triton`.')
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
- self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
- self.out_proj._is_residual = True
-
- def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
- qkv = self.Wqkv(x)
- if self.clip_qkv:
- qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
- (query, key, value) = qkv.split([self.d_model, self.head_dim, self.head_dim], dim=2)
- key_padding_mask = attention_mask
- if self.qk_ln:
- dtype = query.dtype
- query = self.q_ln(query).to(dtype)
- key = self.k_ln(key).to(dtype)
- (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, multiquery=True)
- return (self.out_proj(context), attn_weights, past_key_value)
-
-def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):
- if attn_impl == 'flash':
- return None
- elif attn_impl in ['torch', 'triton']:
- if alibi:
- if (prefix_lm or not causal) or use_sequence_id:
- return (1, n_heads, seq_len, seq_len)
- return (1, n_heads, 1, seq_len)
- elif prefix_lm or use_sequence_id:
- return (1, 1, seq_len, seq_len)
- return None
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
-
-def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):
- if attn_impl == 'flash':
- return None
- elif attn_impl in ['torch', 'triton']:
- if alibi:
- (device, dtype) = (attn_bias.device, attn_bias.dtype)
- attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))
- return attn_bias
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
-
-def gen_slopes(n_heads, alibi_bias_max=8, device=None):
- _n_heads = 2 ** math.ceil(math.log2(n_heads))
- m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
- m = m.mul(alibi_bias_max / _n_heads)
- slopes = 1.0 / torch.pow(2, m)
- if _n_heads != n_heads:
- slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
- return slopes.view(1, n_heads, 1, 1)
-
-def build_alibi_bias(n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None):
- alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, 1, seq_len)
- if full:
- alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, seq_len, 1)
- alibi_bias = alibi_bias.abs().mul(-1)
- slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
- alibi_bias = alibi_bias * slopes
- return alibi_bias.to(dtype=dtype)
-ATTN_CLASS_REGISTRY = {'multihead_attention': MultiheadAttention, 'multiquery_attention': MultiQueryAttention}
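The ALiBi helpers above (`gen_slopes`, `build_alibi_bias`) encode relative distance as an additive attention bias instead of positional embeddings. Below is a minimal, self-contained sketch of the same construction for the causal case; the helper name `alibi_slopes` is illustrative and not part of the file.

```python
import math
import torch

def alibi_slopes(n_heads: int, alibi_bias_max: int = 8) -> torch.Tensor:
    # Mirrors gen_slopes: slope_i = 2 ** (-alibi_bias_max * i / _n_heads)
    _n_heads = 2 ** math.ceil(math.log2(n_heads))
    m = torch.arange(1, _n_heads + 1, dtype=torch.float32) * (alibi_bias_max / _n_heads)
    slopes = 1.0 / torch.pow(2, m)
    if _n_heads != n_heads:
        # If n_heads is not a power of two, interleave and truncate the slopes.
        slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
    return slopes.view(1, n_heads, 1, 1)

# Causal case: the bias depends only on the key position, so a (1, n_heads, 1, seq_len)
# tensor suffices; more distant keys receive a larger negative offset.
n_heads, seq_len = 4, 8
distances = torch.arange(1 - seq_len, 1, dtype=torch.float32).view(1, 1, 1, seq_len)
bias = distances * alibi_slopes(n_heads)
print(bias.shape)  # torch.Size([1, 4, 1, 8])
```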
diff --git a/llava/model/language_model/mpt/blocks.py b/llava/model/language_model/mpt/blocks.py
deleted file mode 100644
index 537e7f919..000000000
--- a/llava/model/language_model/mpt/blocks.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""GPT Blocks used for the GPT Model."""
-from typing import Dict, Optional, Tuple
-import torch
-import torch.nn as nn
-from .attention import ATTN_CLASS_REGISTRY
-from .norm import NORM_CLASS_REGISTRY
-
-class MPTMLP(nn.Module):
-
- def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
- super().__init__()
- self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device)
- self.act = nn.GELU(approximate='none')
- self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device)
- self.down_proj._is_residual = True
-
- def forward(self, x):
- return self.down_proj(self.act(self.up_proj(x)))
-
-class MPTBlock(nn.Module):
-
- def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):
- del kwargs
- super().__init__()
- norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
- attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
- self.norm_1 = norm_class(d_model, device=device)
- self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)
- self.norm_2 = norm_class(d_model, device=device)
- self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
- self.resid_attn_dropout = nn.Dropout(resid_pdrop)
- self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
-
- def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
- a = self.norm_1(x)
- (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
- x = x + self.resid_attn_dropout(b)
- m = self.norm_2(x)
- n = self.ffn(m)
- x = x + self.resid_ffn_dropout(n)
- return (x, attn_weights, past_key_value)
\ No newline at end of file
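`MPTBlock` above is a standard pre-LayerNorm transformer block: normalize, attend, add the residual, normalize, apply the MLP, add the residual. The following is a simplified, self-contained sketch of that wiring using stock PyTorch modules; dropout and the custom attention/norm registries are omitted.

```python
import torch
import torch.nn as nn

class PreNormBlock(nn.Module):
    """Minimal pre-LN block with the same residual wiring as MPTBlock."""

    def __init__(self, d_model: int, n_heads: int = 4, expansion_ratio: int = 4):
        super().__init__()
        self.norm_1 = nn.LayerNorm(d_model)
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.norm_2 = nn.LayerNorm(d_model)
        self.ffn = nn.Sequential(
            nn.Linear(d_model, expansion_ratio * d_model),
            nn.GELU(),
            nn.Linear(expansion_ratio * d_model, d_model),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        a = self.norm_1(x)
        b, _ = self.attn(a, a, a, need_weights=False)
        x = x + b                          # residual around attention
        x = x + self.ffn(self.norm_2(x))   # residual around the MLP
        return x

x = torch.randn(2, 16, 64)
print(PreNormBlock(64)(x).shape)  # torch.Size([2, 16, 64])
```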
diff --git a/llava/model/language_model/mpt/configuration_mpt.py b/llava/model/language_model/mpt/configuration_mpt.py
deleted file mode 100644
index e9eb6fc59..000000000
--- a/llava/model/language_model/mpt/configuration_mpt.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""A HuggingFace-style model configuration."""
-from typing import Dict, Optional, Union
-from transformers import PretrainedConfig
-attn_config_defaults: Dict = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}
-init_config_defaults: Dict = {'name': 'kaiming_normal_', 'fan_mode': 'fan_in', 'init_nonlinearity': 'relu', 'init_div_is_residual': True, 'emb_init_std': None, 'emb_init_uniform_lim': None, 'init_std': None, 'init_gain': 0.0}
-
-class MPTConfig(PretrainedConfig):
- model_type = 'mpt'
-
- def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):
- """The MPT configuration class.
-
- Args:
- d_model (int): The size of the embedding dimension of the model.
- n_heads (int): The number of attention heads.
- n_layers (int): The number of layers in the model.
- expansion_ratio (int): The ratio of the up/down scale in the MLP.
- max_seq_len (int): The maximum sequence length of the model.
- vocab_size (int): The size of the vocabulary.
- resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.
- emb_pdrop (float): The dropout probability for the embedding layer.
- learned_pos_emb (bool): Whether to use learned positional embeddings
- attn_config (Dict): A dictionary used to configure the model's attention module:
- attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention
- attn_pdrop (float): The dropout probability for the attention layers.
- attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
- qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
- clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to
- this value.
- softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,
- use the default scale of ``1/sqrt(d_keys)``.
- prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an
- extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix
- can attend to one another bi-directionally. Tokens outside the prefix use causal attention.
- attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.
- When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
- which sub-sequence each token belongs to.
- Defaults to ``False`` meaning any provided `sequence_id` will be ignored.
- alibi (bool): Whether to use the alibi bias instead of position embeddings.
- alibi_bias_max (int): The maximum value of the alibi bias.
- init_device (str): The device to use for parameter initialization.
- logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
- no_bias (bool): Whether to remove bias terms from all layers.
- verbose (int): The verbosity level. 0 is silent.
- embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
- norm_type (str): The type of norm layer to use.
- multiquery_attention (bool): Whether to use the multi-query attention implementation.
- use_cache (bool): Whether or not the model should return the last key/value attention states.
- init_config (Dict): A dictionary used to configure the model initialization:
- init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',
- 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or
- 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.
- init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
- emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
- emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution
- used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
- init_std (float): The standard deviation of the normal distribution used to initialize the model,
- if using the baseline_ parameter initialization scheme.
- init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
- fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
- init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
- ---
- See llmfoundry.models.utils.param_init_fns.py for info on other param init config options
- """
- self.d_model = d_model
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.expansion_ratio = expansion_ratio
- self.max_seq_len = max_seq_len
- self.vocab_size = vocab_size
- self.resid_pdrop = resid_pdrop
- self.emb_pdrop = emb_pdrop
- self.learned_pos_emb = learned_pos_emb
- self.attn_config = attn_config
- self.init_device = init_device
- self.logit_scale = logit_scale
- self.no_bias = no_bias
- self.verbose = verbose
- self.embedding_fraction = embedding_fraction
- self.norm_type = norm_type
- self.use_cache = use_cache
- self.init_config = init_config
- if 'name' in kwargs:
- del kwargs['name']
- if 'loss_fn' in kwargs:
- del kwargs['loss_fn']
- super().__init__(**kwargs)
- self._validate_config()
-
- def _set_config_defaults(self, config, config_defaults):
- for (k, v) in config_defaults.items():
- if k not in config:
- config[k] = v
- return config
-
- def _validate_config(self):
- self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)
- self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)
- if self.d_model % self.n_heads != 0:
- raise ValueError('d_model must be divisible by n_heads')
- if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):
- raise ValueError("self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1")
- if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:
- raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
- if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
- raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')
- if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
- raise NotImplementedError('alibi only implemented with torch and triton attention.')
- if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
- raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')
- if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
- raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')
- if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':
- raise ValueError(f"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
- if self.init_config.get('name', None) is None:
- raise ValueError(f"self.init_config={self.init_config!r} 'name' needs to be set.")
- if not self.learned_pos_emb and (not self.attn_config['alibi']):
- raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')
\ No newline at end of file
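A usage sketch for the configuration class above, assuming the module were importable as `configuration_mpt` (the import path is illustrative). `_validate_config()` runs inside `__init__` and enforces, for example, that `d_model` is divisible by `n_heads`.

```python
from configuration_mpt import MPTConfig, attn_config_defaults  # illustrative import path

config = MPTConfig(
    d_model=768,
    n_heads=12,
    n_layers=12,
    max_seq_len=2048,
    # Override only the attention fields of interest; keep the documented defaults otherwise.
    attn_config={**attn_config_defaults, 'attn_impl': 'torch', 'alibi': True},
)
print(config.d_model // config.n_heads)  # 64, the per-head dimension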
diff --git a/llava/model/language_model/mpt/custom_embedding.py b/llava/model/language_model/mpt/custom_embedding.py
deleted file mode 100644
index ab357952c..000000000
--- a/llava/model/language_model/mpt/custom_embedding.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch import Tensor
-
-class SharedEmbedding(nn.Embedding):
-
- def forward(self, input: Tensor, unembed: bool=False) -> Tensor:
- if unembed:
- return F.linear(input, self.weight)
- return super().forward(input)
\ No newline at end of file
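`SharedEmbedding` ties the input embedding and the output (unembedding) projection to a single weight matrix. A small usage sketch, assuming the class above is in scope:

```python
import torch

emb = SharedEmbedding(num_embeddings=100, embedding_dim=16)
ids = torch.randint(0, 100, (2, 5))
hidden = emb(ids)                    # embed: token ids -> (2, 5, 16)
logits = emb(hidden, unembed=True)   # tied unembedding: (2, 5, 16) -> (2, 5, 100)
print(hidden.shape, logits.shape)
```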
diff --git a/llava/model/language_model/mpt/flash_attn_triton.py b/llava/model/language_model/mpt/flash_attn_triton.py
deleted file mode 100644
index c0a42186d..000000000
--- a/llava/model/language_model/mpt/flash_attn_triton.py
+++ /dev/null
@@ -1,484 +0,0 @@
-"""
-Copied from https://github.com/HazyResearch/flash-attention/blob/eff9fe6b8076df59d64d7a3f464696738a3c7c24/flash_attn/flash_attn_triton.py
-Imports updated to use 'triton_pre_mlir'.
-
-*Experimental* implementation of FlashAttention in Triton.
-Tested with triton==2.0.0.dev20221202.
-Triton 2.0 has a new backend (MLIR), but it does not yet seem to work for head dimensions
-other than 64:
-https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention.py#L207
-We'll update this implementation with the new Triton backend once this is fixed.
-
-We use the FlashAttention implementation from Phil Tillet as a starting point.
-https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
-
-Changes:
-- Implement both causal and non-causal attention.
-- Implement both self-attention and cross-attention.
-- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
-- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
-- Support attention bias.
-- Speed up the forward pass a bit, and only store the LSE instead of m and l.
-- Make the backward for d=128 much faster by reducing register spilling.
-- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
-small batch size * nheads.
-
-Caution:
-- This is an *experimental* implementation. The forward pass should be quite robust but
-I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler).
-- This implementation has only been tested on A100.
-- If you plan to use headdim other than 64 and 128, you should test for race conditions
-(due to the Triton compiler), as done in tests/test_flash_attn.py
-"test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
-for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
-that there are none left for other head dimensions.
-
-Differences between this Triton version and the CUDA version:
-- Triton version doesn't support dropout.
-- Triton forward is generally faster than CUDA forward, while Triton backward is
-generally slower than CUDA backward. Overall Triton forward + backward is slightly slower
-than CUDA forward + backward.
-- Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
-- Triton version supports attention bias, while CUDA version doesn't.
-"""
-import math
-import torch
-import triton_pre_mlir as triton
-import triton_pre_mlir.language as tl
-
-@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0, 'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM']})
-@triton.jit
-def _fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
- start_m = tl.program_id(0)
- off_hb = tl.program_id(1)
- off_b = off_hb // nheads
- off_h = off_hb % nheads
- offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
- offs_n = tl.arange(0, BLOCK_N)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
- k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
- v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
- if BIAS_TYPE == 'vector':
- b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
- elif BIAS_TYPE == 'matrix':
- b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :])
- t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
- lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
- m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
- acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
- if EVEN_M & EVEN_N:
- if EVEN_HEADDIM:
- q = tl.load(q_ptrs)
- else:
- q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
- else:
- q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
- end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
- for start_n in range(0, end_n, BLOCK_N):
- start_n = tl.multiple_of(start_n, BLOCK_N)
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- k = tl.load(k_ptrs + start_n * stride_kn)
- else:
- k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
- else:
- k = tl.load(k_ptrs + start_n * stride_kn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
- qk += tl.dot(q, k, trans_b=True)
- if not EVEN_N:
- qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float('-inf'))
- if IS_CAUSAL:
- qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float('-inf'))
- if BIAS_TYPE != 'none':
- if BIAS_TYPE == 'vector':
- if EVEN_N:
- bias = tl.load(b_ptrs + start_n).to(tl.float32)
- else:
- bias = tl.load(b_ptrs + start_n, mask=start_n + offs_n < seqlen_k, other=0.0).to(tl.float32)
- bias = bias[None, :]
- elif BIAS_TYPE == 'matrix':
- if EVEN_M & EVEN_N:
- bias = tl.load(b_ptrs + start_n).to(tl.float32)
- else:
- bias = tl.load(b_ptrs + start_n, mask=(offs_m[:, None] < seqlen_q) & ((start_n + offs_n)[None, :] < seqlen_k), other=0.0).to(tl.float32)
- qk = qk * softmax_scale + bias
- m_ij = tl.maximum(tl.max(qk, 1), lse_i)
- p = tl.exp(qk - m_ij[:, None])
- else:
- m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
- p = tl.exp(qk * softmax_scale - m_ij[:, None])
- l_ij = tl.sum(p, 1)
- acc_o_scale = tl.exp(m_i - m_ij)
- tl.store(t_ptrs, acc_o_scale)
- acc_o_scale = tl.load(t_ptrs)
- acc_o = acc_o * acc_o_scale[:, None]
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- v = tl.load(v_ptrs + start_n * stride_vn)
- else:
- v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
- else:
- v = tl.load(v_ptrs + start_n * stride_vn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- p = p.to(v.dtype)
- acc_o += tl.dot(p, v)
- m_i = m_ij
- l_i_new = tl.exp(lse_i - m_ij) + l_ij
- lse_i = m_ij + tl.log(l_i_new)
- o_scale = tl.exp(m_i - lse_i)
- tl.store(t_ptrs, o_scale)
- o_scale = tl.load(t_ptrs)
- acc_o = acc_o * o_scale[:, None]
- start_m = tl.program_id(0)
- offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
- lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
- tl.store(lse_ptrs, lse_i)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :])
- if EVEN_M:
- if EVEN_HEADDIM:
- tl.store(out_ptrs, acc_o)
- else:
- tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
- elif EVEN_HEADDIM:
- tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
- else:
- tl.store(out_ptrs, acc_o, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
-
-@triton.jit
-def _bwd_preprocess_do_o_dot(Out, DO, Delta, stride_ob, stride_oh, stride_om, stride_dob, stride_doh, stride_dom, nheads, seqlen_q, seqlen_q_rounded, headdim, BLOCK_M: tl.constexpr, BLOCK_HEADDIM: tl.constexpr):
- start_m = tl.program_id(0)
- off_hb = tl.program_id(1)
- off_b = off_hb // nheads
- off_h = off_hb % nheads
- offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- o = tl.load(Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
- do = tl.load(DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
- delta = tl.sum(o * do, axis=1)
- tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
-
-@triton.jit
-def _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr):
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- tl.store(dv_ptrs, dv)
- tl.store(dk_ptrs, dk)
- else:
- tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
- tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
- elif EVEN_HEADDIM:
- tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
- tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
- else:
- tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
- tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
-
-@triton.jit
-def _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD: tl.constexpr, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
- begin_m = 0 if not IS_CAUSAL else start_n * BLOCK_N // BLOCK_M * BLOCK_M
- offs_qm = begin_m + tl.arange(0, BLOCK_M)
- offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
- offs_m = tl.arange(0, BLOCK_M)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
- k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
- v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
- do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
- dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
- if BIAS_TYPE == 'vector':
- b_ptrs = Bias + offs_n
- elif BIAS_TYPE == 'matrix':
- b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
- dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
- dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
- if begin_m >= seqlen_q:
- dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
- dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
- _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
- return
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- k = tl.load(k_ptrs)
- v = tl.load(v_ptrs)
- else:
- k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
- v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
- v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
- else:
- k = tl.load(k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- v = tl.load(v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
- for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
- start_m = tl.multiple_of(start_m, BLOCK_M)
- offs_m_curr = start_m + offs_m
- if EVEN_M & EVEN_HEADDIM:
- q = tl.load(q_ptrs)
- elif EVEN_HEADDIM:
- q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
- else:
- q = tl.load(q_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
- qk = tl.dot(q, k, trans_b=True)
- if not EVEN_N:
- qk = tl.where(offs_n[None, :] < seqlen_k, qk, float('-inf'))
- if IS_CAUSAL:
- qk = tl.where(offs_m_curr[:, None] >= offs_n[None, :], qk, float('-inf'))
- if BIAS_TYPE != 'none':
- tl.debug_barrier()
- if BIAS_TYPE == 'vector':
- if EVEN_N:
- bias = tl.load(b_ptrs).to(tl.float32)
- else:
- bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32)
- bias = bias[None, :]
- elif BIAS_TYPE == 'matrix':
- if EVEN_M & EVEN_N:
- bias = tl.load(b_ptrs).to(tl.float32)
- else:
- bias = tl.load(b_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_n[None, :] < seqlen_k), other=0.0).to(tl.float32)
- qk = qk * softmax_scale + bias
- if not EVEN_M & EVEN_HEADDIM:
- tl.debug_barrier()
- lse_i = tl.load(LSE + offs_m_curr)
- if BIAS_TYPE == 'none':
- p = tl.exp(qk * softmax_scale - lse_i[:, None])
- else:
- p = tl.exp(qk - lse_i[:, None])
- if EVEN_M & EVEN_HEADDIM:
- do = tl.load(do_ptrs)
- else:
- do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
- dv += tl.dot(p.to(do.dtype), do, trans_a=True)
- if not EVEN_M & EVEN_HEADDIM:
- tl.debug_barrier()
- dp = tl.dot(do, v, trans_b=True)
- if not EVEN_HEADDIM:
- tl.debug_barrier()
- Di = tl.load(D + offs_m_curr)
- ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
- dk += tl.dot(ds, q, trans_a=True)
- if not EVEN_M & EVEN_HEADDIM:
- tl.debug_barrier()
- if not ATOMIC_ADD:
- if EVEN_M & EVEN_HEADDIM:
- dq = tl.load(dq_ptrs, eviction_policy='evict_last')
- dq += tl.dot(ds, k)
- tl.store(dq_ptrs, dq, eviction_policy='evict_last')
- elif EVEN_HEADDIM:
- dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0, eviction_policy='evict_last')
- dq += tl.dot(ds, k)
- tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q, eviction_policy='evict_last')
- else:
- dq = tl.load(dq_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0, eviction_policy='evict_last')
- dq += tl.dot(ds, k)
- tl.store(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), eviction_policy='evict_last')
- else:
- dq = tl.dot(ds, k)
- if EVEN_M & EVEN_HEADDIM:
- tl.atomic_add(dq_ptrs, dq)
- elif EVEN_HEADDIM:
- tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
- else:
- tl.atomic_add(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
- dq_ptrs += BLOCK_M * stride_dqm
- q_ptrs += BLOCK_M * stride_qm
- do_ptrs += BLOCK_M * stride_dom
- if BIAS_TYPE == 'matrix':
- b_ptrs += BLOCK_M * stride_bm
- dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
- dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
- _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
-
-def init_to_zero(name):
- return lambda nargs: nargs[name].zero_()
-
-@triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ'))], key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM'])
-@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0, 'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM']})
-@triton.jit
-def _bwd_kernel(Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_dob, stride_doh, stride_dom, stride_dqb, stride_dqh, stride_dqm, stride_dkb, stride_dkh, stride_dkn, stride_dvb, stride_dvh, stride_dvn, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
- off_hb = tl.program_id(1)
- off_b = off_hb // nheads
- off_h = off_hb % nheads
- Q += off_b * stride_qb + off_h * stride_qh
- K += off_b * stride_kb + off_h * stride_kh
- V += off_b * stride_vb + off_h * stride_vh
- DO += off_b * stride_dob + off_h * stride_doh
- DQ += off_b * stride_dqb + off_h * stride_dqh
- DK += off_b * stride_dkb + off_h * stride_dkh
- DV += off_b * stride_dvb + off_h * stride_dvh
- if BIAS_TYPE != 'none':
- Bias += off_b * stride_bb + off_h * stride_bh
- D += off_hb * seqlen_q_rounded
- LSE += off_hb * seqlen_q_rounded
- if not SEQUENCE_PARALLEL:
- num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
- for start_n in range(0, num_block_n):
- _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD=False, BIAS_TYPE=BIAS_TYPE, IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
- else:
- start_n = tl.program_id(0)
- _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD=True, BIAS_TYPE=BIAS_TYPE, IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
-
-def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
- (batch, seqlen_q, nheads, d) = q.shape
- (_, seqlen_k, _, _) = k.shape
- assert k.shape == (batch, seqlen_k, nheads, d)
- assert v.shape == (batch, seqlen_k, nheads, d)
- assert d <= 128, 'FlashAttention only supports head dimensions up to 128'
- assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same dtype'
- assert q.dtype in [torch.float16, torch.bfloat16], 'Only fp16 and bf16 are supported'
- assert q.is_cuda and k.is_cuda and v.is_cuda
- softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
- has_bias = bias is not None
- bias_type = 'none'
- if has_bias:
- assert bias.dtype in [q.dtype, torch.float]
- assert bias.is_cuda
- assert bias.dim() == 4
- if bias.stride(-1) != 1:
- bias = bias.contiguous()
- if bias.shape[2:] == (1, seqlen_k):
- bias_type = 'vector'
- elif bias.shape[2:] == (seqlen_q, seqlen_k):
- bias_type = 'matrix'
- else:
- raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)')
- bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
- bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
- seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
- lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
- tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
- o = torch.empty_like(q)
- BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
- BLOCK = 128
- num_warps = 4 if d <= 64 else 8
- grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
- _fwd_kernel[grid](q, k, v, bias, o, lse, tmp, softmax_scale, q.stride(0), q.stride(2), q.stride(1), k.stride(0), k.stride(2), k.stride(1), v.stride(0), v.stride(2), v.stride(1), *bias_strides, o.stride(0), o.stride(2), o.stride(1), nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, seqlen_q // 32, seqlen_k // 32, bias_type, causal, BLOCK_HEADDIM, BLOCK_M=BLOCK, BLOCK_N=BLOCK, num_warps=num_warps, num_stages=1)
- return (o, lse, softmax_scale)
-
-def _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None):
- if do.stride(-1) != 1:
- do = do.contiguous()
- (batch, seqlen_q, nheads, d) = q.shape
- (_, seqlen_k, _, _) = k.shape
- assert d <= 128
- seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
- assert lse.shape == (batch, nheads, seqlen_q_rounded)
- assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
- assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
- softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
- dq_accum = torch.empty_like(q, dtype=torch.float32)
- delta = torch.empty_like(lse)
- BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
- grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
- _bwd_preprocess_do_o_dot[grid](o, do, delta, o.stride(0), o.stride(2), o.stride(1), do.stride(0), do.stride(2), do.stride(1), nheads, seqlen_q, seqlen_q_rounded, d, BLOCK_M=128, BLOCK_HEADDIM=BLOCK_HEADDIM)
- has_bias = bias is not None
- bias_type = 'none'
- if has_bias:
- assert bias.dtype in [q.dtype, torch.float]
- assert bias.is_cuda
- assert bias.dim() == 4
- assert bias.stride(-1) == 1
- if bias.shape[2:] == (1, seqlen_k):
- bias_type = 'vector'
- elif bias.shape[2:] == (seqlen_q, seqlen_k):
- bias_type = 'matrix'
- else:
- raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)')
- bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
- bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
- grid = lambda META: (triton.cdiv(seqlen_k, META['BLOCK_N']) if META['SEQUENCE_PARALLEL'] else 1, batch * nheads)
- _bwd_kernel[grid](q, k, v, bias, do, dq_accum, dk, dv, lse, delta, softmax_scale, q.stride(0), q.stride(2), q.stride(1), k.stride(0), k.stride(2), k.stride(1), v.stride(0), v.stride(2), v.stride(1), *bias_strides, do.stride(0), do.stride(2), do.stride(1), dq_accum.stride(0), dq_accum.stride(2), dq_accum.stride(1), dk.stride(0), dk.stride(2), dk.stride(1), dv.stride(0), dv.stride(2), dv.stride(1), nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, seqlen_q // 32, seqlen_k // 32, bias_type, causal, BLOCK_HEADDIM)
- dq.copy_(dq_accum)
-
-class FlashAttnQKVPackedFunc(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
- """
- qkv: (batch, seqlen, 3, nheads, headdim)
- bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
- For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
- ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
- """
- if qkv.stride(-1) != 1:
- qkv = qkv.contiguous()
- (o, lse, ctx.softmax_scale) = _flash_attn_forward(qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], bias=bias, causal=causal, softmax_scale=softmax_scale)
- ctx.save_for_backward(qkv, o, lse, bias)
- ctx.causal = causal
- return o
-
- @staticmethod
- def backward(ctx, do):
- (qkv, o, lse, bias) = ctx.saved_tensors
- assert not ctx.needs_input_grad[1], 'FlashAttention does not support bias gradient yet'
- with torch.inference_mode():
- dqkv = torch.empty_like(qkv)
- _flash_attn_backward(do, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], o, lse, dqkv[:, :, 0], dqkv[:, :, 1], dqkv[:, :, 2], bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
- return (dqkv, None, None, None)
-flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
-
-class FlashAttnKVPackedFunc(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
- """
- q: (batch, seqlen_q, nheads, headdim)
- kv: (batch, seqlen_k, 2, nheads, headdim)
- bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
- For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
- ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
- """
- (q, kv) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
- (o, lse, ctx.softmax_scale) = _flash_attn_forward(q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale)
- ctx.save_for_backward(q, kv, o, lse, bias)
- ctx.causal = causal
- return o
-
- @staticmethod
- def backward(ctx, do):
- (q, kv, o, lse, bias) = ctx.saved_tensors
- if len(ctx.needs_input_grad) >= 3:
- assert not ctx.needs_input_grad[2], 'FlashAttention does not support bias gradient yet'
- with torch.inference_mode():
- dq = torch.empty_like(q)
- dkv = torch.empty_like(kv)
- _flash_attn_backward(do, q, kv[:, :, 0], kv[:, :, 1], o, lse, dq, dkv[:, :, 0], dkv[:, :, 1], bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
- return (dq, dkv, None, None, None)
-flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
-
-class FlashAttnFunc(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
- """
- q: (batch_size, seqlen_q, nheads, headdim)
- k, v: (batch_size, seqlen_k, nheads, headdim)
- bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
- For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
- ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
- """
- (q, k, v) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
- (o, lse, ctx.softmax_scale) = _flash_attn_forward(q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale)
- ctx.save_for_backward(q, k, v, o, lse, bias)
- ctx.causal = causal
- return o
-
- @staticmethod
- def backward(ctx, do):
- (q, k, v, o, lse, bias) = ctx.saved_tensors
- assert not ctx.needs_input_grad[3], 'FlashAttention does not support bias gradient yet'
- with torch.inference_mode():
- dq = torch.empty_like(q)
- dk = torch.empty_like(k)
- dv = torch.empty_like(v)
- _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
- return (dq, dk, dv, None, None, None)
-flash_attn_func = FlashAttnFunc.apply
\ No newline at end of file
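A usage sketch for the autograd wrappers above (`flash_attn_func` and friends). It assumes a CUDA device, the `triton_pre_mlir` dependency, and that the functions are in scope; note the (batch, seqlen, nheads, headdim) layout and that an ALiBi-style additive bias is passed as a tensor rather than baked into the kernel.

```python
import torch

batch, seqlen, nheads, headdim = 2, 128, 8, 64
q = torch.randn(batch, seqlen, nheads, headdim, device='cuda', dtype=torch.float16, requires_grad=True)
k = torch.randn_like(q, requires_grad=True)
v = torch.randn_like(q, requires_grad=True)

# 'vector' bias: one value per (head, key position), broadcast over queries.
bias = torch.randn(1, nheads, 1, seqlen, device='cuda', dtype=torch.float16)

out = flash_attn_func(q, k, v, bias, True, None)  # bias, causal=True, default softmax scale
out.sum().backward()                              # gradients flow to q, k, v (bias gradients are unsupported)
print(out.shape)  # torch.Size([2, 128, 8, 64])
```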
diff --git a/llava/model/language_model/mpt/hf_prefixlm_converter.py b/llava/model/language_model/mpt/hf_prefixlm_converter.py
deleted file mode 100644
index 8c1a64872..000000000
--- a/llava/model/language_model/mpt/hf_prefixlm_converter.py
+++ /dev/null
@@ -1,415 +0,0 @@
-"""Converts Huggingface Causal LM to Prefix LM.
-
-Conversion does lightweight surgery on a HuggingFace
-Causal LM to convert it to a Prefix LM.
-
-Prefix LMs accept a `bidirectional_mask` input in `forward`
-and treat the input prompt as the prefix in `generate`.
-"""
-import math
-import warnings
-from types import MethodType
-from typing import Any, Dict, List, Optional, Tuple, Union
-import torch
-from transformers.models.bloom.modeling_bloom import BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel, CausalLMOutputWithCrossAttentions, CrossEntropyLoss
-from transformers.models.bloom.modeling_bloom import _expand_mask as _expand_mask_bloom
-from transformers.models.bloom.modeling_bloom import _make_causal_mask as _make_causal_mask_bloom
-from transformers.models.bloom.modeling_bloom import logging
-from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
-from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
-from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
-from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
-from transformers.models.opt.modeling_opt import OPTForCausalLM
-from transformers.models.opt.modeling_opt import _expand_mask as _expand_mask_opt
-from transformers.models.opt.modeling_opt import _make_causal_mask as _make_causal_mask_opt
-logger = logging.get_logger(__name__)
-_SUPPORTED_GPT_MODELS = (GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM)
-CAUSAL_GPT_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM]
-
-def _convert_gpt_causal_lm_to_prefix_lm(model: CAUSAL_GPT_TYPES) -> CAUSAL_GPT_TYPES:
- """Converts a GPT-style Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `GPT2LMHeadModel`
- - `GPTNeoForCausalLM`
- - `GPTNeoXForCausalLM`
- - `GPTJForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
- assert isinstance(model, _SUPPORTED_GPT_MODELS)
- assert model.config.add_cross_attention == False, 'Only supports GPT-style decoder-only models'
-
- def _get_attn_modules(model: CAUSAL_GPT_TYPES) -> List[torch.nn.Module]:
- """Helper that gets a list of the model's attention modules.
-
- Each module has a `bias` buffer used for causal masking. The Prefix LM
- conversion adds logic to dynamically manipulate these biases to support
- Prefix LM attention masking.
- """
- attn_modules = []
- if isinstance(model, GPTNeoXForCausalLM):
- blocks = model.gpt_neox.layers
- else:
- blocks = model.transformer.h
- for block in blocks:
- if isinstance(model, GPTNeoForCausalLM):
- if block.attn.attention_type != 'global':
- continue
- attn_module = block.attn.attention
- elif isinstance(model, GPTNeoXForCausalLM):
- attn_module = block.attention
- else:
- attn_module = block.attn
- attn_modules.append(attn_module)
- return attn_modules
- setattr(model, '_original_forward', getattr(model, 'forward'))
- setattr(model, '_original_generate', getattr(model, 'generate'))
-
- def forward(self: CAUSAL_GPT_TYPES, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
- """Wraps original forward to enable PrefixLM attention."""
-
- def call_og_forward():
- if isinstance(self, GPTNeoXForCausalLM):
- return self._original_forward(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- else:
- return self._original_forward(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- if bidirectional_mask is None:
- return call_og_forward()
- assert isinstance(bidirectional_mask, torch.Tensor)
- attn_modules = _get_attn_modules(model)
- (b, s) = bidirectional_mask.shape
- max_length = attn_modules[0].bias.shape[-1]
- if s > max_length:
- raise ValueError(f'bidirectional_mask sequence length (={s}) exceeds the ' + f'max length allowed by the model ({max_length}).')
- assert s <= max_length
- if s < max_length:
- pad = torch.zeros((int(b), int(max_length - s)), dtype=bidirectional_mask.dtype, device=bidirectional_mask.device)
- bidirectional_mask = torch.cat([bidirectional_mask, pad], dim=1)
- bidirectional = bidirectional_mask.unsqueeze(1).unsqueeze(1)
- for attn_module in attn_modules:
- attn_module.bias.data = torch.logical_or(attn_module.bias.data, bidirectional)
- output = call_og_forward()
- for attn_module in attn_modules:
- attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
- return output
-
- def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]):
- """Wraps original generate to enable PrefixLM attention."""
- attn_modules = _get_attn_modules(model)
- for attn_module in attn_modules:
- attn_module.bias.data[:] = 1
- output = self._original_generate(*args, **kwargs)
- for attn_module in attn_modules:
- attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
- return output
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'generate', MethodType(generate, model))
- setattr(model, '_prefix_lm_converted', True)
- return model
-
-def _convert_bloom_causal_lm_to_prefix_lm(model: BloomForCausalLM) -> BloomForCausalLM:
- """Converts a BLOOM Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `BloomForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
- assert isinstance(model, BloomForCausalLM)
- assert model.config.add_cross_attention == False, 'Only supports BLOOM decoder-only models'
-
- def _prepare_attn_mask(self: BloomModel, attention_mask: torch.Tensor, bidirectional_mask: Optional[torch.Tensor], input_shape: Tuple[int, int], past_key_values_length: int) -> torch.BoolTensor:
- combined_attention_mask = None
- device = attention_mask.device
- (_, src_length) = input_shape
- if src_length > 1:
- combined_attention_mask = _make_causal_mask_bloom(input_shape, device=device, past_key_values_length=past_key_values_length)
- if bidirectional_mask is not None:
- assert attention_mask.shape == bidirectional_mask.shape
- expanded_bidirectional_mask = _expand_mask_bloom(bidirectional_mask, tgt_length=src_length)
- combined_attention_mask = torch.logical_and(combined_attention_mask, expanded_bidirectional_mask)
- expanded_attn_mask = _expand_mask_bloom(attention_mask, tgt_length=src_length)
- combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask
- return combined_attention_mask
-
- def _build_alibi_tensor(self: BloomModel, batch_size: int, query_length: int, key_length: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
- num_heads = self.config.n_head
- closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
- base = torch.tensor(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), device=device, dtype=torch.float32)
- powers = torch.arange(1, 1 + closest_power_of_2, device=device, dtype=torch.int32)
- slopes = torch.pow(base, powers)
- if closest_power_of_2 != num_heads:
- extra_base = torch.tensor(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), device=device, dtype=torch.float32)
- num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
- extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32)
- slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
- qa = torch.arange(query_length, device=device, dtype=torch.int32).view(-1, 1)
- ka = torch.arange(key_length, device=device, dtype=torch.int32).view(1, -1)
- diffs = qa - ka + key_length - query_length
- diffs = -diffs.abs()
- alibi = slopes.view(1, num_heads, 1, 1) * diffs.view(1, 1, query_length, key_length)
- alibi = alibi.expand(batch_size, -1, -1, -1).reshape(-1, query_length, key_length)
- return alibi.to(dtype)
- KeyValueT = Tuple[torch.Tensor, torch.Tensor]
-
- def forward(self: BloomModel, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[KeyValueT, ...]]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
- if deprecated_arguments.pop('position_ids', False) is not False:
- warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. ' + 'You can safely ignore passing `position_ids`.', FutureWarning)
- if len(deprecated_arguments) > 0:
- raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
- elif input_ids is not None:
- (batch_size, seq_length) = input_ids.shape
- elif inputs_embeds is not None:
- (batch_size, seq_length, _) = inputs_embeds.shape
- else:
- raise ValueError('You have to specify either input_ids or inputs_embeds')
- if past_key_values is None:
- past_key_values = tuple([None] * len(self.h))
- head_mask = self.get_head_mask(head_mask, self.config.n_layer)
- if inputs_embeds is None:
- inputs_embeds = self.word_embeddings(input_ids)
- hidden_states = self.word_embeddings_layernorm(inputs_embeds)
- presents = () if use_cache else None
- all_self_attentions = () if output_attentions else None
- all_hidden_states = () if output_hidden_states else None
- seq_length_with_past = seq_length
- past_key_values_length = 0
- if past_key_values[0] is not None:
- tmp = past_key_values[0][0]
- past_key_values_length = tmp.shape[2]
- seq_length_with_past = seq_length_with_past + past_key_values_length
- if attention_mask is None:
- attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
- else:
- attention_mask = attention_mask.to(hidden_states.device)
- alibi = self._build_alibi_tensor(batch_size=batch_size, query_length=seq_length, key_length=seq_length_with_past, dtype=hidden_states.dtype, device=hidden_states.device)
- causal_mask = self._prepare_attn_mask(attention_mask, bidirectional_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length)
- for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)):
- if output_hidden_states:
- hst = (hidden_states,)
- all_hidden_states = all_hidden_states + hst
- if self.gradient_checkpointing and self.training:
- if use_cache:
- logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
- use_cache = False
-
- def create_custom_forward(module):
-
- def custom_forward(*inputs):
- return module(*inputs, use_cache=use_cache, output_attentions=output_attentions)
- return custom_forward
- outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, alibi, causal_mask, head_mask[i])
- else:
- outputs = block(hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi)
- hidden_states = outputs[0]
- if use_cache is True:
- presents = presents + (outputs[1],)
- if output_attentions:
- oa = (outputs[2 if use_cache else 1],)
- all_self_attentions = all_self_attentions + oa
- hidden_states = self.ln_f(hidden_states)
- if output_hidden_states:
- hst = (hidden_states,)
- all_hidden_states = all_hidden_states + hst
- if not return_dict:
- return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None))
- return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions)
- setattr(model.transformer, '_prepare_attn_mask', MethodType(_prepare_attn_mask, model.transformer))
- setattr(model.transformer, '_build_alibi_tensor', MethodType(_build_alibi_tensor, model.transformer))
- setattr(model.transformer, 'forward', MethodType(forward, model.transformer))
- KeyValueT = Tuple[torch.Tensor, torch.Tensor]
-
- def forward(self: BloomForCausalLM, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[KeyValueT, ...]]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
- """Replacement forward method for BloomCausalLM."""
- if deprecated_arguments.pop('position_ids', False) is not False:
- warnings.warn('`position_ids` have no functionality in BLOOM and will be removed ' + 'in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning)
- if len(deprecated_arguments) > 0:
- raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, bidirectional_mask=bidirectional_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- hidden_states = transformer_outputs[0]
- lm_logits = self.lm_head(hidden_states)
- loss = None
- if labels is not None:
- shift_logits = lm_logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
- (batch_size, seq_length, vocab_size) = shift_logits.shape
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length))
- if not return_dict:
- output = (lm_logits,) + transformer_outputs[1:]
- return (loss,) + output if loss is not None else output
- return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
-
- def prepare_inputs_for_generation(self: BloomForCausalLM, input_ids: torch.LongTensor, past: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> dict:
- if past:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- bidirectional_mask = None
- if past[0][0].shape[0] == input_ids.shape[0]:
- past = self._convert_to_bloom_cache(past)
- else:
- bidirectional_mask = torch.ones_like(input_ids)
- return {'input_ids': input_ids, 'past_key_values': past, 'use_cache': True, 'attention_mask': attention_mask, 'bidirectional_mask': bidirectional_mask}
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'prepare_inputs_for_generation', MethodType(prepare_inputs_for_generation, model))
- setattr(model, '_prefix_lm_converted', True)
- return model
-
-def _convert_opt_causal_lm_to_prefix_lm(model: OPTForCausalLM) -> OPTForCausalLM:
- """Converts an OPT Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `OPTForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
- assert isinstance(model, OPTForCausalLM)
- assert model.config.add_cross_attention == False, 'Only supports OPT decoder-only models'
- setattr(model, '_original_forward', getattr(model, 'forward'))
- setattr(model, '_original_generate', getattr(model, 'generate'))
- model.model.decoder.bidirectional_mask = None
-
- def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
- combined_attention_mask = None
- if input_shape[-1] > 1:
- if self.bidirectional_mask == 'g':
- (bsz, src_length) = input_shape
- combined_attention_mask = torch.zeros((bsz, 1, src_length, src_length + past_key_values_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
- else:
- combined_attention_mask = _make_causal_mask_opt(input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length).to(inputs_embeds.device)
- if self.bidirectional_mask is not None:
- assert attention_mask.shape == self.bidirectional_mask.shape
- expanded_bidirectional_mask = _expand_mask_opt(self.bidirectional_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
- combined_attention_mask = torch.maximum(expanded_bidirectional_mask, combined_attention_mask)
- if attention_mask is not None:
- expanded_attn_mask = _expand_mask_opt(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
- combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
- return combined_attention_mask
- setattr(model.model.decoder, '_prepare_decoder_attention_mask', MethodType(_prepare_decoder_attention_mask, model.model.decoder))
-
- def forward(self: OPTForCausalLM, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.ByteTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
-
- def call_og_forward():
- return self._original_forward(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- if bidirectional_mask is None:
- return call_og_forward()
- self.model.decoder.bidirectional_mask = bidirectional_mask
- try:
- outputs = call_og_forward()
- except:
- self.model.decoder.bidirectional_mask = None
- raise
- self.model.decoder.bidirectional_mask = None
- return outputs
-
- def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Dict[str, Any]):
- """Wraps original generate to enable PrefixLM-style attention."""
- self.model.decoder.bidirectional_mask = 'g'
- try:
- output = self._original_generate(*args, **kwargs)
- except:
- self.model.decoder.bidirectional_mask = None
- raise
- self.model.decoder.bidirectional_mask = None
- return output
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'generate', MethodType(generate, model))
- setattr(model, '_prefix_lm_converted', True)
- return model
-_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM, OPTForCausalLM)
-CAUSAL_LM_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM, BloomForCausalLM, OPTForCausalLM]
-
-def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:
- """Converts a HuggingFace Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `GPT2LMHeadModel`
- - `GPTNeoForCausalLM`
- - `GPTNeoXForCausalLM`
- - `GPTJForCausalLM`
- - `BloomForCausalLM`
- - `OPTForCausalLM`
-
- Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the
- `generate` method and/or select underlying methods depending on the model class.
-
- These changes preserve the model API, but add a new input to `forward`: "bidirectional_mask".
-
- Notes on training:
- To actually train the converted model as a Prefix LM, training batches will need to indicate
- the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.
-
- **This is not a standard input and requires custom layers either within or after your dataloader.**
-
- In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`
- such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.
- That is, the prefix portion of the sequence should not generate any loss. Loss should only be
- generated by the target portion of the sequence.
-
- Notes on `GPTNeoForCausalLM`:
- To simplify the implementation, "global" and "local" attention layers are handled differently.
- For "global" layers, we handle conversion as described above. For "local" layers, which use a
- causal attention mask within a restricted local window, we do not alter the masking.
-
- Notes on `forward` method conversion:
- After conversion, the `forward` method will handle a new input, `bidirectional_mask`,
- which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions
- belonging to the prefix (prefix tokens can attend to one another bidirectionally), and
- 0 indicates token positions belonging to the target.
-
- The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing
- causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset
- the causal masks before returning the result.
-
- Notes on `generate` method conversion:
- After conversion, the `generate` method will have the same signature but will internally
- convert all causal masks to be purely bidirectional, call the original `generate` method, and
- (where appropriate) reset the causal masks before returning the result.
-
- This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token
- "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates
- each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one
- another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and
- previously-generated tokens (also as expected in a Prefix LM).
-
- To preserve the API, the original methods are renamed to `_original_forward` and
- `_original_generate`, and replaced with new `forward` and `generate` methods that wrap
- them, respectively, although implementation details vary by model class.
- """
- if isinstance(model, _SUPPORTED_GPT_MODELS):
- return _convert_gpt_causal_lm_to_prefix_lm(model)
- elif isinstance(model, BloomForCausalLM):
- return _convert_bloom_causal_lm_to_prefix_lm(model)
- elif isinstance(model, OPTForCausalLM):
- return _convert_opt_causal_lm_to_prefix_lm(model)
- else:
- raise TypeError(f'Cannot convert model to Prefix LM. ' + f'Model does not belong to set of supported HF models:' + f'\n{_SUPPORTED_HF_MODELS}')
-
-def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):
- """Attempts to add bidirectional_mask to batch if missing.
-
- Raises:
- KeyError if bidirectional_mask is missing and can't be inferred
- """
- if 'bidirectional_mask' not in batch:
- if batch.get('mode', None) == 'icl_task':
- batch['bidirectional_mask'] = batch['attention_mask'].clone()
- for (i, continuation_indices) in enumerate(batch['continuation_indices']):
- batch['bidirectional_mask'][i, continuation_indices] = 0
- elif 'labels' in batch and 'attention_mask' in batch:
- batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])
- else:
- raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')
\ No newline at end of file
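For reference, the training-batch convention described in the `convert_hf_causal_lm_to_prefix_lm` docstring above can be sketched as follows (illustrative only, not part of the patch; the token ids are placeholders and the converter is the function defined above):

```python
import torch
from transformers import GPT2LMHeadModel

# Convert a supported HF causal LM into a Prefix LM (per the docstring above).
model = convert_hf_causal_lm_to_prefix_lm(GPT2LMHeadModel.from_pretrained('gpt2'))

input_ids = torch.tensor([[50256, 11, 22, 33, 44, 55]])   # prefix tokens followed by target tokens
attention_mask = torch.ones_like(input_ids)
bidirectional_mask = torch.tensor([[1, 1, 1, 0, 0, 0]])   # 1 = prefix, 0 = target
labels = input_ids.clone()
labels[bidirectional_mask == 1] = -100                    # no loss on the prefix portion

out = model(input_ids=input_ids,
            attention_mask=attention_mask,
            bidirectional_mask=bidirectional_mask,
            labels=labels)
```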
diff --git a/llava/model/language_model/mpt/meta_init_context.py b/llava/model/language_model/mpt/meta_init_context.py
deleted file mode 100644
index 6cba6fff0..000000000
--- a/llava/model/language_model/mpt/meta_init_context.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from contextlib import contextmanager
-import torch
-import torch.nn as nn
-
-@contextmanager
-def init_empty_weights(include_buffers: bool=False):
- """Meta initialization context manager.
-
- A context manager under which models are initialized with all parameters
- on the meta device, therefore creating an empty model. Useful when just
- initializing the model would blow the available RAM.
-
- Args:
- include_buffers (`bool`, *optional*, defaults to `False`): Whether or
- not to also put all buffers on the meta device while initializing.
-
- Example:
- ```python
- import torch.nn as nn
-
- # Initialize a model with 100 billion parameters in no time and without using any RAM.
- with init_empty_weights():
- tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
- ```
-
- Any model created under this context manager has no weights. As such you can't do something like
- `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
- """
- with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:
- yield f
-
-@contextmanager
-def init_on_device(device: torch.device, include_buffers: bool=False):
- """Device initialization context manager.
-
- A context manager under which models are initialized with all parameters
- on the specified device.
-
- Args:
- device (`torch.device`): Device to initialize all parameters on.
- include_buffers (`bool`, *optional*, defaults to `False`): Whether or
- not to also put all buffers on the specified device while initializing.
-
- Example:
- ```python
- import torch.nn as nn
-
- with init_on_device(device=torch.device("cuda")):
- tst = nn.Linear(100, 100) # on `cuda` device
- ```
- """
- old_register_parameter = nn.Module.register_parameter
- if include_buffers:
- old_register_buffer = nn.Module.register_buffer
-
- def register_empty_parameter(module, name, param):
- old_register_parameter(module, name, param)
- if param is not None:
- param_cls = type(module._parameters[name])
- kwargs = module._parameters[name].__dict__
- module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
-
- def register_empty_buffer(module, name, buffer):
- old_register_buffer(module, name, buffer)
- if buffer is not None:
- module._buffers[name] = module._buffers[name].to(device)
- if include_buffers:
- tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}
- else:
- tensor_constructors_to_patch = {}
-
- def patch_tensor_constructor(fn):
-
- def wrapper(*args, **kwargs):
- kwargs['device'] = device
- return fn(*args, **kwargs)
- return wrapper
- try:
- nn.Module.register_parameter = register_empty_parameter
- if include_buffers:
- nn.Module.register_buffer = register_empty_buffer
- for torch_function_name in tensor_constructors_to_patch.keys():
- setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
- yield
- finally:
- nn.Module.register_parameter = old_register_parameter
- if include_buffers:
- nn.Module.register_buffer = old_register_buffer
- for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items():
- setattr(torch, torch_function_name, old_torch_function)
\ No newline at end of file
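A minimal sketch of what `init_empty_weights` provides (illustrative only, not part of the patch; assumes the context manager defined above is importable): parameters are created on the meta device and hold no data, so the model must be re-materialized, e.g. by loading a checkpoint, before it can run.

```python
import torch
import torch.nn as nn

with init_empty_weights():
    model = nn.Linear(4, 4)

print(model.weight.device)    # meta
print(model.weight.is_meta)   # True
# model(torch.randn(1, 4))    # would fail: meta tensors carry no values
```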
diff --git a/llava/model/language_model/mpt/modeling_mpt.py b/llava/model/language_model/mpt/modeling_mpt.py
deleted file mode 100644
index 13313441b..000000000
--- a/llava/model/language_model/mpt/modeling_mpt.py
+++ /dev/null
@@ -1,331 +0,0 @@
-"""A simple, flexible implementation of a GPT model.
-
-Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
-"""
-import math
-import warnings
-from typing import List, Optional, Tuple, Union
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
-from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
-from .attention import attn_bias_shape, build_attn_bias
-from .blocks import MPTBlock
-from .custom_embedding import SharedEmbedding
-from .norm import NORM_CLASS_REGISTRY
-from .configuration_mpt import MPTConfig
-from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
-from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
-from .meta_init_context import init_empty_weights
-from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
-try:
- from .flash_attn_triton import flash_attn_func
-except:
- pass
-Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
-
-class MPTPreTrainedModel(PreTrainedModel):
- config_class = MPTConfig
- base_model_prefix = 'model'
- _no_split_modules = ['MPTBlock']
-
-class MPTModel(MPTPreTrainedModel):
-
- def __init__(self, config: MPTConfig):
- config._validate_config()
- super().__init__(config)
- self.attn_impl = config.attn_config['attn_impl']
- self.prefix_lm = config.attn_config['prefix_lm']
- self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
- self.alibi = config.attn_config['alibi']
- self.alibi_bias_max = config.attn_config['alibi_bias_max']
- if config.init_device == 'mixed':
- if dist.get_local_rank() == 0:
- config.init_device = 'cpu'
- else:
- config.init_device = 'meta'
- if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
- norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
- raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
- norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
- self.embedding_fraction = config.embedding_fraction
- self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
- if not self.alibi:
- self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
- self.emb_drop = nn.Dropout(config.emb_pdrop)
- self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
- self.norm_f = norm_class(config.d_model, device=config.init_device)
- if config.init_device != 'meta':
- print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
- self.apply(self.param_init_fn)
- self.is_causal = not self.prefix_lm
- self._attn_bias_initialized = False
- self.attn_bias = None
- self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
- if config.no_bias:
- for module in self.modules():
- if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
- if config.verbose:
- warnings.warn(f'Removing bias ({module.bias}) from {module}.')
- module.register_parameter('bias', None)
- if config.verbose and config.verbose > 2:
- print(self)
- if 'verbose' not in self.config.init_config:
- self.config.init_config['verbose'] = self.config.verbose
- if self.config.init_config['verbose'] > 1:
- init_fn_name = self.config.init_config['name']
- warnings.warn(f'Using {init_fn_name} initialization.')
- self.gradient_checkpointing = False
-
- def get_input_embeddings(self):
- return self.wte
-
- def set_input_embeddings(self, value):
- self.wte = value
-
- @torch.no_grad()
- def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
- if not self._attn_bias_initialized:
- if self.attn_bias_shape:
- self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
- self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
- self._attn_bias_initialized = True
- if self.attn_impl == 'flash':
- return (self.attn_bias, attention_mask)
- if self.attn_bias is not None:
- self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
- attn_bias = self.attn_bias
- if self.prefix_lm:
- assert isinstance(attn_bias, torch.Tensor)
- assert isinstance(prefix_mask, torch.Tensor)
- attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
- if self.attn_uses_sequence_id and sequence_id is not None:
- assert isinstance(attn_bias, torch.Tensor)
- attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
- if attention_mask is not None:
- s_k = attention_mask.shape[-1]
- if attn_bias is None:
- attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
- else:
- _s_k = max(0, attn_bias.size(-1) - s_k)
- attn_bias = attn_bias[:, :, :, _s_k:]
- if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
- raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
- min_val = torch.finfo(attn_bias.dtype).min
- attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
- return (attn_bias, None)
-
- def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
- (s_k, s_q) = attn_bias.shape[-2:]
- if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
- raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_seq_len} ' + f'but are {s_k} and {s_q}.')
- seq_len = prefix_mask.shape[-1]
- if seq_len > self.config.max_seq_len:
- raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
- attn_bias = attn_bias[..., :seq_len, :seq_len]
- causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
- prefix = prefix_mask.view(-1, 1, 1, seq_len)
- cannot_attend = ~torch.logical_or(causal, prefix.bool())
- min_val = torch.finfo(attn_bias.dtype).min
- attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
- return attn_bias
-
- def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
- seq_len = sequence_id.shape[-1]
- if seq_len > self.config.max_seq_len:
- raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
- attn_bias = attn_bias[..., :seq_len, :seq_len]
- cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
- min_val = torch.finfo(attn_bias.dtype).min
- attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
- return attn_bias
-
- def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
- return_dict = return_dict if return_dict is not None else self.config.return_dict
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- if attention_mask is not None:
- attention_mask = attention_mask.bool()
- if prefix_mask is not None:
- prefix_mask = prefix_mask.bool()
- if not return_dict:
- raise NotImplementedError('return_dict False is not implemented yet for MPT')
- if output_attentions:
- if self.attn_impl != 'torch':
- raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
- if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
- raise NotImplementedError('MPT does not support training with left padding.')
- if self.prefix_lm and prefix_mask is None:
- raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
- if self.training:
- if self.attn_uses_sequence_id and sequence_id is None:
- raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
- elif self.attn_uses_sequence_id is False and sequence_id is not None:
- warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
- if input_ids is not None:
- S = input_ids.size(1)
- assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
- tok_emb = self.wte(input_ids)
- else:
- assert inputs_embeds is not None
- assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
- S = inputs_embeds.size(1)
- tok_emb = inputs_embeds
- if self.alibi:
- x = tok_emb
- else:
- past_position = 0
- if past_key_values is not None:
- if len(past_key_values) != self.config.n_layers:
- raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
- past_position = past_key_values[0][0].size(1)
- if self.attn_impl == 'torch':
- past_position = past_key_values[0][0].size(3)
- if S + past_position > self.config.max_seq_len:
- raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S}, this model only supports total sequence length <= {self.config.max_seq_len}.')
- pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
- if attention_mask is not None:
- pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
- pos_emb = self.wpe(pos)
- x = tok_emb + pos_emb
- if self.embedding_fraction == 1:
- x = self.emb_drop(x)
- else:
- x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
- assert isinstance(self.emb_drop, nn.Module)
- x = self.emb_drop(x_shrunk)
- (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
- if use_cache and past_key_values is None:
- past_key_values = [() for _ in range(self.config.n_layers)]
- all_hidden_states = () if output_hidden_states else None
- all_self_attns = () if output_attentions else None
- for (b_idx, block) in enumerate(self.blocks):
- if output_hidden_states:
- assert all_hidden_states is not None
- all_hidden_states = all_hidden_states + (x,)
- past_key_value = past_key_values[b_idx] if past_key_values is not None else None
- if self.gradient_checkpointing and self.training:
- (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
- else:
- (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
- if past_key_values is not None:
- past_key_values[b_idx] = past_key_value
- if output_attentions:
- assert all_self_attns is not None
- all_self_attns = all_self_attns + (attn_weights,)
- x = self.norm_f(x)
- if output_hidden_states:
- assert all_hidden_states is not None
- all_hidden_states = all_hidden_states + (x,)
- return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
-
- def param_init_fn(self, module):
- init_fn_name = self.config.init_config['name']
- MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
-
- def fsdp_wrap_fn(self, module):
- return isinstance(module, MPTBlock)
-
- def activation_checkpointing_fn(self, module):
- return isinstance(module, MPTBlock)
-
-class MPTForCausalLM(MPTPreTrainedModel):
-
- def __init__(self, config: MPTConfig):
- super().__init__(config)
- if not config.tie_word_embeddings:
- raise ValueError('MPTForCausalLM only supports tied word embeddings')
- print(f'Instantiating an MPTForCausalLM model from {__file__}')
- self.transformer = MPTModel(config)
- for child in self.transformer.children():
- if isinstance(child, torch.nn.ModuleList):
- continue
- if isinstance(child, torch.nn.Module):
- child._fsdp_wrap = True
- self.logit_scale = None
- if config.logit_scale is not None:
- logit_scale = config.logit_scale
- if isinstance(logit_scale, str):
- if logit_scale == 'inv_sqrt_d_model':
- logit_scale = 1 / math.sqrt(config.d_model)
- else:
- raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
- self.logit_scale = logit_scale
-
- def get_input_embeddings(self):
- return self.transformer.wte
-
- def set_input_embeddings(self, value):
- self.transformer.wte = value
-
- def get_output_embeddings(self):
- return self.transformer.wte
-
- def set_output_embeddings(self, new_embeddings):
- self.transformer.wte = new_embeddings
-
- def set_decoder(self, decoder):
- self.transformer = decoder
-
- def get_decoder(self):
- return self.transformer
-
- def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None):
- return_dict = return_dict if return_dict is not None else self.config.return_dict
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- if inputs_embeds is not None:
- raise NotImplementedError('inputs_embeds has to be None (for hf/peft support).')
- outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache)
- logits = self.transformer.wte(outputs.last_hidden_state.to(self.transformer.wte.weight.device), True)
- if self.logit_scale is not None:
- if self.logit_scale == 0:
- warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
- logits *= self.logit_scale
- loss = None
- if labels is not None:
- labels = torch.roll(labels, shifts=-1)
- labels[:, -1] = -100
- loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1))
- return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
-
- def param_init_fn(self, module):
- init_fn_name = self.config.init_config['name']
- MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
-
- def fsdp_wrap_fn(self, module):
- return isinstance(module, MPTBlock)
-
- def activation_checkpointing_fn(self, module):
- return isinstance(module, MPTBlock)
-
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
- if inputs_embeds is not None:
- raise NotImplementedError('inputs_embeds is not implemented for MPT yet')
- attention_mask = kwargs['attention_mask'].bool()
- if attention_mask[:, -1].sum() != attention_mask.shape[0]:
- raise NotImplementedError('MPT does not support generation with right padding.')
- if self.transformer.attn_uses_sequence_id and self.training:
- sequence_id = torch.zeros_like(input_ids[:1])
- else:
- sequence_id = None
- if past_key_values is not None:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- if self.transformer.prefix_lm:
- prefix_mask = torch.ones_like(attention_mask)
- if kwargs.get('use_cache') == False:
- raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
- else:
- prefix_mask = None
- return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True)}
-
- @staticmethod
- def _reorder_cache(past_key_values, beam_idx):
- """Used by HuggingFace generate when using beam search with kv-caching.
-
- See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133
- for an example in transformers.
- """
- reordered_past = []
- for layer_past in past_key_values:
- reordered_past += [tuple((past_state.index_select(0, beam_idx) for past_state in layer_past))]
- return reordered_past
\ No newline at end of file
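The loss in `MPTForCausalLM.forward` above (roll the labels left by one, ignore the final position) is equivalent to the conventional shift-logits/shift-labels causal-LM loss; a quick check with random placeholder tensors (illustrative only, not part of the patch):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(2, 5, 11)            # (batch, seq, vocab)
labels = torch.randint(0, 11, (2, 5))

# MPT-style: flattened roll, then mask the wrapped-around last column
rolled = torch.roll(labels, shifts=-1)
rolled[:, -1] = -100
loss_mpt = F.cross_entropy(logits.view(-1, 11), rolled.view(-1))

# Conventional: drop the last logit and the first label
loss_shift = F.cross_entropy(logits[:, :-1].reshape(-1, 11), labels[:, 1:].reshape(-1))

assert torch.allclose(loss_mpt, loss_shift)
```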
diff --git a/llava/model/language_model/mpt/norm.py b/llava/model/language_model/mpt/norm.py
deleted file mode 100644
index 067b6140f..000000000
--- a/llava/model/language_model/mpt/norm.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import torch
-
-def _cast_if_autocast_enabled(tensor):
- if torch.is_autocast_enabled():
- if tensor.device.type == 'cuda':
- dtype = torch.get_autocast_gpu_dtype()
- elif tensor.device.type == 'cpu':
- dtype = torch.get_autocast_cpu_dtype()
- else:
- raise NotImplementedError()
- return tensor.to(dtype=dtype)
- return tensor
-
-class LPLayerNorm(torch.nn.LayerNorm):
-
- def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
- super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype)
-
- def forward(self, x):
- module_device = x.device
- downcast_x = _cast_if_autocast_enabled(x)
- downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
- downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
- with torch.autocast(enabled=False, device_type=module_device.type):
- return torch.nn.functional.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)
-
-def rms_norm(x, weight=None, eps=1e-05):
- output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
- if weight is not None:
- return output * weight
- return output
-
-class RMSNorm(torch.nn.Module):
-
- def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
- super().__init__()
- self.eps = eps
- if weight:
- self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device))
- else:
- self.register_parameter('weight', None)
-
- def forward(self, x):
- return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
-
-class LPRMSNorm(RMSNorm):
-
- def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
- super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device)
-
- def forward(self, x):
- downcast_x = _cast_if_autocast_enabled(x)
- downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
- with torch.autocast(enabled=False, device_type=x.device.type):
- return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype)
-NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}
\ No newline at end of file
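`MPTModel` resolves `config.norm_type` through `NORM_CLASS_REGISTRY`; a small sketch of the lookup and of the RMSNorm formula it implements (illustrative only, not part of the patch; shapes are placeholders):

```python
import torch

d_model = 8
norm = NORM_CLASS_REGISTRY['rmsnorm'](d_model)   # same lookup as in modeling_mpt.py
x = torch.randn(2, 3, d_model)

# RMSNorm scales each feature vector by the inverse of its root-mean-square.
expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-05)
torch.testing.assert_close(norm(x), expected)
```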
diff --git a/llava/model/language_model/mpt/param_init_fns.py b/llava/model/language_model/mpt/param_init_fns.py
deleted file mode 100644
index 418b83ca2..000000000
--- a/llava/model/language_model/mpt/param_init_fns.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import math
-import warnings
-from collections.abc import Sequence
-from functools import partial
-from typing import Optional, Tuple, Union
-import torch
-from torch import nn
-from .norm import NORM_CLASS_REGISTRY
-
-def torch_default_param_init_fn_(module: nn.Module, verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f"Initializing network using module's reset_parameters attribute")
- if hasattr(module, 'reset_parameters'):
- module.reset_parameters()
-
-def fused_init_helper_(module: nn.Module, init_fn_):
- _fused = getattr(module, '_fused', None)
- if _fused is None:
- raise RuntimeError(f'Internal logic error')
- (dim, splits) = _fused
- splits = (0, *splits, module.weight.size(dim))
- for (s, e) in zip(splits[:-1], splits[1:]):
- slice_indices = [slice(None)] * module.weight.ndim
- slice_indices[dim] = slice(s, e)
- init_fn_(module.weight[slice_indices])
-
-def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f'If model has bias parameters they are initialized to 0.')
- init_div_is_residual = init_div_is_residual
- if init_div_is_residual is False:
- div_is_residual = 1.0
- elif init_div_is_residual is True:
- div_is_residual = math.sqrt(2 * n_layers)
- elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
- div_is_residual = init_div_is_residual
- elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
- div_is_residual = float(init_div_is_residual)
- else:
- div_is_residual = 1.0
- raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')
- if init_div_is_residual is not False:
- if verbose > 1:
- warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')
- if isinstance(module, nn.Linear):
- if hasattr(module, '_fused'):
- fused_init_helper_(module, init_fn_)
- else:
- init_fn_(module.weight)
- if module.bias is not None:
- torch.nn.init.zeros_(module.bias)
- if init_div_is_residual is not False and getattr(module, '_is_residual', False):
- with torch.no_grad():
- module.weight.div_(div_is_residual)
- elif isinstance(module, nn.Embedding):
- if emb_init_std is not None:
- std = emb_init_std
- if std == 0:
- warnings.warn(f'Embedding layer initialized to 0.')
- emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
- if verbose > 1:
- warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')
- elif emb_init_uniform_lim is not None:
- lim = emb_init_uniform_lim
- if isinstance(lim, Sequence):
- if len(lim) > 2:
- raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')
- if lim[0] == lim[1]:
- warnings.warn(f'Embedding layer initialized to {lim[0]}.')
- else:
- if lim == 0:
- warnings.warn(f'Embedding layer initialized to 0.')
- lim = [-lim, lim]
- (a, b) = lim
- emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
- if verbose > 1:
- warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')
- else:
- emb_init_fn_ = init_fn_
- emb_init_fn_(module.weight)
- elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
- if verbose > 1:
- warnings.warn(f'Norm weights are set to 1. If norm layer has a bias it is initialized to 0.')
- if hasattr(module, 'weight') and module.weight is not None:
- torch.nn.init.ones_(module.weight)
- if hasattr(module, 'bias') and module.bias is not None:
- torch.nn.init.zeros_(module.bias)
- elif isinstance(module, nn.MultiheadAttention):
- if module._qkv_same_embed_dim:
- assert module.in_proj_weight is not None
- assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)
- assert d_model is not None
- _d = d_model
- splits = (0, _d, 2 * _d, 3 * _d)
- for (s, e) in zip(splits[:-1], splits[1:]):
- init_fn_(module.in_proj_weight[s:e])
- else:
- assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)
- assert module.in_proj_weight is None
- init_fn_(module.q_proj_weight)
- init_fn_(module.k_proj_weight)
- init_fn_(module.v_proj_weight)
- if module.in_proj_bias is not None:
- torch.nn.init.zeros_(module.in_proj_bias)
- if module.bias_k is not None:
- torch.nn.init.zeros_(module.bias_k)
- if module.bias_v is not None:
- torch.nn.init.zeros_(module.bias_v)
- init_fn_(module.out_proj.weight)
- if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):
- with torch.no_grad():
- module.out_proj.weight.div_(div_is_residual)
- if module.out_proj.bias is not None:
- torch.nn.init.zeros_(module.out_proj.bias)
- else:
- for _ in module.parameters(recurse=False):
- raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')
-
-def _normal_init_(std, mean=0.0):
- return partial(torch.nn.init.normal_, mean=mean, std=std)
-
-def _normal_param_init_fn_(module: nn.Module, std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- init_fn_ = _normal_init_(std=std)
- if verbose > 1:
- warnings.warn(f'Using torch.nn.init.normal_ init fn mean=0.0, std={std}')
- generic_param_init_fn_(module=module, init_fn_=init_fn_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def baseline_param_init_fn_(module: nn.Module, init_std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- if init_std is None:
- raise ValueError("You must set model.init_config['init_std'] to a float value to use the default initialization scheme.")
- _normal_param_init_fn_(module=module, std=init_std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def small_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- std = math.sqrt(2 / (5 * d_model))
- _normal_param_init_fn_(module=module, std=std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def neox_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- """From section 2.3.1 of GPT-NeoX-20B:
-
- An Open-Source Autoregressive Language Model — Black et al. (2022)
- see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
- and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
- """
- del kwargs
- residual_div = n_layers / math.sqrt(10)
- if verbose > 1:
- warnings.warn(f'setting init_div_is_residual to {residual_div}')
- small_param_init_fn_(module=module, d_model=d_model, n_layers=n_layers, init_div_is_residual=residual_div, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def kaiming_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f'Using nn.init.kaiming_uniform_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
- kaiming_uniform_ = partial(nn.init.kaiming_uniform_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
- generic_param_init_fn_(module=module, init_fn_=kaiming_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def kaiming_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f'Using nn.init.kaiming_normal_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
- kaiming_normal_ = partial(torch.nn.init.kaiming_normal_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
- generic_param_init_fn_(module=module, init_fn_=kaiming_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def xavier_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, verbose: int=0, **kwargs):
- del kwargs
- xavier_uniform_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
- if verbose > 1:
- warnings.warn(f'Using torch.nn.init.xavier_uniform_ init fn with parameters: ' + f'gain={init_gain}')
- generic_param_init_fn_(module=module, init_fn_=xavier_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def xavier_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, verbose: int=0, **kwargs):
- xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
- if verbose > 1:
- warnings.warn(f'Using torch.nn.init.xavier_normal_ init fn with parameters: ' + f'gain={init_gain}')
- generic_param_init_fn_(module=module, init_fn_=xavier_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}
\ No newline at end of file
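`MPTModel.param_init_fn` (in `modeling_mpt.py` above) dispatches through this registry and is applied module-by-module via `nn.Module.apply`; a small sketch with placeholder sizes (illustrative only, not part of the patch):

```python
from functools import partial
import torch.nn as nn

mlp = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
init_fn = partial(MODEL_INIT_REGISTRY['kaiming_normal_'], n_layers=2, d_model=16)
mlp.apply(init_fn)   # Linear weights get Kaiming-normal init, biases are zeroed
```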
diff --git a/llava/model/llava_arch.py b/llava/model/llava_arch.py
index d2f396f39..5a5e62dfc 100644
--- a/llava/model/llava_arch.py
+++ b/llava/model/llava_arch.py
@@ -23,6 +23,8 @@
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from llava.mm_utils import get_anyres_image_grid_shape
+
class LlavaMetaModel:
@@ -33,6 +35,11 @@ def __init__(self, config):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
+ if 'unpad' in getattr(config, 'mm_patch_merge_type', ''):
+ self.image_newline = nn.Parameter(
+ torch.empty(config.hidden_size, dtype=self.dtype)
+ )
+
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
@@ -44,6 +51,7 @@ def initialize_vision_modules(self, model_args, fsdp=None):
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
+ mm_patch_merge_type = model_args.mm_patch_merge_type
self.config.mm_vision_tower = vision_tower
@@ -66,9 +74,16 @@ def initialize_vision_modules(self, model_args, fsdp=None):
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
+ self.config.mm_patch_merge_type = mm_patch_merge_type
if getattr(self, 'mm_projector', None) is None:
self.mm_projector = build_vision_projector(self.config)
+
+ if 'unpad' in mm_patch_merge_type:
+ embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
+ self.image_newline = nn.Parameter(
+ torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std
+ )
else:
# In case it is frozen by LoRA
for p in self.mm_projector.parameters():
@@ -82,6 +97,37 @@ def get_w(weights, keyword):
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
+def unpad_image(tensor, original_size):
+ """
+ Unpads a PyTorch tensor of a padded and resized image.
+
+ Args:
+ tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format.
+ original_size (tuple): The original size of the image (width, height).
+
+ Returns:
+ torch.Tensor: The unpadded image tensor.
+ """
+ original_width, original_height = original_size
+ current_height, current_width = tensor.shape[1:]
+
+ original_aspect_ratio = original_width / original_height
+ current_aspect_ratio = current_width / current_height
+
+ if original_aspect_ratio > current_aspect_ratio:
+ scale_factor = current_width / original_width
+ new_height = int(original_height * scale_factor)
+ padding = (current_height - new_height) // 2
+ unpadded_tensor = tensor[:, padding:current_height - padding, :]
+ else:
+ scale_factor = current_height / original_height
+ new_width = int(original_width * scale_factor)
+ padding = (current_width - new_width) // 2
+ unpadded_tensor = tensor[:, :, padding:current_width - padding]
+
+ return unpadded_tensor
+
+
class LlavaMetaForCausalLM(ABC):
@abstractmethod
@@ -97,28 +143,63 @@ def encode_images(self, images):
return image_features
def prepare_inputs_labels_for_multimodal(
- self, input_ids, position_ids, attention_mask, past_key_values, labels, images
+ self, input_ids, position_ids, attention_mask, past_key_values, labels,
+ images, image_sizes=None
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
- if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
- target_shape = past_key_values[-1][-1].shape[-2] + 1
- attention_mask = torch.cat((attention_mask, torch.ones(
- (attention_mask.shape[0], target_shape - attention_mask.shape[1]),
- dtype=attention_mask.dtype,
- device=attention_mask.device
- )), dim=1)
- position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
return input_ids, position_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
+ if type(images) is list:
+ images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
- image_features = [x.flatten(0, 1).to(self.device) for x in image_features]
+ mm_patch_merge_type = getattr(self.config, 'mm_patch_merge_type', 'flat')
+ image_aspect_ratio = getattr(self.config, 'image_aspect_ratio', 'square')
+ if mm_patch_merge_type == 'flat':
+ image_features = [x.flatten(0, 1) for x in image_features]
+ elif mm_patch_merge_type.startswith('spatial'):
+ new_image_features = []
+ for image_idx, image_feature in enumerate(image_features):
+ if image_feature.shape[0] > 1:
+ base_image_feature = image_feature[0]
+ image_feature = image_feature[1:]
+ height = width = self.get_vision_tower().num_patches_per_side
+ assert height * width == base_image_feature.shape[0]
+ if image_aspect_ratio == 'anyres':
+ num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.get_vision_tower().config.image_size)
+ image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
+ else:
+ raise NotImplementedError
+ if 'unpad' in mm_patch_merge_type:
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+ image_feature = unpad_image(image_feature, image_sizes[image_idx])
+ image_feature = torch.cat((
+ image_feature,
+ self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1)
+ ), dim=-1)
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+ else:
+ image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
+ image_feature = image_feature.flatten(0, 3)
+ image_feature = torch.cat((base_image_feature, image_feature), dim=0)
+ else:
+ image_feature = image_feature[0]
+ if 'unpad' in mm_patch_merge_type:
+ image_feature = torch.cat((
+ image_feature,
+ self.model.image_newline[None]
+ ), dim=0)
+ new_image_features.append(image_feature)
+ image_features = new_image_features
+ else:
+ raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
else:
- image_features = self.encode_images(images).to(self.device)
+ image_features = self.encode_images(images)
# TODO: image start / end is not implemented here to support pretraining.
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
@@ -140,7 +221,8 @@ def prepare_inputs_labels_for_multimodal(
if labels is None:
labels = torch.full_like(input_ids, IGNORE_INDEX)
- # remove the padding using attention_mask -- TODO: double check
+ # remove the padding using attention_mask -- FIXME
+ _input_ids = input_ids
input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
@@ -180,6 +262,8 @@ def prepare_inputs_labels_for_multimodal(
cur_new_input_embeds.append(cur_image_features)
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
+ cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]
+
cur_new_input_embeds = torch.cat(cur_new_input_embeds)
cur_new_labels = torch.cat(cur_new_labels)
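The new `unpad_image` helper strips the rows or columns that letterboxing added when a non-square image was fit onto a square feature grid; a worked example with placeholder sizes (illustrative only, not part of the patch):

```python
import torch

# A 640x480 (width x height) image mapped onto a 24x24 patch grid: only
# 480/640 of the grid height carries image content, so 18 of 24 rows remain.
features = torch.randn(4096, 24, 24)            # (C, H, W); channel count is a placeholder
unpadded = unpad_image(features, (640, 480))    # original (width, height), as stored in image_sizes
print(unpadded.shape)                            # torch.Size([4096, 18, 24])
```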
diff --git a/llava/model/multimodal_encoder/builder.py b/llava/model/multimodal_encoder/builder.py
index 2b13589d4..e89507c49 100644
--- a/llava/model/multimodal_encoder/builder.py
+++ b/llava/model/multimodal_encoder/builder.py
@@ -5,7 +5,7 @@
def build_vision_tower(vision_tower_cfg, **kwargs):
vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
- if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
+ if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion") or "ShareGPT4V" in vision_tower:
return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
raise ValueError(f'Unknown vision tower: {vision_tower}')
diff --git a/llava/model/multimodal_encoder/clip_encoder.py b/llava/model/multimodal_encoder/clip_encoder.py
index dbb9015b0..211781135 100644
--- a/llava/model/multimodal_encoder/clip_encoder.py
+++ b/llava/model/multimodal_encoder/clip_encoder.py
@@ -16,6 +16,8 @@ def __init__(self, vision_tower, args, delay_load=False):
if not delay_load:
self.load_model()
+ elif getattr(args, 'unfreeze_mm_vision_tower', False):
+ self.load_model()
else:
self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)
@@ -73,6 +75,10 @@ def config(self):
def hidden_size(self):
return self.config.hidden_size
+ @property
+ def num_patches_per_side(self):
+ return self.config.image_size // self.config.patch_size
+
@property
def num_patches(self):
return (self.config.image_size // self.config.patch_size) ** 2
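The new `num_patches_per_side` property gives the side length of the patch grid that `llava_arch.py` uses to reshape anyres features; for a CLIP ViT-L/14 tower at 336px, the arithmetic is (illustrative only, not part of the patch):

```python
image_size, patch_size = 336, 14
num_patches_per_side = image_size // patch_size   # 24
num_patches = num_patches_per_side ** 2           # 576
```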
diff --git a/llava/serve/gradio_web_server.py b/llava/serve/gradio_web_server.py
index e506334bd..0a7145421 100644
--- a/llava/serve/gradio_web_server.py
+++ b/llava/serve/gradio_web_server.py
@@ -168,6 +168,15 @@ def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request:
if "llava" in model_name.lower():
if 'llama-2' in model_name.lower():
template_name = "llava_llama_2"
+ elif "mistral" in model_name.lower() or "mixtral" in model_name.lower():
+ if 'orca' in model_name.lower():
+ template_name = "mistral_orca"
+ elif 'hermes' in model_name.lower():
+ template_name = "chatml_direct"
+ else:
+ template_name = "mistral_instruct"
+ elif 'llava-v1.6-34b' in model_name.lower():
+ template_name = "chatml_direct"
elif "v1" in model_name.lower():
if 'mmtag' in model_name.lower():
template_name = "v1_mmtag"
@@ -280,7 +289,7 @@ def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request:
title_markdown = ("""
# 🌋 LLaVA: Large Language and Vision Assistant
-[[Project Page](https://llava-vl.github.io)] [[Code](https://github.com/haotian-liu/LLaVA)] [[Model](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)] | 📚 [[LLaVA](https://arxiv.org/abs/2304.08485)] [[LLaVA-v1.5](https://arxiv.org/abs/2310.03744)]
+[[Project Page](https://llava-vl.github.io)] [[Code](https://github.com/haotian-liu/LLaVA)] [[Model](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)] | 📚 [[LLaVA](https://arxiv.org/abs/2304.08485)] [[LLaVA-v1.5](https://arxiv.org/abs/2310.03744)] [[LLaVA-v1.6](https://llava-vl.github.io/blog/2024-01-30-llava-1-6/)]
""")
tos_markdown = ("""
diff --git a/llava/serve/model_worker.py b/llava/serve/model_worker.py
index a7bcd0829..dc897ef2d 100644
--- a/llava/serve/model_worker.py
+++ b/llava/serve/model_worker.py
@@ -133,6 +133,7 @@ def generate_stream(self, params):
raise ValueError("Number of images does not match number of tokens in prompt")
images = [load_image_from_base64(image) for image in images]
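+ # Keep each image's original (width, height) so it can be forwarded to generate() as image_sizes.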
+ image_sizes = [image.size for image in images]
images = process_images(images, image_processor, model.config)
if type(images) is list:
@@ -148,7 +149,8 @@ def generate_stream(self, params):
num_image_tokens = prompt.count(replace_token) * model.get_vision_tower().num_patches
else:
images = None
- image_args = {"images": images}
+ image_sizes = None
+ image_args = {"images": images, "image_sizes": image_sizes}
else:
images = None
image_args = {}
diff --git a/llava/serve/sglang_worker.py b/llava/serve/sglang_worker.py
new file mode 100644
index 000000000..fb7a555fc
--- /dev/null
+++ b/llava/serve/sglang_worker.py
@@ -0,0 +1,244 @@
+"""
+A model worker that executes the model through an SGLang runtime endpoint.
+"""
+import argparse
+import asyncio
+from concurrent.futures import ThreadPoolExecutor
+import json
+import time
+import threading
+import uuid
+
+from fastapi import FastAPI, Request, BackgroundTasks
+from fastapi.responses import StreamingResponse
+import requests
+import re
+import uvicorn
+from functools import partial
+
+from llava.constants import WORKER_HEART_BEAT_INTERVAL
+from llava.utils import (build_logger, server_error_msg,
+ pretty_print_semaphore)
+from llava.mm_utils import process_images, load_image_from_base64, tokenizer_image_token, expand2square
+from llava.constants import DEFAULT_IMAGE_TOKEN
+
+import sglang as sgl
+from sglang.backend.runtime_endpoint import RuntimeEndpoint
+
+
+GB = 1 << 30
+
+worker_id = str(uuid.uuid4())[:6]
+logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
+global_counter = 0
+
+model_semaphore = None
+
+
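+# Background thread loop: ping the controller at a fixed interval so this worker stays registered and routable.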
+def heart_beat_worker(controller):
+ while True:
+ time.sleep(WORKER_HEART_BEAT_INTERVAL)
+ controller.send_heart_beat()
+
+
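+# SGLang program: append the interleaved text/image prompt segments to the state, then generate the response.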
+@sgl.function
+def pipeline(s, prompt, max_tokens):
+ for p in prompt:
+ if type(p) is str:
+ s += p
+ else:
+ s += sgl.image(p)
+ s += sgl.gen("response", max_tokens=max_tokens)
+
+
+class ModelWorker:
+ def __init__(self, controller_addr, worker_addr, sgl_endpoint,
+ worker_id, no_register, model_name):
+ self.controller_addr = controller_addr
+ self.worker_addr = worker_addr
+ self.worker_id = worker_id
+
+ # Select backend
+ backend = RuntimeEndpoint(sgl_endpoint)
+ sgl.set_default_backend(backend)
+ model_path = backend.model_info["model_path"]
+
+ if model_path.endswith("/"):
+ model_path = model_path[:-1]
+ if model_name is None:
+ model_paths = model_path.split("/")
+ if model_paths[-1].startswith('checkpoint-'):
+ self.model_name = model_paths[-2] + "_" + model_paths[-1]
+ else:
+ self.model_name = model_paths[-1]
+ else:
+ self.model_name = model_name
+
+ logger.info(f"Loading the SGLang model {self.model_name} on worker {worker_id} ...")
+
+ if not no_register:
+ self.register_to_controller()
+ self.heart_beat_thread = threading.Thread(
+ target=heart_beat_worker, args=(self,))
+ self.heart_beat_thread.start()
+
+ def register_to_controller(self):
+ logger.info("Register to controller")
+
+ url = self.controller_addr + "/register_worker"
+ data = {
+ "worker_name": self.worker_addr,
+ "check_heart_beat": True,
+ "worker_status": self.get_status()
+ }
+ r = requests.post(url, json=data)
+ assert r.status_code == 200
+
+ def send_heart_beat(self):
+ logger.info(f"Send heart beat. Models: {[self.model_name]}. "
+ f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
+ f"global_counter: {global_counter}")
+
+ url = self.controller_addr + "/receive_heart_beat"
+
+ while True:
+ try:
+ ret = requests.post(url, json={
+ "worker_name": self.worker_addr,
+ "queue_length": self.get_queue_length()}, timeout=5)
+ exist = ret.json()["exist"]
+ break
+ except requests.exceptions.RequestException as e:
+ logger.error(f"heart beat error: {e}")
+ time.sleep(5)
+
+ if not exist:
+ self.register_to_controller()
+
+ def get_queue_length(self):
+ if model_semaphore is None:
+ return 0
+ else:
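+ # In-flight requests = concurrency limit minus free semaphore slots, plus any coroutines waiting on the semaphore.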
+ return args.limit_model_concurrency - model_semaphore._value + (len(
+ model_semaphore._waiters) if model_semaphore._waiters is not None else 0)
+
+ def get_status(self):
+ return {
+ "model_names": [self.model_name],
+ "speed": 1,
+ "queue_length": self.get_queue_length(),
+ }
+
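+ # Build an interleaved text/image prompt from the request, run the SGLang pipeline, and stream partial results.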
+ async def generate_stream(self, params):
+ ori_prompt = prompt = params["prompt"]
+ images = params.get("images", None)
+ if images is not None and len(images) > 0:
+ if len(images) > 0:
+ if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
+ raise ValueError("Number of images does not match number of tokens in prompt")
+
+ images = [load_image_from_base64(image) for image in images]
+
+ # FIXME: for image-start/end token
+ # replace_token = DEFAULT_IMAGE_TOKEN
+ # if getattr(self.model.config, 'mm_use_im_start_end', False):
+ # replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
+ # prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
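+ # Split the prompt on image tokens and interleave the text chunks with the decoded PIL images for sgl.image().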
+ prompt = prompt.replace(' ' + DEFAULT_IMAGE_TOKEN + '\n', DEFAULT_IMAGE_TOKEN)
+ prompt_split = prompt.split(DEFAULT_IMAGE_TOKEN)
+ prompt = []
+ for i in range(len(prompt_split)):
+ prompt.append(prompt_split[i])
+ if i < len(images):
+ prompt.append(images[i])
+ else:
+ prompt = [prompt]
+
+ temperature = float(params.get("temperature", 1.0))
+ top_p = float(params.get("top_p", 1.0))
+ # max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
+ max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
+ stop_str = params.get("stop", None)
+ stop_str = [stop_str] if stop_str is not None else None
+
+ print({'prompt': prompt, 'max_new_tokens': max_new_tokens, 'temperature': temperature, 'top_p': top_p})
+ state = pipeline.run(prompt, max_new_tokens, temperature=temperature, top_p=top_p, stream=True)
+
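+ # Stream cumulative output (prompt echo plus generated text) back as null-delimited JSON chunks.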
+ generated_text = ori_prompt
+ async for text_outputs in state.text_async_iter(var_name="response"):
+ generated_text += text_outputs
+ yield json.dumps({"text": generated_text, "error_code": 0}).encode() + b"\0"
+
+ async def generate_stream_gate(self, params):
+ try:
+ async for x in self.generate_stream(params):
+ yield x
+ except ValueError as e:
+ print("Caught ValueError:", e)
+ ret = {
+ "text": server_error_msg,
+ "error_code": 1,
+ }
+ yield json.dumps(ret).encode() + b"\0"
+ except Exception as e:
+ print("Caught Unknown Error", e)
+ ret = {
+ "text": server_error_msg,
+ "error_code": 1,
+ }
+ yield json.dumps(ret).encode() + b"\0"
+
+
+app = FastAPI()
+
+
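+# Free one concurrency slot; the optional callback lets the worker send a heart beat after each finished request.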
+def release_model_semaphore(fn=None):
+ model_semaphore.release()
+ if fn is not None:
+ fn()
+
+
+@app.post("/worker_generate_stream")
+async def generate_stream(request: Request):
+ global model_semaphore, global_counter
+ global_counter += 1
+ params = await request.json()
+
+ if model_semaphore is None:
+ model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
+ await model_semaphore.acquire()
+ worker.send_heart_beat()
+ generator = worker.generate_stream_gate(params)
+ background_tasks = BackgroundTasks()
+ background_tasks.add_task(partial(release_model_semaphore, fn=worker.send_heart_beat))
+ return StreamingResponse(generator, background=background_tasks)
+
+
+@app.post("/worker_get_status")
+async def get_status(request: Request):
+ return worker.get_status()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", type=str, default="localhost")
+ parser.add_argument("--port", type=int, default=21002)
+ parser.add_argument("--worker-address", type=str,
+ default="http://localhost:21002")
+ parser.add_argument("--controller-address", type=str,
+ default="http://localhost:21001")
+ parser.add_argument("--model-name", type=str)
+ parser.add_argument("--sgl-endpoint", type=str)
+ parser.add_argument("--limit-model-concurrency", type=int, default=5)
+ parser.add_argument("--stream-interval", type=int, default=1)
+ parser.add_argument("--no-register", action="store_true")
+ args = parser.parse_args()
+ logger.info(f"args: {args}")
+
+ worker = ModelWorker(args.controller_address,
+ args.worker_address,
+ args.sgl_endpoint,
+ worker_id,
+ args.no_register,
+ args.model_name)
+ uvicorn.run(app, host=args.host, port=args.port, log_level="info")
diff --git a/pyproject.toml b/pyproject.toml
index 13d4e0144..bc78c8e15 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,7 +14,7 @@ classifiers = [
]
dependencies = [
"torch==2.0.1", "torchvision==0.15.2",
- "transformers==4.31.0", "tokenizers>=0.12.1,<0.14", "sentencepiece==0.1.99", "shortuuid",
+ "transformers==4.36.2", "tokenizers==0.15.0", "sentencepiece==0.1.99", "shortuuid",
"accelerate==0.21.0", "peft==0.4.0", "bitsandbytes==0.41.0",
"pydantic<2,>=1", "markdown2[all]", "numpy", "scikit-learn==1.2.2",
"gradio==3.35.2", "gradio_client==0.2.9",