-
Notifications
You must be signed in to change notification settings - Fork 0
/
app.py
63 lines (50 loc) · 2.64 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
"""CLI app that loads an ONNX base model with onnxruntime-genai and streams a
generated response — once per LoRA adapter when adapters are supplied, or once
against the base model otherwise."""
import onnxruntime_genai as og
import numpy as np  # kept: used by the optional transformers>4.45 model input below
import argparse

parser = argparse.ArgumentParser(description='Application to load and switch ONNX LoRA adapters')
# -m/-t/-p are effectively mandatory (the script dereferences them unconditionally);
# required=True turns a late AttributeError into a clean argparse usage error.
parser.add_argument('-m', '--model', type=str, required=True, help='The ONNX base model')
parser.add_argument('-a', '--adapters', nargs='+', type=str, help='List of adapters in .onnx_adapters format')
parser.add_argument('-t', '--template', type=str, required=True, help='The template with which to format the prompt')
parser.add_argument('-s', '--system', type=str, help='The system prompt to pass to the model')
parser.add_argument('-p', '--prompt', type=str, required=True, help='The user prompt to pass to the model')
args = parser.parse_args()

model = og.Model(args.model)

adapters = None
if args.adapters:
    adapters = og.Adapters(model)
    for adapter in args.adapters:
        # Each adapter file is registered under its own path as its lookup name.
        adapters.load(adapter, adapter)

tokenizer = og.Tokenizer(model)
tokenizer_stream = tokenizer.create_stream()

# Example prompt templates
# Phi-3
# <|user|>\n{input} <|end|>\n<|assistant|>
# Llama-2
# "<s>{input}"
# "<s>[INST] <<SYS>>\nAnswer as briefly as possible\n<</SYS>>\n\n{input} [/INST]"
# "System: You are a helpful and honest assistant\nPrompt: {input}\nResponse:\n"
# Llama-3
# <|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{input}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n
# System: You are an in car virtual assistant that maps user's inputs to the corresponding function call in the vehicle. You must respond with only a JSON object matching the following schema: {"function_name": <name of the function>, "arguments": <arguments of the function>}
prompt = args.template.format(system=args.system, input=args.prompt)


def _make_generator():
    """Build a fresh generator for `prompt` so every run starts from an empty
    KV cache and a clean is_done() state."""
    params = og.GeneratorParams(model)
    params.set_search_options(max_length=2048, past_present_share_buffer=False)
    # This input is generated for transformers versions > 4.45
    #params.set_model_input("onnx::Neg_67", np.array(0, dtype=np.int64))
    params.input_ids = tokenizer.encode(prompt)
    return og.Generator(model, params)


def _stream_output(generator):
    """Run the generation loop to completion, decoding and printing each token
    as it is produced."""
    while not generator.is_done():
        generator.compute_logits()
        generator.generate_next_token()
        new_token = generator.get_next_tokens()[0]
        print(tokenizer_stream.decode(new_token), end='', flush=True)
    print()  # terminate the streamed line so the next header starts cleanly


if args.adapters:
    for adapter in args.adapters:
        print(f"[{adapter}]: {prompt}")
        # BUG FIX: the original created one generator outside this loop, so the
        # second and later adapters saw is_done() == True immediately and
        # produced no output (and would have shared the first run's KV cache).
        # A fresh generator per adapter gives each run a clean state.
        generator = _make_generator()
        generator.set_active_adapter(adapters, adapter)
        _stream_output(generator)
else:
    print(f"[Base]: {prompt}")
    _stream_output(_make_generator())