
Commit d087b38
Merge pull request #67 from holoviz-topics/fix_compat
Fix compat
ahuang11 authored Oct 18, 2023
2 parents 73f8908 + 9e74f9e commit d087b38
Showing 10 changed files with 75 additions and 29 deletions.
4 changes: 2 additions & 2 deletions docs/examples/features/feature_chained_response.py
@@ -22,8 +22,8 @@ async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
         }
         instance.respond()
     elif user == ARM_BOT:
-        user_entry = instance.value[-2]
-        user_contents = user_entry.value
+        user_message = instance.value[-2]
+        user_contents = user_message.value
         yield {
             "user": LEG_BOT,
             "avatar": "🦿",
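The rename appears to track Panel's switch from "entry" to "message" terminology for chat history items. A minimal runnable sketch of the access pattern above, assuming a Panel 1.3-style `pn.chat.ChatInterface` whose `value` holds message objects with `.value` text (the echo callback is purely illustrative):

```python
import panel as pn

pn.extension()


def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    # instance.value is the list of chat messages; [-2] reaches back past
    # the message that triggered this callback to the one before it.
    if len(instance.value) < 2:
        return "No earlier message yet."
    user_message = instance.value[-2]
    user_contents = user_message.value  # the text of that earlier message
    return f"The previous message said: {user_contents}"


pn.chat.ChatInterface(callback=callback).servable()
```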
4 changes: 2 additions & 2 deletions docs/examples/langchain/langchain_llama_and_mistral.py
@@ -60,10 +60,10 @@ async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
         )
         llm_chains[model] = _get_llm_chain(model)

-    entry = None
+    message = None
     response = await _get_response(contents, model)
     for chunk in response:
-        entry = instance.stream(chunk, user=model.title(), entry=entry)
+        message = instance.stream(chunk, user=model.title(), message=message)


 chat_interface = pn.chat.ChatInterface(callback=callback, placeholder_threshold=0.1)
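The pattern being renamed here, passing the previously returned message back into `stream` so successive chunks update one chat message instead of creating many, can be sketched as follows (the fake token generator stands in for the LLM and is purely illustrative):

```python
import panel as pn

pn.extension()


def fake_token_stream(prompt: str):
    # Hypothetical stand-in for a streaming LLM response.
    for token in ["Stream", "ing ", "one ", "reply..."]:
        yield token


def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    message = None
    for chunk in fake_token_stream(contents):
        # Passing the previous return value back via `message=` keeps
        # appending chunks to the same chat message (the keyword was
        # `entry=` before this commit).
        message = instance.stream(chunk, user="Bot", message=message)


pn.chat.ChatInterface(callback=callback).servable()
```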
2 changes: 1 addition & 1 deletion docs/examples/langchain/langchain_math_assistant.py
@@ -14,7 +14,7 @@

 async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
     final_answer = await llm_math.arun(question=contents)
-    instance.stream(final_answer, entry=instance.value[-1])
+    instance.stream(final_answer, message=instance.value[-1])


 chat_interface = pn.chat.ChatInterface(callback=callback, callback_user="Langchain")
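The same keyword rename applies when streaming into a message that already exists: `message=instance.value[-1]` targets the most recent chat message rather than opening a new one. A contrived sketch under the same Panel 1.3 assumptions (whether `stream` appends to or replaces the targeted message may depend on your Panel version):

```python
import panel as pn

pn.extension()


def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    # Post a placeholder, then stream extra text into that same message
    # (the keyword was `entry=` before this commit).
    instance.send("Working on it...", user="Assistant", respond=False)
    instance.stream(" Finished!", message=instance.value[-1])


pn.chat.ChatInterface(callback=callback).servable()
```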
2 changes: 1 addition & 1 deletion docs/examples/langchain/langchain_with_memory.py
@@ -8,7 +8,7 @@
 from langchain.chat_models import ChatOpenAI
 from langchain.memory import ConversationBufferMemory

-pn.extension(design="material")
+pn.extension()


 async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
46 changes: 46 additions & 0 deletions docs/examples/mistral/mistral_and_llama.py
@@ -0,0 +1,46 @@
+"""
+Demonstrates how to use the ChatInterface widget to create a chatbot using
+Llama2 and Mistral.
+"""
+
+import panel as pn
+from ctransformers import AutoModelForCausalLM
+
+pn.extension()
+
+MODEL_ARGUMENTS = {
+    "llama": {
+        "args": ["TheBloke/Llama-2-7b-Chat-GGUF"],
+        "kwargs": {"model_file": "llama-2-7b-chat.Q5_K_M.gguf"},
+    },
+    "mistral": {
+        "args": ["TheBloke/Mistral-7B-Instruct-v0.1-GGUF"],
+        "kwargs": {"model_file": "mistral-7b-instruct-v0.1.Q4_K_M.gguf"},
+    },
+}
+
+
+def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
+    for model in MODEL_ARGUMENTS:
+        if model not in pn.state.cache:
+            pn.state.cache[model] = AutoModelForCausalLM.from_pretrained(
+                *MODEL_ARGUMENTS[model]["args"],
+                **MODEL_ARGUMENTS[model]["kwargs"],
+                gpu_layers=1,
+            )
+
+        llm = pn.state.cache[model]
+        response = llm(contents, max_new_tokens=512, stream=True)
+
+        message = None
+        for chunk in response:
+            message = instance.stream(chunk, user=model.title(), message=message)
+
+
+chat_interface = pn.chat.ChatInterface(callback=callback)
+chat_interface.send(
+    "Send a message to get a reply from both Llama 2 and Mistral (7B)!",
+    user="System",
+    respond=False,
+)
+chat_interface.servable()
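A note on this new example: the `pn.state.cache` check is what keeps both multi-gigabyte GGUF models loaded once per server process rather than once per session or per message. The idiom in isolation, with a hypothetical `load_model` standing in for the `ctransformers` call:

```python
import panel as pn


def load_model(name: str):
    # Hypothetical stand-in for an expensive model load.
    return f"<loaded model: {name}>"


def get_model(name: str):
    # pn.state.cache is a plain dict shared across sessions, so the
    # expensive load runs at most once per process.
    if name not in pn.state.cache:
        pn.state.cache[name] = load_model(name)
    return pn.state.cache[name]
```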
14 changes: 7 additions & 7 deletions docs/examples/mistral/mistral_with_memory.py
@@ -14,16 +14,16 @@


 def apply_template(history):
-    history = [entry for entry in history if entry.user != "System"]
+    history = [message for message in history if message.user != "System"]
     prompt = ""
-    for i, entry in enumerate(history):
+    for i, message in enumerate(history):
         if i == 0:
-            prompt += f"<s>[INST]{SYSTEM_INSTRUCTIONS} {entry.value}[/INST]"
+            prompt += f"<s>[INST]{SYSTEM_INSTRUCTIONS} {message.value}[/INST]"
         else:
-            if entry.user == "Mistral":
-                prompt += f"{entry.value}</s>"
+            if message.user == "Mistral":
+                prompt += f"{message.value}</s>"
             else:
-                prompt += f"""[INST]{entry.value}[/INST]"""
+                prompt += f"""[INST]{message.value}[/INST]"""
     return prompt


@@ -42,7 +42,7 @@ async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
     )

     llm = llms["mistral"]
-    history = [entry for entry in instance.value]
+    history = [message for message in instance.value]
     prompt = apply_template(history)
     response = llm(prompt, stream=True)
     message = ""
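To make the Mistral instruct template concrete, here is a worked example of what `apply_template` returns for a short hypothetical history (using `SimpleNamespace` stand-ins for the chat messages; only `.user` and `.value` matter here):

```python
from types import SimpleNamespace

SYSTEM_INSTRUCTIONS = "Do what the user requests."

# Hypothetical history: the System greeting is filtered out first.
history = [
    SimpleNamespace(user="System", value="Welcome!"),
    SimpleNamespace(user="You", value="Hi"),
    SimpleNamespace(user="Mistral", value="Hello!"),
    SimpleNamespace(user="You", value="Tell me a joke"),
]

# apply_template(history) would produce:
# <s>[INST]Do what the user requests. Hi[/INST]Hello!</s>[INST]Tell me a joke[/INST]
```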
2 changes: 1 addition & 1 deletion docs/examples/openai/openai_hvplot.py
@@ -46,7 +46,7 @@ async def respond_with_openai(contents: Union[pd.DataFrame, str]):
     message = ""
     async for chunk in response:
         message += chunk["choices"][0]["delta"].get("content", "")
-        yield {"user": "ChatGPT", "value": message}
+        yield {"user": "ChatGPT", "object": message}


 async def respond_with_executor(code: str):
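Yielding a dict lets the callback set fields beyond plain text; after this commit the content key is `object` rather than `value`. A minimal sketch of the dict-yielding form shown above (the word list is a stand-in for streamed OpenAI deltas):

```python
import panel as pn

pn.extension()


async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    partial = ""
    for word in ["progressively ", "built ", "reply"]:
        partial += word
        # "object" carries the message content; "user" labels the sender.
        yield {"user": "ChatGPT", "object": partial}


pn.chat.ChatInterface(callback=callback).servable()
```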
6 changes: 3 additions & 3 deletions docs/features.md
@@ -41,8 +41,8 @@ async def callback(contents: str, user: str, instance: pn.widgets.ChatInterface):
         }
         instance.respond()
     elif user == ARM_BOT:
-        user_entry = instance.value[-2]
-        user_contents = user_entry.value
+        user_message = instance.value[-2]
+        user_contents = user_message.value
         yield {
             "user": LEG_BOT,
             "avatar": "🦿",
@@ -228,4 +228,4 @@ could add the object you are chatting about"""
     sidebar_width=500,
 ).servable()
 ```
-</details>
+</details>
8 changes: 4 additions & 4 deletions docs/langchain.md
@@ -80,10 +80,10 @@ async def callback(contents: str, user: str, instance: pn.widgets.ChatInterface):
         )
         llm_chains[model] = _get_llm_chain(model)

-    entry = None
+    message = None
     response = await _get_response(contents, model)
     for chunk in response:
-        entry = instance.stream(chunk, user=model.title(), entry=entry)
+        message = instance.stream(chunk, user=model.title(), message=message)


 chat_interface = pn.widgets.ChatInterface(callback=callback, placeholder_threshold=0.1)
@@ -133,7 +133,7 @@ pn.extension(design="material")

 async def callback(contents: str, user: str, instance: pn.widgets.ChatInterface):
     final_answer = await llm_math.arun(question=contents)
-    instance.stream(final_answer, entry=instance.value[-1])
+    instance.stream(final_answer, message=instance.value[-1])


 chat_interface = pn.widgets.ChatInterface(callback=callback, callback_user="Langchain")
@@ -406,4 +406,4 @@ memory = ConversationBufferMemory()
 chain = ConversationChain(llm=llm, memory=memory)
 chat_interface.servable()
 ```
-</details>
+</details>
16 changes: 8 additions & 8 deletions docs/mistral.md
@@ -111,16 +111,16 @@ SYSTEM_INSTRUCTIONS = "Do what the user requests."


 def apply_template(history):
-    history = [entry for entry in history if entry.user != "System"]
+    history = [message for message in history if message.user != "System"]
     prompt = ""
-    for i, entry in enumerate(history):
+    for i, message in enumerate(history):
         if i == 0:
-            prompt += f"<s>[INST]{SYSTEM_INSTRUCTIONS} {entry.value}[/INST]"
+            prompt += f"<s>[INST]{SYSTEM_INSTRUCTIONS} {message.value}[/INST]"
         else:
-            if entry.user == "Mistral":
-                prompt += f"{entry.value}</s>"
+            if message.user == "Mistral":
+                prompt += f"{message.value}</s>"
             else:
-                prompt += f"""[INST]{entry.value}[/INST]"""
+                prompt += f"""[INST]{message.value}[/INST]"""
     return prompt


@@ -139,7 +139,7 @@ async def callback(contents: str, user: str, instance: pn.widgets.ChatInterface):
     )

     llm = llms["mistral"]
-    history = [entry for entry in instance.value]
+    history = [message for message in instance.value]
     prompt = apply_template(history)
     response = llm(prompt, stream=True)
     message = ""
@@ -158,4 +158,4 @@ chat_interface.send(
 )
 chat_interface.servable()
 ```
-</details>
+</details>
