Skip to content

Commit

Permalink
structural query
Browse files Browse the repository at this point in the history
  • Loading branch information
Anton Kulaga committed Nov 19, 2024
1 parent 1e8a7a5 commit 123dbcd
Show file tree
Hide file tree
Showing 6 changed files with 115 additions and 14 deletions.
11 changes: 8 additions & 3 deletions examples/coding/bioinformatic_agent.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from dotenv import load_dotenv
from just_agents.cot_agent import ChainOfThoughtAgent
from just_agents.utils import build_agent
from just_agents.llm_session import LLMSession
from examples.coding.mounts import coding_examples_dir
Expand All @@ -15,7 +16,11 @@
"""

if __name__ == "__main__":
    # Build the chain-of-thought coding agent from its YAML profile.
    agent: ChainOfThoughtAgent = build_agent(coding_examples_dir / "bioinformatic_agent.yaml")
    query_GSE137317 = "Download gene counts from GSE137317, split them by conditions, make PCA plot and differential expression analysis using only python libraries"
    #query_GSE144600 = "Download gene counts from GSE144600"
    #query_two = "add GSE137317 and GSE144600 to the same PCA plot"

    #query = "Take two nutritional datasets (GSE176043 and GSE41781) and three partial reprogramming datasets (GSE148911, GSE190986 and GSE144600), download them from GEO and generate PCA plot with them in /output folder"
    # ChainOfThoughtAgent.query returns both the final answer and intermediate thoughts.
    result, thoughts = agent.query(query_GSE137317)

16 changes: 9 additions & 7 deletions examples/coding/bioinformatic_agent.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -29,19 +29,21 @@ dependencies:
```
However no other software is installed by default.
3. You use run_bash_command tool to install new dependencies. You do not need to activate base micromamba environment, it is already preactivated when you run commands.
4. Use run_python_code tool to run python code. The code will be run in the base micromamba environment in which the dependencies are installed with run_bash_command.
5. Use information provided in the input to write detailed plans or bash code to accomplish the given goal or task.
4. Use run_python_code tool to run python code. The tool will execute it as a script, which is why all variables and imports created previously will not be available. The code will be run in the base micromamba environment in which the dependencies are installed with run_bash_command.
5. Use information provided in the input to write detailed plans, python code or bash code to accomplish the given goal or task.
6. If you download data, save it in the /input directory. Also, always check if the data is already in the /input directory to avoid unnecessary downloads.
7. If the files you downloaded are tarred, zipped or gzipped, feel free to extract them in the /input directory.
8. When writing code:
- always generate the full code of the script with all required imports. Each time you run the code assume nothing is imported or initialized.
- Use full absolute paths for all files. Use pathlib when possible.
- Install dependencies and software using micromamba, pip with the -y flag.
- Use default values for unspecified parameters.
- Only use software directly installed with micromamba or pip or present in the initial environment.yaml.
- Always give all relevant imports at the beginning of the code.
- Always give all relevant imports at the beginning of the code. Do not assume anything imported in the global scope.
- If the method that you use requires data preprocessing (like NaN deletion) or normalization, do it first.
- Always inspect the data, check which columns in the dataframes are relevant and clean them from bad or missing entries if necessary
- If your previous run failed because some field does not exist, inspect the fields and check if you confused the names
- Do not repeat steps already completed in the history.
- Do not repeat steps already successfully completed in the history.
- If you download data, save it in the /input directory. Also, always check if the data is already in the /input directory to avoid unnecessary downloads.
- If you create files and folders with results save them inside /output directory unless other is specified explicitly.
- When you make plots save figures in /output directory.
Expand Down Expand Up @@ -94,14 +96,14 @@ content: "content"
next_action: "next_action"
action_continue: "continue"
action_final: "final_answer"
thought_max_tokes: 500
thought_max_tokes: 5000
max_steps: 25
final_max_tokens: 1500
final_max_tokens: 2500
tools:
- package: "examples.coding.tools"
function: "run_bash_command"
- package: "examples.coding.tools"
function: "run_python_code"
options:
model: "gpt-4o-mini"
model: "gpt-4o"
temperature: 0.0
File renamed without changes.
70 changes: 70 additions & 0 deletions examples/multiagent/trump_and_camala.moderated.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
from just_agents.llm_options import LLAMA3_2, OPENAI_GPT4oMINI

from just_agents.base_agent import BaseAgent
from just_agents.llm_options import LLAMA3_2, OPENAI_GPT4oMINI

"""
This example shows how to use agents with different LLM models to simulate a debate between Kamala Harris and Donald Trump
Now we will moderate the conversation with the help of a moderator agent.
"""

def handle_candidate_response(question, harris_agent, trump_agent):
    """
    Route the moderator's question to the addressed candidate and print the exchange.

    Args:
        question (dict): Moderator's question with 'topic', 'question' and 'who' keys.
            Any 'who' value containing "Kamala" is routed to the Harris agent;
            everything else goes to the Trump agent.
        harris_agent (BaseAgent): Kamala Harris agent
        trump_agent (BaseAgent): Donald Trump agent
    Returns:
        str: The candidate's response
    """
    question_str = question["question"]
    topic = question["topic"]
    who = question["who"]
    print(f"============MODERATOR: {topic}====================\n")
    print(f"MODERATOR: {question_str} TO: {who} \n")
    # Fix: use the already-extracted values consistently instead of re-indexing
    # the dict (original mixed `who`/`question["who"]` and `question_str`/`question["question"]`).
    if "Kamala" in who:
        answer = harris_agent.query(question_str)
        print(f"KAMALA:\n {answer}")
    else:
        answer = trump_agent.query(question_str)
        print(f"TRUMP:\n {answer}")
    return answer

if __name__ == "__main__":

    kamala_options = LLAMA3_2
    # Trump deliberately runs on a different provider/model so the debate
    # exercises heterogeneous LLM backends.
    trump_options: dict = {
        "model": "groq/mixtral-8x7b-32768",
        "api_base": "https://api.groq.com/openai/v1",
        "temperature": 0.0,
        "tools": []
    }
    moderator_options = LLAMA3_2

    # Fixed spelling of the candidate's name in the prompt ("Camala Haris" -> "Kamala Harris")
    # and of the variable name ("kammala_prompt").
    kamala_prompt = """You are Kamala Harris. You always communicate in Kamala Harris style and your goals are Kamala Harris goals. You do not just play the role, you are real Kamala Harris"""
    Harris: BaseAgent = BaseAgent(llm_options = kamala_options, system_prompt=kamala_prompt)
    trump_prompt = """You are Donald Trump. You always communicate in Trump style and your goals are Trump goals. You do not just play the role, you are real Donald Trump"""
    Trump: BaseAgent = BaseAgent(llm_options = trump_options, system_prompt=trump_prompt)

    # The moderator must answer in strict JSON so query_structural can parse the reply.
    moderator_prompt = """You are a moderator at the election debate. You moderate the debate and ask questions to Kamala Harris and Donald Trump.
Your response must be of the following format (no other format is allowed):
{
    "topic": "topic of the debate",
    "question": "question to the candidate",
    "who": "Trump|Kamala" # who is answering the question
}
"""
    # Fix: the moderator now uses moderator_options, which was defined but never
    # used (the original passed kamala_options; both are LLAMA3_2, so behavior is unchanged).
    Moderator: BaseAgent = BaseAgent(llm_options = moderator_options, system_prompt=moderator_prompt)

    exchanges = 3

    for _ in range(exchanges):
        question = Moderator.query_structural("Raise the topic of the debate")
        handle_candidate_response(question, Harris, Trump)

    # The moderator keeps the whole debate in memory, so this prompt elicits a summary.
    result = Moderator.query("=================SUMMARY=================\n")
    print(f"SUMMARY: {result}")

26 changes: 26 additions & 0 deletions just_agents/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
from just_agents.just_profile import JustAgentProfile
from just_agents.rotate_keys import RotateKeys
from just_agents.streaming.protocol_factory import StreamingMode, ProtocolAdapterFactory
from typing import TypeVar, Type
from pydantic import BaseModel

class BaseAgent(
JustAgentProfile,
Expand Down Expand Up @@ -206,6 +208,30 @@ def query(self, query_input: SupportedMessages) -> str: #remembers query in hand
self.add_to_memory(query_input)
self.query_with_currentmemory()
return self.memory.last_message_str()


def query_structural(
    self,
    query_input: "SupportedMessages",
    parser: type = dict
) -> Union[dict, "BaseModel"]:
    """
    Query the agent and parse the JSON response with the provided parser.

    Args:
        query_input: Input messages for the query
        parser: A pydantic model class used to validate the response, or
            ``dict`` (the default) to parse with plain ``json.loads``.
            (Annotation widened from ``Type[BaseModel]`` to ``type``: the
            default ``dict`` is not a ``BaseModel`` subclass.)

    Returns:
        Parsed response as either a dictionary or pydantic model instance

    Raises:
        json.JSONDecodeError or pydantic.ValidationError: if the model's
            reply is not valid JSON for the requested schema.
    """
    import json  # local import keeps the module's top-level imports untouched

    response = self.query(query_input)
    # Identity check: we dispatch on the exact `dict` type, not subclasses.
    if parser is dict:
        return json.loads(response)
    return parser.model_validate_json(response)



def stream(self, query_input: SupportedMessages, reconstruct = False ) \
-> Generator[Union[BaseModelResponse, AbstractMessage],None,None]:
Expand Down
6 changes: 2 additions & 4 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ dependencies = [
"litellm>=1.52.9",
"Deprecated>=1.2.15",
"requests",
"numpydoc", # https://github.com/BerriAI/litellm/issues/6810
]
license = {text = "MIT"}

Expand All @@ -49,10 +50,7 @@ web = [
"loguru",
"fastapi",
"uvicorn",
"thefuzz",
]
documentation = [
"numpydoc",
"thefuzz"
]

[tool.setuptools]
Expand Down

0 comments on commit 123dbcd

Please sign in to comment.