-
Notifications
You must be signed in to change notification settings - Fork 33
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #334 from Undertone0809/v1.12.0/add-beta-tool-agent-v1
feat: add beta ToolAgent
- Loading branch information
Showing
14 changed files
with
428 additions
and
140 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
"""Public entry point for the assistant-agent package.

Re-exports :class:`AssistantAgent` so callers can import it from the
package root instead of the internal ``agent`` module.
"""
from promptulate.agents.assistant_agent.agent import AssistantAgent

__all__ = ["AssistantAgent"]
Empty file.
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,167 @@ | ||
import time | ||
from typing import Callable, List, Optional, Union | ||
|
||
from pydantic import BaseModel, Field | ||
|
||
from promptulate.agents.base import BaseAgent | ||
from promptulate.agents.tool_agent.prompt import ( | ||
SYSTEM_PROMPT_TEMPLATE, | ||
) | ||
from promptulate.hook import Hook, HookTable | ||
from promptulate.llms.base import BaseLLM | ||
from promptulate.llms.openai import ChatOpenAI | ||
from promptulate.output_formatter import ( | ||
formatting_result, | ||
get_formatted_instructions, | ||
) | ||
from promptulate.schema import MessageSet | ||
from promptulate.tools.base import BaseTool, Tool | ||
from promptulate.tools.manager import ToolManager | ||
from promptulate.utils.logger import logger | ||
|
||
|
||
class ReActResponse(BaseModel):
    """Structured ReAct-style answer expected from the LLM each iteration.

    NOTE: the ``description`` of every field is embedded into the JSON-schema
    format instructions sent to the model (see
    ``_build_output_format_instruction``), so these strings are part of the
    prompt, not just documentation.
    """

    thought: str = Field(description="The thought of what to do and why.")
    self_criticism: str = Field(
        description="Constructive self-criticism of the thought"
    )
    # Fixed garbled description ("The name of tool name.") — this text is
    # shown to the LLM and should read cleanly.
    tool_name: str = Field(description="The name of the tool to use.")
    tool_parameters: dict = Field(
        description="The input parameters of tool, string type json parameters."
    )
|
||
|
||
# Finish tool for ToolAgent. Its docstring doubles as the tool description
# shown to the LLM, so keep the Args section format intact.
def finish(result: str):
    """Use this tool when you have the final answer and can return the
    result.
    Args:
        result: final result content
    """
    return result
|
||
|
||
def _build_output_format_instruction():
    """Build the JSON output-format instructions for the LLM.

    Includes one fully worked ``ReActResponse`` example (a finish-tool call)
    so the model sees the exact shape it must produce.
    """
    example_response = ReActResponse(
        thought="From the search results, it seems that multiple sources have different weather data formats and information. I will identify the relevant data which is the forecast for tomorrow in Beijing, and then use the finish tool to provide this specific forecast.",  # noqa
        self_criticism="While there is quite a bit of information in the search results, I need to focus on only providing the required weather forecast for tomorrow in Beijing and not get distracted by additional data such as historical weather data or weather for other days.",  # noqa
        tool_name="finish",
        tool_parameters={
            "result": """In Beijing, the weather forecast for tomorrow is a high of 2\u00b0C and a low of -7\u00b0C.\\"""  # noqa
        },
    )
    return get_formatted_instructions(
        json_schema=ReActResponse, examples=[example_response]
    )
|
||
|
||
class ToolAgent(BaseAgent):
    """Agent that solves a task by iteratively selecting and running tools.

    Each iteration rebuilds the system prompt (which embeds the progress so
    far), asks the LLM for a ReAct-style structured response, runs the chosen
    tool and records the observation. The loop ends when the LLM selects the
    built-in ``finish`` tool, or when the iteration/time budget runs out.
    """

    def __init__(
        self,
        *,
        tools: List[Union[BaseTool, Tool, Callable]],
        llm: Optional[BaseLLM] = None,
        max_iterations: int = 20,
        **kwargs,
    ):
        """
        Args:
            tools: Tools the agent may call. The built-in ``finish`` tool is
                appended automatically so the LLM always has a way to stop.
            llm: LLM used for reasoning. Defaults to gpt-3.5-turbo-16k with
                temperature 0 and the default system prompt disabled.
            max_iterations: Hard cap on reasoning iterations.
        """
        super().__init__(**kwargs)
        self.tool_manager: ToolManager = ToolManager(tools + [finish])
        self.llm: BaseLLM = llm or ChatOpenAI(
            model="gpt-3.5-turbo-16k",
            temperature=0.0,
            enable_default_system_prompt=False,
        )
        self.max_iterations: int = max_iterations
        self.current_iteration: int = 1
        # Optional wall-clock budget in seconds; None means unlimited.
        self.max_execution_time: Optional[float] = None
        self.current_time_elapsed: float = 0.0

        self.system_prompt: str = ""
        # Human-readable log of previous steps; embedded in the system prompt.
        self.current_process: str = ""
        self.task: str = ""

    def _build_system_prompt(self):
        """ToolAgent use dynamic system prompt. Therefore, this method will be called
        every time before the llm generates a response."""
        self.system_prompt = SYSTEM_PROMPT_TEMPLATE.format(
            task=self.task,
            tool_descriptions=self.tool_manager.tool_descriptions,
            current_process=self.current_process,
            output_format=_build_output_format_instruction(),
        )
        logger.info(f"[pne] ToolAgent system prompt: {self.system_prompt}")

    def _build_current_process(self, result: ReActResponse, tool_result: str):
        """Append this iteration's thought/action/observation to the running log."""
        if self.current_process == "":
            self.current_process = """## Current Process of Task
Here is what you have already done. You need to infer what the next task needs to be done based on the previous one.\n"""  # noqa

        self.current_process += f"Step {self.current_iteration}:\n"
        self.current_process += f"Thought: {result.thought}\n"
        self.current_process += f"Self Criticism: {result.self_criticism}\n"
        self.current_process += f"Tool: {result.tool_name}\n"
        self.current_process += f"Tool Parameters: {result.tool_parameters}\n"
        self.current_process += f"Observation: {tool_result}\n\n"
        self.current_process += "---\n"

    def _run(self, task: str, *args, **kwargs) -> str:
        """Run the ReAct loop until the LLM calls finish or the budget runs out.

        Args:
            task: The user task to solve.

        Returns:
            The final answer produced via the finish tool, or a diagnostic
            message if the iteration/time budget was exhausted first.
        """
        self.task = task

        start_time = time.time()

        while self._should_continue():
            self._build_system_prompt()

            messages = MessageSet.from_listdict_data(
                [{"role": "system", "content": self.system_prompt}]
            )
            llm_response: str = self.llm.predict(messages=messages).content
            result: ReActResponse = formatting_result(ReActResponse, llm_response)

            Hook.call_hook(
                HookTable.ON_AGENT_ACTION,
                self,
                thought=f"{result.thought}\n{result.self_criticism}",
                action=result.tool_name,
                action_input=result.tool_parameters,
            )

            if result.tool_name == "finish":
                return result.tool_parameters["result"]

            tool_result: str = self.tool_manager.run_tool(
                result.tool_name, result.tool_parameters
            )
            self._build_current_process(result, tool_result)

            Hook.call_hook(
                HookTable.ON_AGENT_OBSERVATION, self, observation=tool_result
            )

            self.current_iteration += 1
            # Bug fix: the original used "+=", which re-added the *total*
            # elapsed time on every iteration and so overcounted the budget
            # quadratically. The elapsed time is simply now minus start.
            self.current_time_elapsed = time.time() - start_time

        # Bug fix: the original fell off the loop and implicitly returned
        # None even though the signature promises str.
        return (
            "ToolAgent stopped: max iterations or max execution time exceeded "
            "before the finish tool was called."
        )

    def _should_continue(self) -> bool:
        """Decide whether another iteration may run.

        Returns:
            True to continue; False once the iteration cap or the optional
            wall-clock budget has been reached. (The original docstring said
            "whether to stop", which was inverted relative to the return
            value.)
        """
        # NOTE(review): current_iteration starts at 1 and ">=" is used, so at
        # most max_iterations - 1 tool steps run — confirm whether this
        # off-by-one is intended before changing it.
        if self.current_iteration >= self.max_iterations:
            return False

        if (
            self.max_execution_time
            and self.current_time_elapsed >= self.max_execution_time
        ):
            return False

        return True

    def get_llm(self) -> BaseLLM:
        """Return the LLM instance this agent reasons with."""
        return self.llm
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,80 @@ | ||
{ | ||
"cells": [ | ||
{ | ||
"cell_type": "markdown", | ||
"source": [ | ||
"This notebook show how to use beta version ToolAgent." | ||
], | ||
"metadata": { | ||
"collapsed": false | ||
}, | ||
"id": "7804b4b4e92eef16" | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"\u001B[31;1m\u001B[1;3mAgent Start...\u001B[0m\n", | ||
"\u001B[36;1m\u001B[1;3m[User] What is the weather in Beijing tomorrow? What is the square root of the temperature?\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Thought] I need to search for the weather forecast in Beijing for tomorrow to find out the temperature, and then I can use the temperature to calculate the square root.\n", | ||
"I have not yet gathered any actual weather data for the calculation, so it’s premature to consider the calculation step.\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Action] ddg_websearch args: {'query': 'Beijing weather forecast tomorrow'}\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Observation] -6 °C Mo -5 °C Tu -2 °C We 2 °C Th 3 °C Fr 2 °C Sa 2 °C Su 0 °C Meteogram Temperature (°C) Precipitation (mm) Gust (km/h) New snow (cm) -11 -12 -11 -7 -5 -5 -6 -4 -3 -3 -3 -5 -5 -6 -5 -2 2 3 2 2 0 1 -1 0 -1 0 Webcams Olympic Park Zone 8 km view.iap.ac.cn:8080 Air quality (AQI) Today 59 °F 40 °F 4 mph - 9 h 1 hour view The weather forecast has very high predictability. Compare different forecasts with MultiModel. Weather report for Beijing During the night and in the afternoon clear skies prevail. Before noon a few clouds are expected. It is a sunny day. Temperatures peaking at 59 °F. Beijing Weather Tomorrow - Accurate Forecast for Beijing Today (Beijing) World China Beijing Beijing Beijing Weather Forecast Now 09:28, 05 Dec +32 °F RealFeel +28°F Pressure 29.8 inHg Humidity 60% Wind N, 4.5 mph Wind gust 6.7 mph Cloudiness 0% Visibility 10 mi. More Sunrise: 07:19 Sunset: 16:49 Daylight: 09:30 Moon Phase: Third quarter China Beijing Beijing Weather Forecast. Providing a local hourly Beijing weather forecast of rain, sun, wind, humidity and temperature. The Long-range 12 day forecast also includes detail for Beijing weather today. Live weather reports from Beijing weather stations and weather warnings that include risk of thunder, high UV index and forecast gales. Today 12 September +20°C +26°C Tomorrow 13 September +18°C +25°C Thursday 14 September +19°C +26°C Friday 15 September +20°C +27°C Week Night +20°C Feels like +21°C 0% 0 mm 3 mps 76% 755 mm Morning +20°C\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Thought] The search results show the temperature for tomorrow as -5 °C in Beijing. Now, I need to calculate the square root of this temperature. However, since the square root of a negative number is not a real number, I will assume the task refers to the square root of the absolute value of the temperature. I will now use the calculator tool to find the square root of 5.\n", | ||
"I correctly identified the need to calculate the square root of the temperature in Beijing for tomorrow, but I must ensure to handle the absolute value since the original temperature is negative.\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Action] calculator args: {'expression': 'sqrt(5)'}\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Observation] 2.23606797749979\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Thought] Now that I have calculated the square root of tomorrow's forecasted temperature in Beijing, I should use the finish tool to provide the final result.\n", | ||
"There is no self-criticism needed at this stage since the next step is clear and there are no outstanding issues or tasks.\u001B[0m\n", | ||
"\u001B[33;1m\u001B[1;3m[Action] finish args: {'result': 'The temperature in Beijing tomorrow is forecasted to be -5°C. The square root of the absolute value of this temperature is 2.23606797749979.'}\u001B[0m\n", | ||
"\u001B[32;1m\u001B[1;3m[Agent Result] The temperature in Beijing tomorrow is forecasted to be -5°C. The square root of the absolute value of this temperature is 2.23606797749979.\u001B[0m\n", | ||
"\u001B[38;5;200m\u001B[1;3mAgent End.\u001B[0m\n", | ||
"The temperature in Beijing tomorrow is forecasted to be -5°C. The square root of the absolute value of this temperature is 2.23606797749979.\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"from promptulate.beta.agents.tool_agent.agent import ToolAgent\n", | ||
"import promptulate as pne\n", | ||
"\n", | ||
"tool_list = [pne.tools.ddg_websearch, pne.tools.calculator]\n", | ||
"agent = ToolAgent(tools=tool_list, llm=pne.ChatOpenAI(model=\"gpt-4-1106-preview\"))\n", | ||
"resp: str = agent.run(\"What is the weather in Beijing tomorrow? What is the square root of the temperature?\")\n", | ||
"print(resp)" | ||
], | ||
"metadata": { | ||
"collapsed": true, | ||
"ExecuteTime": { | ||
"end_time": "2024-01-22T14:43:46.711562700Z", | ||
"start_time": "2024-01-22T14:43:00.772953800Z" | ||
} | ||
}, | ||
"id": "initial_id", | ||
"execution_count": 1 | ||
} | ||
], | ||
"metadata": { | ||
"kernelspec": { | ||
"display_name": "Python 3", | ||
"language": "python", | ||
"name": "python3" | ||
}, | ||
"language_info": { | ||
"codemirror_mode": { | ||
"name": "ipython", | ||
"version": 2 | ||
}, | ||
"file_extension": ".py", | ||
"mimetype": "text/x-python", | ||
"name": "python", | ||
"nbconvert_exporter": "python", | ||
"pygments_lexer": "ipython2", | ||
"version": "2.7.6" | ||
} | ||
}, | ||
"nbformat": 4, | ||
"nbformat_minor": 5 | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,7 @@ | ||
# Public entry point for the DuckDuckGo tools package.
# The scraped diff span contained both the pre- and post-change lines
# (a duplicate import and two __all__ assignments); this is the post-change
# state the diff header (@@ -1,6 +1,7 @@) resolves to.
from promptulate.tools.duckduckgo.tools import (
    DuckDuckGoReferenceTool,
    DuckDuckGoTool,
    ddg_websearch,
)

__all__ = ["DuckDuckGoTool", "DuckDuckGoReferenceTool", "ddg_websearch"]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,3 @@ | ||
# Public entry point for the math tools package.
# The scraped diff span contained both the pre- and post-change lines
# (two imports and two __all__ assignments); this is the post-change state
# the diff header (@@ -1,3 +1,3 @@) resolves to.
from promptulate.tools.math.tools import Calculator, calculator

__all__ = ["Calculator", "calculator"]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters