Skip to content

Commit

Permalink
Merge pull request #334 from Undertone0809/v1.12.0/add-beta-tool-agen…
Browse files Browse the repository at this point in the history
…t-v1

feat: add beta ToolAgent
  • Loading branch information
Undertone0809 authored Jan 22, 2024
2 parents 4761fd5 + b959ea4 commit 52c6c32
Show file tree
Hide file tree
Showing 14 changed files with 428 additions and 140 deletions.
6 changes: 3 additions & 3 deletions docs/use_cases/chat_usage.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,9 @@ print(response.content) # response string
print(response.additional_kwargs) # metadata
```

I am an AI assistant here to help you with any questions or tasks you may have. How can I assist you today?
{'id': 'chatcmpl-8UK0tfwlkixWyaxKJ2XWNGMVGFPo0', 'choices': [{'finish_reason': 'stop', 'index': 0, 'message': {'content': 'I am an AI assistant here to help you with any questions or tasks you may have. How can I assist you today?', 'role': 'assistant'}}], 'created': 1702237461, 'model': 'gpt-3.5-turbo-0613', 'object': 'chat.completion', 'system_fingerprint': None, 'usage': {'completion_tokens': 25, 'prompt_tokens': 20, 'total_tokens': 45}, '_response_ms': 2492.372}

I am an AI assistant here to help you with any questions or tasks you may have. How can I assist you today?
{'id': 'chatcmpl-8UK0tfwlkixWyaxKJ2XWNGMVGFPo0', 'choices': [{'finish_reason': 'stop', 'index': 0, 'message': {'content': 'I am an AI assistant here to help you with any questions or tasks you may have. How can I assist you today?', 'role': 'assistant'}}], 'created': 1702237461, 'model': 'gpt-3.5-turbo-0613', 'object': 'chat.completion', 'system_fingerprint': None, 'usage': {'completion_tokens': 25, 'prompt_tokens': 20, 'total_tokens': 45}, '_response_ms': 2492.372}

## Using any model
You can call 100+ LLMs using the same Input/Output Format (OpenAI format) in `pne.chat()`. The following example shows how to use `claude-2`; make sure you have the key ANTHROPIC_API_KEY set.
Expand Down
248 changes: 124 additions & 124 deletions poetry.lock

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions promptulate/agents/assistant_agent/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
from promptulate.agents.assistant_agent.agent import AssistantAgent

__all__ = ["AssistantAgent"]
Empty file.
Empty file.
167 changes: 167 additions & 0 deletions promptulate/beta/agents/tool_agent/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,167 @@
import time
from typing import Callable, List, Optional, Union

from pydantic import BaseModel, Field

from promptulate.agents.base import BaseAgent
from promptulate.agents.tool_agent.prompt import (
SYSTEM_PROMPT_TEMPLATE,
)
from promptulate.hook import Hook, HookTable
from promptulate.llms.base import BaseLLM
from promptulate.llms.openai import ChatOpenAI
from promptulate.output_formatter import (
formatting_result,
get_formatted_instructions,
)
from promptulate.schema import MessageSet
from promptulate.tools.base import BaseTool, Tool
from promptulate.tools.manager import ToolManager
from promptulate.utils.logger import logger


class ReActResponse(BaseModel):
    """Structured ReAct-style step the LLM must return each iteration.

    The Field descriptions below are embedded verbatim into the output-format
    instructions sent to the LLM, so they must read as clear instructions.
    """

    thought: str = Field(description="The thought of what to do and why.")
    self_criticism: str = Field(
        description="Constructive self-criticism of the thought"
    )
    # Fixed garbled instruction text ("The name of tool name.") — this string
    # is shown to the LLM as part of the schema instructions.
    tool_name: str = Field(description="The name of the tool to use.")
    tool_parameters: dict = Field(
        description="The input parameters of tool, string type json parameters."
    )


# Finish tool for ToolAgent
def finish(result: str):
    """Terminate the agent loop by returning the final answer. Call this tool
    only once you are confident the task is complete and the result is ready.
    Args:
        result: final result content
    """
    return result


def _build_output_format_instruction():
    """Build the JSON output-format instruction block for the system prompt.

    Embeds the ReActResponse schema together with one worked example so the
    LLM replies with JSON that `formatting_result` can parse.
    """
    example_response = ReActResponse(
        thought="From the search results, it seems that multiple sources have different weather data formats and information. I will identify the relevant data which is the forecast for tomorrow in Beijing, and then use the finish tool to provide this specific forecast.",  # noqa
        self_criticism="While there is quite a bit of information in the search results, I need to focus on only providing the required weather forecast for tomorrow in Beijing and not get distracted by additional data such as historical weather data or weather for other days.",  # noqa
        tool_name="finish",
        tool_parameters={
            "result": """In Beijing, the weather forecast for tomorrow is a high of 2\u00b0C and a low of -7\u00b0C.\\"""  # noqa
        },
    )
    return get_formatted_instructions(
        json_schema=ReActResponse, examples=[example_response]
    )


class ToolAgent(BaseAgent):
    """Beta ReAct-style agent: iteratively asks the LLM for a thought + tool
    call, executes the tool, feeds the observation back into the prompt, and
    stops when the LLM picks the built-in ``finish`` tool or a limit is hit.
    """

    def __init__(
        self,
        *,
        tools: List[Union[BaseTool, Tool, Callable]],
        llm: Optional[BaseLLM] = None,
        max_iterations: int = 20,
        **kwargs,
    ):
        """
        Args:
            tools: tools the agent may call; the built-in ``finish`` tool is
                always appended so the agent can terminate itself.
            llm: reasoning LLM; defaults to gpt-3.5-turbo-16k at temperature 0
                with the library's default system prompt disabled.
            max_iterations: hard cap on reasoning/tool-call iterations.
        """
        super().__init__(**kwargs)
        self.tool_manager: ToolManager = ToolManager(tools + [finish])
        self.llm: BaseLLM = llm or ChatOpenAI(
            model="gpt-3.5-turbo-16k",
            temperature=0.0,
            enable_default_system_prompt=False,
        )
        self.max_iterations: int = max_iterations
        self.current_iteration: int = 1
        # Optional wall-clock budget in seconds; None means no time limit.
        self.max_execution_time: Optional[float] = None
        self.current_time_elapsed: float = 0.0

        self.system_prompt: str = ""
        # Running transcript of thought/action/observation steps, injected
        # back into the system prompt every iteration.
        self.current_process: str = ""
        self.task: str = ""

    def _build_system_prompt(self):
        """ToolAgent use dynamic system prompt. Therefore, this method will be called
        every time before the llm generates a response."""
        self.system_prompt = SYSTEM_PROMPT_TEMPLATE.format(
            task=self.task,
            tool_descriptions=self.tool_manager.tool_descriptions,
            current_process=self.current_process,
            output_format=_build_output_format_instruction(),
        )
        logger.info(f"[pne] ToolAgent system prompt: {self.system_prompt}")

    def _build_current_process(self, result: ReActResponse, tool_result: str):
        """Append this iteration's thought/action/observation to the transcript.

        Args:
            result: the parsed LLM response for this iteration.
            tool_result: the observation returned by the executed tool.
        """
        # Lazily emit the section header the first time a step is recorded.
        if self.current_process == "":
            self.current_process = """## Current Process of Task
Here is what you have already done. You need to infer what the next task needs to be done based on the previous one.\n"""  # noqa

        self.current_process += f"Step {self.current_iteration}:\n"
        self.current_process += f"Thought: {result.thought}\n"
        self.current_process += f"Self Criticism: {result.self_criticism}\n"
        self.current_process += f"Tool: {result.tool_name}\n"
        self.current_process += f"Tool Parameters: {result.tool_parameters}\n"
        self.current_process += f"Observation: {tool_result}\n\n"
        self.current_process += "---\n"

    def _run(self, task: str, *args, **kwargs) -> str:
        """Run the ReAct loop for the given task.

        Returns:
            The content passed to the ``finish`` tool. NOTE(review): if the
            iteration/time budget is exhausted first, execution falls off the
            loop and returns None despite the declared ``-> str``; kept as-is
            to avoid changing caller-visible behavior — consider returning an
            explicit "budget exceeded" message instead.
        """
        self.task: str = task

        while self._should_continue():
            # Time each iteration individually. The original captured a single
            # start_time before the loop and did
            # `current_time_elapsed += time.time() - start_time` every pass,
            # re-counting all previous iterations each time and tripping the
            # max_execution_time cutoff far too early.
            iteration_start = time.time()

            self._build_system_prompt()

            messages = MessageSet.from_listdict_data(
                [{"role": "system", "content": self.system_prompt}]
            )
            llm_response: str = self.llm.predict(messages=messages).content
            result: ReActResponse = formatting_result(ReActResponse, llm_response)

            Hook.call_hook(
                HookTable.ON_AGENT_ACTION,
                self,
                thought=f"{result.thought}\n{result.self_criticism}",
                action=result.tool_name,
                action_input=result.tool_parameters,
            )

            # The LLM chose to terminate: return its final answer directly.
            if result.tool_name == "finish":
                return result.tool_parameters["result"]

            tool_result: str = self.tool_manager.run_tool(
                result.tool_name, result.tool_parameters
            )
            self._build_current_process(result, tool_result)

            Hook.call_hook(
                HookTable.ON_AGENT_OBSERVATION, self, observation=tool_result
            )

            self.current_iteration += 1
            self.current_time_elapsed += time.time() - iteration_start

    def _should_continue(self) -> bool:
        """Determine whether to stop, both timeout and exceeding the maximum number of
        iterations will stop.
        Returns:
            Whether to stop.
        """
        # NOTE(review): current_iteration starts at 1 and this uses >=, so at
        # most max_iterations - 1 iterations actually run — confirm intended.
        if self.current_iteration >= self.max_iterations:
            return False

        if (
            self.max_execution_time
            and self.current_time_elapsed >= self.max_execution_time
        ):
            return False

        return True

    def get_llm(self) -> BaseLLM:
        """Return the LLM instance used by this agent."""
        return self.llm
80 changes: 80 additions & 0 deletions promptulate/beta/agents/tool_agent/notebook.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
{
"cells": [
{
"cell_type": "markdown",
"source": [
"This notebook show how to use beta version ToolAgent."
],
"metadata": {
"collapsed": false
},
"id": "7804b4b4e92eef16"
},
{
"cell_type": "code",
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001B[31;1m\u001B[1;3mAgent Start...\u001B[0m\n",
"\u001B[36;1m\u001B[1;3m[User] What is the weather in Beijing tomorrow? What is the square root of the temperature?\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Thought] I need to search for the weather forecast in Beijing for tomorrow to find out the temperature, and then I can use the temperature to calculate the square root.\n",
"I have not yet gathered any actual weather data for the calculation, so it’s premature to consider the calculation step.\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Action] ddg_websearch args: {'query': 'Beijing weather forecast tomorrow'}\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Observation] -6 °C Mo -5 °C Tu -2 °C We 2 °C Th 3 °C Fr 2 °C Sa 2 °C Su 0 °C Meteogram Temperature (°C) Precipitation (mm) Gust (km/h) New snow (cm) -11 -12 -11 -7 -5 -5 -6 -4 -3 -3 -3 -5 -5 -6 -5 -2 2 3 2 2 0 1 -1 0 -1 0 Webcams Olympic Park Zone 8 km view.iap.ac.cn:8080 Air quality (AQI) Today 59 °F 40 °F 4 mph - 9 h 1 hour view The weather forecast has very high predictability. Compare different forecasts with MultiModel. Weather report for Beijing During the night and in the afternoon clear skies prevail. Before noon a few clouds are expected. It is a sunny day. Temperatures peaking at 59 °F. Beijing Weather Tomorrow - Accurate Forecast for Beijing Today (Beijing) World China Beijing Beijing Beijing Weather Forecast Now 09:28, 05 Dec +32 °F RealFeel +28°F Pressure 29.8 inHg Humidity 60% Wind N, 4.5 mph Wind gust 6.7 mph Cloudiness 0% Visibility 10 mi. More Sunrise: 07:19 Sunset: 16:49 Daylight: 09:30 Moon Phase: Third quarter China Beijing Beijing Weather Forecast. Providing a local hourly Beijing weather forecast of rain, sun, wind, humidity and temperature. The Long-range 12 day forecast also includes detail for Beijing weather today. Live weather reports from Beijing weather stations and weather warnings that include risk of thunder, high UV index and forecast gales. Today 12 September +20°C +26°C Tomorrow 13 September +18°C +25°C Thursday 14 September +19°C +26°C Friday 15 September +20°C +27°C Week Night +20°C Feels like +21°C 0% 0 mm 3 mps 76% 755 mm Morning +20°C\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Thought] The search results show the temperature for tomorrow as -5 °C in Beijing. Now, I need to calculate the square root of this temperature. However, since the square root of a negative number is not a real number, I will assume the task refers to the square root of the absolute value of the temperature. I will now use the calculator tool to find the square root of 5.\n",
"I correctly identified the need to calculate the square root of the temperature in Beijing for tomorrow, but I must ensure to handle the absolute value since the original temperature is negative.\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Action] calculator args: {'expression': 'sqrt(5)'}\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Observation] 2.23606797749979\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Thought] Now that I have calculated the square root of tomorrow's forecasted temperature in Beijing, I should use the finish tool to provide the final result.\n",
"There is no self-criticism needed at this stage since the next step is clear and there are no outstanding issues or tasks.\u001B[0m\n",
"\u001B[33;1m\u001B[1;3m[Action] finish args: {'result': 'The temperature in Beijing tomorrow is forecasted to be -5°C. The square root of the absolute value of this temperature is 2.23606797749979.'}\u001B[0m\n",
"\u001B[32;1m\u001B[1;3m[Agent Result] The temperature in Beijing tomorrow is forecasted to be -5°C. The square root of the absolute value of this temperature is 2.23606797749979.\u001B[0m\n",
"\u001B[38;5;200m\u001B[1;3mAgent End.\u001B[0m\n",
"The temperature in Beijing tomorrow is forecasted to be -5°C. The square root of the absolute value of this temperature is 2.23606797749979.\n"
]
}
],
"source": [
"from promptulate.beta.agents.tool_agent.agent import ToolAgent\n",
"import promptulate as pne\n",
"\n",
"tool_list = [pne.tools.ddg_websearch, pne.tools.calculator]\n",
"agent = ToolAgent(tools=tool_list, llm=pne.ChatOpenAI(model=\"gpt-4-1106-preview\"))\n",
"resp: str = agent.run(\"What is the weather in Beijing tomorrow? What is the square root of the temperature?\")\n",
"print(resp)"
],
"metadata": {
"collapsed": true,
"ExecuteTime": {
"end_time": "2024-01-22T14:43:46.711562700Z",
"start_time": "2024-01-22T14:43:00.772953800Z"
}
},
"id": "initial_id",
"execution_count": 1
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
13 changes: 12 additions & 1 deletion promptulate/tools/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,13 @@
from promptulate.tools.duckduckgo.tools import (
DuckDuckGoReferenceTool,
DuckDuckGoTool,
ddg_websearch,
)
from promptulate.tools.huggingface.tools import HuggingFaceTool
from promptulate.tools.human_feedback import HumanFeedBackTool
from promptulate.tools.iot_swith_mqtt import IotSwitchTool
from promptulate.tools.langchain.tools import LangchainTool
from promptulate.tools.math.tools import Calculator
from promptulate.tools.math.tools import Calculator, calculator
from promptulate.tools.paper.tools import PaperSummaryTool
from promptulate.tools.python_repl import PythonREPLTool
from promptulate.tools.semantic_scholar import (
Expand Down Expand Up @@ -74,6 +75,10 @@ def __getattr__(name):
from promptulate.tools.duckduckgo.tools import DuckDuckGoTool

return DuckDuckGoTool
elif name == "ddg_websearch":
from promptulate.tools.duckduckgo.tools import ddg_websearch

return ddg_websearch
elif name == "SemanticScholarCitationTool":
from promptulate.tools.semantic_scholar import SemanticScholarCitationTool

Expand Down Expand Up @@ -106,6 +111,10 @@ def __getattr__(name):
from promptulate.tools.math.tools import Calculator

return Calculator
elif name == "calculator":
from promptulate.tools.math.tools import calculator

return calculator
elif name == "sleep_tool":
from promptulate.tools.sleep.tool import sleep_tool

Expand Down Expand Up @@ -136,6 +145,8 @@ def __getattr__(name):
"HuggingFaceTool",
"DuckDuckGoReferenceTool",
"DuckDuckGoTool",
"ddg_websearch",
"calculator",
"PaperSummaryTool",
"ArxivSummaryTool",
"ArxivReferenceTool",
Expand Down
11 changes: 6 additions & 5 deletions promptulate/tools/duckduckgo/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from promptulate.tools.duckduckgo.tools import DuckDuckGoReferenceTool, DuckDuckGoTool
from promptulate.tools.duckduckgo.tools import (
DuckDuckGoReferenceTool,
DuckDuckGoTool,
ddg_websearch,
)

__all__ = [
"DuckDuckGoTool",
"DuckDuckGoReferenceTool",
]
__all__ = ["DuckDuckGoTool", "DuckDuckGoReferenceTool", "ddg_websearch"]
10 changes: 10 additions & 0 deletions promptulate/tools/duckduckgo/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,3 +83,13 @@ def _run(self, keyword: str, **kwargs) -> Union[str, List[Dict[str, str]]]:
if "return_type" in kwargs and kwargs["return_type"] == "original":
return result
return listdict_to_string(result, item_suffix="\n")


def ddg_websearch(query: str) -> str:
    """Run duckduckgo search and get search result.
    Args:
        query: query keyword
    """
    return DuckDuckGoTool().run(query)
4 changes: 2 additions & 2 deletions promptulate/tools/math/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
from promptulate.tools.math.tools import Calculator
from promptulate.tools.math.tools import Calculator, calculator

__all__ = ["Calculator"]
__all__ = ["Calculator", "calculator"]
16 changes: 16 additions & 0 deletions promptulate/tools/math/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,3 +119,19 @@ def _run(self, question: str) -> str:
return _evaluate_expression(expression)
except Exception as e:
raise ValueError(f"Unknown format from LLM: {llm_output}, error: {e}")


def calculator(expression: str):
    """Evaluate a mathematical expression.
    Args:
        expression: A mathematical expression, eg: 18^0.43
    Attention:
        Expressions can not exist variables!
        eg: (current age)^0.43 is wrong, you should use 18^0.43 instead.
    Returns:
        The result of the evaluation.
    """
    # Delegate to the Calculator tool, which parses and evaluates the
    # expression; docstring above is kept verbatim as the LLM-facing tool
    # description.
    tool = Calculator()
    return tool.run(expression)
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ description = "A powerful LLM Application development framework."
name = "promptulate"
readme = "README.md"
repository = "https://github.com/Undertone0809/promptulate"
version = "1.11.2"
version = "1.12.0"
keywords = [
"promptulate",
"pne",
Expand Down
8 changes: 4 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -34,18 +34,18 @@ hyperframe==6.0.1 ; python_version >= "3.8" and python_version < "4.0"
idna==3.6 ; python_version >= "3.8" and python_version < "4.0"
importlib-metadata==7.0.1 ; python_version >= "3.8" and python_version < "4.0"
jinja2==3.1.3 ; python_version >= "3.8" and python_version < "4.0"
litellm==1.17.13 ; python_version >= "3.8" and python_version < "4.0"
litellm==1.18.8 ; python_version >= "3.8" and python_version < "4.0"
loguru==0.7.2 ; python_version >= "3.8" and python_version < "4.0"
lxml==5.1.0 ; python_version >= "3.8" and python_version < "4.0"
markupsafe==2.1.3 ; python_version >= "3.8" and python_version < "4.0"
markupsafe==2.1.4 ; python_version >= "3.8" and python_version < "4.0"
multidict==6.0.4 ; python_version >= "3.8" and python_version < "4.0"
numexpr==2.8.6 ; python_full_version >= "3.8.0" and python_version < "4.0"
numpy==1.24.4 ; python_version >= "3.8" and python_version < "4.0"
openai==1.8.0 ; python_version >= "3.8" and python_version < "4.0"
openai==1.9.0 ; python_version >= "3.8" and python_version < "4.0"
packaging==23.2 ; python_version >= "3.8" and python_version < "4.0"
prompt-toolkit==3.0.36 ; python_version >= "3.8" and python_version < "4.0"
pycparser==2.21 ; python_version >= "3.8" and platform_python_implementation != "CPython" and python_version < "4.0"
pydantic==1.10.13 ; python_version >= "3.8" and python_version < "4.0"
pydantic==1.10.14 ; python_version >= "3.8" and python_version < "4.0"
python-dotenv==1.0.0 ; python_version >= "3.8" and python_version < "4.0"
pyyaml==6.0.1 ; python_version >= "3.8" and python_version < "4.0"
questionary==2.0.1 ; python_version >= "3.8" and python_version < "4.0"
Expand Down

0 comments on commit 52c6c32

Please sign in to comment.