From 0d8ccdd20b118a6b0d779aad805cce4c065888ba Mon Sep 17 00:00:00 2001
From: Harish Mohan Raj
Date: Wed, 25 Sep 2024 13:45:02 +0530
Subject: [PATCH] WIP

---
 fastagency_studio/{studio => }/app.py         |   0
 .../{studio => auth_token}/__init__.py        |   0
 .../{studio => }/auth_token/auth.py           |   0
 .../{studio/auth_token => db}/__init__.py     |   0
 fastagency_studio/{studio => }/db/base.py     |   0
 fastagency_studio/{studio => }/db/inmemory.py |   0
 fastagency_studio/{studio => }/db/prisma.py   |   0
 .../{studio => }/faststream_app.py            |   0
 fastagency_studio/{studio => }/helpers.py     |  10 +-
 .../{studio/db => io}/__init__.py             |   0
 fastagency_studio/{studio => }/io/app.py      |   0
 fastagency_studio/{studio => }/io/ionats.py   |   0
 .../{studio => }/models/__init__.py           |   0
 .../{studio => }/models/agents/__init__.py    |   0
 .../{studio => }/models/agents/assistant.py   |   0
 .../{studio => }/models/agents/base.py        |   0
 .../{studio => }/models/agents/user_proxy.py  |   2 +-
 .../{studio => }/models/agents/web_surfer.py  |  16 +-
 fastagency_studio/{studio => }/models/base.py |   2 +-
 .../models/deployments/__init__.py            |   0
 .../models/deployments/deployment.py          |   0
 .../{studio => }/models/llms/__init__.py      |   0
 .../{studio => }/models/llms/anthropic.py     |   0
 .../{studio => }/models/llms/azure.py         |   0
 .../{studio => }/models/llms/openai.py        |   0
 .../{studio => }/models/llms/together.py      |   5 +-
 .../{studio => }/models/registry.py           |   0
 .../{studio => }/models/secrets/__init__.py   |   0
 .../{studio => }/models/secrets/fly_token.py  |   0
 .../models/secrets/github_token.py            |   0
 .../{studio => }/models/teams/__init__.py     |   0
 .../{studio => }/models/teams/base.py         |   0
 .../models/teams/multi_agent_team.py          |   0
 .../models/teams/two_agent_teams.py           |   0
 .../{studio => }/models/toolboxes/__init__.py |   0
 .../{studio => }/models/toolboxes/toolbox.py  |   0
 .../{studio => }/saas_app_generator.py        |   0
 .../models/agents/web_surfer_autogen.py       | 327 -------
 scripts/lint.sh                               |   2 +-
 scripts/run-server.sh                         |   4 +-
 tests/app/test_get_schemas.py                 |  50 ++
 tests/app/test_model_routes.py                | 744 ++++++++++++++++
 tests/app/test_openai_extensively.py          | 356 ++++++++
 .../io => tests/auth_token}/__init__.py       |   0
 tests/auth_token/test_auth_token.py           | 232 +++++
 tests/conftest.py                             | 819 ++++++++++++++++++
 tests/db/__init__.py                          |   0
 tests/db/test_inmemory.py                     | 185 ++++
 tests/db/test_prisma.py                       | 186 ++++
 tests/faststream_app/__init__.py              |   0
 tests/faststream_app/test_faststream_app.py   |  97 +++
 tests/helpers.py                              | 172 ++++
 tests/models/__init__.py                      |   0
 tests/models/agents/__init__.py               |   0
 tests/models/agents/test_assistant.py         | 289 ++++++
 tests/models/agents/test_user_proxy.py        |  37 +
 tests/models/agents/test_web_surfer.py        | 490 +++++++++++
 tests/models/deployments/__init__.py          |   0
 tests/models/deployments/test_deployment.py   | 291 +++++++
 tests/models/llms/__init__.py                 |   0
 tests/models/llms/test_anthropic.py           | 187 ++++
 tests/models/llms/test_azure.py               | 223 +++++
 tests/models/llms/test_end2end.py             |  66 ++
 tests/models/llms/test_llm_keys.py            |  35 +
 tests/models/llms/test_openai.py              | 237 +++++
 tests/models/llms/test_together.py            | 220 +++++
 tests/models/secrets/__init__.py              |   0
 tests/models/secrets/test_fly_token.py        |  55 ++
 tests/models/secrets/test_github_token.py     |  55 ++
 tests/models/teams/__init__.py                |   0
 tests/models/teams/test_base.py               |   6 +
 tests/models/teams/test_multi_agents_team.py  | 443 ++++++++++
 tests/models/teams/test_two_agents_team.py    | 251 ++++++
 tests/models/test_base.py                     |  77 ++
 tests/models/test_registry.py                 | 230 +++++
 tests/models/toolboxes/__init__.py            |   0
 tests/models/toolboxes/test_toolbox.py        |  81 ++
 tests/test_conftest.py                        | 112 +++
 tests/test_nats.py                            | 481 ++++++++++
 tests/test_saas_app_generator.py              | 381 ++++
 80 files changed, 7109 insertions(+), 347 deletions(-)
 rename fastagency_studio/{studio => }/app.py (100%)
 rename fastagency_studio/{studio => auth_token}/__init__.py (100%)
 rename fastagency_studio/{studio => }/auth_token/auth.py (100%)
 rename fastagency_studio/{studio/auth_token => db}/__init__.py (100%)
 rename fastagency_studio/{studio => }/db/base.py (100%)
 rename fastagency_studio/{studio => }/db/inmemory.py (100%)
 rename fastagency_studio/{studio => }/db/prisma.py (100%)
 rename fastagency_studio/{studio => }/faststream_app.py (100%)
 rename fastagency_studio/{studio => }/helpers.py (99%)
 rename fastagency_studio/{studio/db => io}/__init__.py (100%)
 rename fastagency_studio/{studio => }/io/app.py (100%)
 rename fastagency_studio/{studio => }/io/ionats.py (100%)
 rename fastagency_studio/{studio => }/models/__init__.py (100%)
 rename fastagency_studio/{studio => }/models/agents/__init__.py (100%)
 rename fastagency_studio/{studio => }/models/agents/assistant.py (100%)
 rename fastagency_studio/{studio => }/models/agents/base.py (100%)
 rename fastagency_studio/{studio => }/models/agents/user_proxy.py (96%)
 rename fastagency_studio/{studio => }/models/agents/web_surfer.py (90%)
 rename fastagency_studio/{studio => }/models/base.py (98%)
 rename fastagency_studio/{studio => }/models/deployments/__init__.py (100%)
 rename fastagency_studio/{studio => }/models/deployments/deployment.py (100%)
 rename fastagency_studio/{studio => }/models/llms/__init__.py (100%)
 rename fastagency_studio/{studio => }/models/llms/anthropic.py (100%)
 rename fastagency_studio/{studio => }/models/llms/azure.py (100%)
 rename fastagency_studio/{studio => }/models/llms/openai.py (100%)
 rename fastagency_studio/{studio => }/models/llms/together.py (99%)
 rename fastagency_studio/{studio => }/models/registry.py (100%)
 rename fastagency_studio/{studio => }/models/secrets/__init__.py (100%)
 rename fastagency_studio/{studio => }/models/secrets/fly_token.py (100%)
 rename fastagency_studio/{studio => }/models/secrets/github_token.py (100%)
 rename fastagency_studio/{studio => }/models/teams/__init__.py (100%)
 rename fastagency_studio/{studio => }/models/teams/base.py (100%)
 rename fastagency_studio/{studio => }/models/teams/multi_agent_team.py (100%)
 rename fastagency_studio/{studio => }/models/teams/two_agent_teams.py (100%)
 rename fastagency_studio/{studio => }/models/toolboxes/__init__.py (100%)
 rename fastagency_studio/{studio => }/models/toolboxes/toolbox.py (100%)
 rename fastagency_studio/{studio => }/saas_app_generator.py (100%)
 delete mode 100644 fastagency_studio/studio/models/agents/web_surfer_autogen.py
 create mode 100644 tests/app/test_get_schemas.py
 create mode 100644 tests/app/test_model_routes.py
 create mode 100644 tests/app/test_openai_extensively.py
 rename {fastagency_studio/studio/io => tests/auth_token}/__init__.py (100%)
 create mode 100644 tests/auth_token/test_auth_token.py
 create mode 100644 tests/db/__init__.py
 create mode 100644 tests/db/test_inmemory.py
 create mode 100644 tests/db/test_prisma.py
 create mode 100644 tests/faststream_app/__init__.py
 create mode 100644 tests/faststream_app/test_faststream_app.py
 create mode 100644 tests/helpers.py
 create mode 100644 tests/models/__init__.py
 create mode 100644 tests/models/agents/__init__.py
 create mode 100644 tests/models/agents/test_assistant.py
 create mode 100644 tests/models/agents/test_user_proxy.py
 create mode 100644 tests/models/agents/test_web_surfer.py
 create mode 100644 tests/models/deployments/__init__.py
 create mode 100644 tests/models/deployments/test_deployment.py
 create mode 100644 tests/models/llms/__init__.py
 create mode 100644 tests/models/llms/test_anthropic.py
 create mode 100644 tests/models/llms/test_azure.py
 create mode 100644 tests/models/llms/test_end2end.py
 create mode 100644 tests/models/llms/test_llm_keys.py
 create mode 100644 tests/models/llms/test_openai.py
 create mode 100644 tests/models/llms/test_together.py
 create mode 100644 tests/models/secrets/__init__.py
 create mode 100644 tests/models/secrets/test_fly_token.py
 create mode 100644 tests/models/secrets/test_github_token.py
 create mode 100644 tests/models/teams/__init__.py
 create mode 100644 tests/models/teams/test_base.py
 create mode 100644 tests/models/teams/test_multi_agents_team.py
 create mode 100644 tests/models/teams/test_two_agents_team.py
 create mode 100644 tests/models/test_base.py
 create mode 100644 tests/models/test_registry.py
 create mode 100644 tests/models/toolboxes/__init__.py
 create mode 100644 tests/models/toolboxes/test_toolbox.py
 create mode 100644 tests/test_conftest.py
 create mode 100644 tests/test_nats.py
 create mode 100644 tests/test_saas_app_generator.py

diff --git a/fastagency_studio/studio/app.py b/fastagency_studio/app.py
similarity index 100%
rename from fastagency_studio/studio/app.py
rename to fastagency_studio/app.py
diff --git a/fastagency_studio/studio/__init__.py b/fastagency_studio/auth_token/__init__.py
similarity index 100%
rename from fastagency_studio/studio/__init__.py
rename to fastagency_studio/auth_token/__init__.py
diff --git a/fastagency_studio/studio/auth_token/auth.py b/fastagency_studio/auth_token/auth.py
similarity index 100%
rename from fastagency_studio/studio/auth_token/auth.py
rename to fastagency_studio/auth_token/auth.py
diff --git a/fastagency_studio/studio/auth_token/__init__.py b/fastagency_studio/db/__init__.py
similarity index 100%
rename from fastagency_studio/studio/auth_token/__init__.py
rename to fastagency_studio/db/__init__.py
diff --git a/fastagency_studio/studio/db/base.py b/fastagency_studio/db/base.py
similarity index 100%
rename from fastagency_studio/studio/db/base.py
rename to fastagency_studio/db/base.py
diff --git a/fastagency_studio/studio/db/inmemory.py b/fastagency_studio/db/inmemory.py
similarity index 100%
rename from fastagency_studio/studio/db/inmemory.py
rename to fastagency_studio/db/inmemory.py
diff --git a/fastagency_studio/studio/db/prisma.py b/fastagency_studio/db/prisma.py
similarity index 100%
rename from fastagency_studio/studio/db/prisma.py
rename to fastagency_studio/db/prisma.py
diff --git a/fastagency_studio/studio/faststream_app.py b/fastagency_studio/faststream_app.py
similarity index 100%
rename from fastagency_studio/studio/faststream_app.py
rename to fastagency_studio/faststream_app.py
diff --git a/fastagency_studio/studio/helpers.py b/fastagency_studio/helpers.py
similarity index 99%
rename from fastagency_studio/studio/helpers.py
rename to fastagency_studio/helpers.py
index 00c88fcb..b0d823bc 100644
--- a/fastagency_studio/studio/helpers.py
+++ b/fastagency_studio/helpers.py
@@ -4,17 +4,17 @@ from uuid import UUID
 
 from asyncer import asyncify
-from fastagency.studio.saas_app_generator import (
-    InvalidFlyTokenError,
-    InvalidGHTokenError,
-    SaasAppGenerator,
-)
 from fastapi import BackgroundTasks, HTTPException
 
 from .auth_token.auth import create_deployment_auth_token
 from .db.base import DefaultDB
 from .models.base import Model, ObjectReference
 from .models.registry import Registry
+from .saas_app_generator import (
+    InvalidFlyTokenError,
+    InvalidGHTokenError,
+    SaasAppGenerator,
+)
 
 T = TypeVar("T", bound=Model)
diff --git a/fastagency_studio/studio/db/__init__.py b/fastagency_studio/io/__init__.py
similarity index 100%
rename from fastagency_studio/studio/db/__init__.py
rename to fastagency_studio/io/__init__.py
diff --git a/fastagency_studio/studio/io/app.py b/fastagency_studio/io/app.py
similarity index 100%
rename from fastagency_studio/studio/io/app.py
rename to fastagency_studio/io/app.py
diff --git a/fastagency_studio/studio/io/ionats.py b/fastagency_studio/io/ionats.py
similarity index 100%
rename from fastagency_studio/studio/io/ionats.py
rename to fastagency_studio/io/ionats.py
diff --git a/fastagency_studio/studio/models/__init__.py b/fastagency_studio/models/__init__.py
similarity index 100%
rename from fastagency_studio/studio/models/__init__.py
rename to fastagency_studio/models/__init__.py
diff --git a/fastagency_studio/studio/models/agents/__init__.py b/fastagency_studio/models/agents/__init__.py
similarity index 100%
rename from fastagency_studio/studio/models/agents/__init__.py
rename to fastagency_studio/models/agents/__init__.py
diff --git a/fastagency_studio/studio/models/agents/assistant.py b/fastagency_studio/models/agents/assistant.py
similarity index 100%
rename from fastagency_studio/studio/models/agents/assistant.py
rename to fastagency_studio/models/agents/assistant.py
diff --git a/fastagency_studio/studio/models/agents/base.py b/fastagency_studio/models/agents/base.py
similarity index 100%
rename from fastagency_studio/studio/models/agents/base.py
rename to fastagency_studio/models/agents/base.py
diff --git a/fastagency_studio/studio/models/agents/user_proxy.py b/fastagency_studio/models/agents/user_proxy.py
similarity index 96%
rename from fastagency_studio/studio/models/agents/user_proxy.py
rename to fastagency_studio/models/agents/user_proxy.py
index 0ccef538..07431575 100644
--- a/fastagency_studio/studio/models/agents/user_proxy.py
+++ b/fastagency_studio/models/agents/user_proxy.py
@@ -2,7 +2,7 @@ from uuid import UUID
 
 import autogen
-from fastagency.api.openapi.client import OpenAPI
+from fastagency.api.openapi import OpenAPI
 
 from ..base import Field, Model
 from ..registry import register
diff --git a/fastagency_studio/studio/models/agents/web_surfer.py b/fastagency_studio/models/agents/web_surfer.py
similarity index 90%
rename from fastagency_studio/studio/models/agents/web_surfer.py
rename to fastagency_studio/models/agents/web_surfer.py
index aabc2a01..b9edb24d 100644
--- a/fastagency_studio/studio/models/agents/web_surfer.py
+++ b/fastagency_studio/models/agents/web_surfer.py
@@ -4,7 +4,7 @@ from asyncer import syncify
 from autogen.agentchat import AssistantAgent as AutoGenAssistantAgent
 from autogen.agentchat import ConversableAgent as AutoGenConversableAgent
-from fastagency.studio.models.agents.web_surfer_autogen import WebSurferChat
+from fastagency.runtime.autogen.tools import WebSurferTool
 from typing_extensions import TypeAlias
 
 from ..base import Field, Model
@@ -34,19 +34,19 @@ async def create_autogen(cls, model_id: UUID, user_id: UUID, **kwargs: Any) -> s
 
 class WebSurferToolbox:
-    def __init__(self, websurfer_chat: WebSurferChat):
+    def __init__(self, websurfer_tool: WebSurferTool):
         """Create a toolbox for the web surfer agent.
 
         This toolbox will contain functions to delegate web surfing tasks to the internal web surfer agent.
 
         Args:
-            websurfer_chat (WebSurferChat): The web surfer chat agent
+            websurfer_tool (WebSurferTool): The web surfer tool to be used in the toolbox
         """
-        self.websurfer_chat = websurfer_chat
+        self.websurfer_tool = websurfer_tool
 
         def create_new_task(
             task: Annotated[str, "task for websurfer"],
         ) -> str:
             try:
-                return syncify(self.websurfer_chat.create_new_task)(task)  # type: ignore [no-any-return]
+                return syncify(self.websurfer_tool.a_create_new_task)(task)  # type: ignore [no-any-return]
             except Exception as e:
                 raise e
@@ -62,7 +62,7 @@ def continue_task_with_additional_instructions(
         ) -> str:
             try:
                 return syncify(  # type: ignore [no-any-return]
-                    self.websurfer_chat.continue_task_with_additional_instructions
+                    self.websurfer_tool.a_continue_task_with_additional_instructions
                 )(message)
             except Exception as e:
                 raise e
@@ -131,7 +131,7 @@ async def create_autogen(
 
         viewport_size = websurfer_model.viewport_size
 
-        websurfer_chat = WebSurferChat(
+        websurfer_tool = WebSurferTool(
             name_prefix=websurfer_model.name,
             llm_config=llm_config,
             summarizer_llm_config=summarizer_llm_config,
@@ -139,7 +139,7 @@ async def create_autogen(
             bing_api_key=bing_api_key,
         )
 
-        web_surfer_toolbox = WebSurferToolbox(websurfer_chat)
+        web_surfer_toolbox = WebSurferToolbox(websurfer_tool)
 
         agent_name = websurfer_model.name
 
diff --git a/fastagency_studio/studio/models/base.py b/fastagency_studio/models/base.py
similarity index 98%
rename from fastagency_studio/studio/models/base.py
rename to fastagency_studio/models/base.py
index 06a296c2..bbc7cb0b 100644
--- a/fastagency_studio/studio/models/base.py
+++ b/fastagency_studio/models/base.py
@@ -148,7 +148,7 @@ def create_reference_model(
         __base__=ObjectReference,
     )
     reference_model.__module__ = (
-        f"fastagency.studio.models.{type_name}.{model_type_name}._generated"
+        f"fastagency_studio.models.{type_name}.{model_type_name}._generated"
     )
     reference_model._data_class = model_class  # type: ignore[attr-defined]
 
diff --git a/fastagency_studio/studio/models/deployments/__init__.py b/fastagency_studio/models/deployments/__init__.py
similarity index 100%
rename from fastagency_studio/studio/models/deployments/__init__.py
rename to fastagency_studio/models/deployments/__init__.py
diff --git a/fastagency_studio/studio/models/deployments/deployment.py b/fastagency_studio/models/deployments/deployment.py
similarity index 100%
rename from fastagency_studio/studio/models/deployments/deployment.py
rename to fastagency_studio/models/deployments/deployment.py
diff --git a/fastagency_studio/studio/models/llms/__init__.py b/fastagency_studio/models/llms/__init__.py
similarity index 100%
rename from fastagency_studio/studio/models/llms/__init__.py
rename to fastagency_studio/models/llms/__init__.py
diff --git a/fastagency_studio/studio/models/llms/anthropic.py b/fastagency_studio/models/llms/anthropic.py
similarity index 100%
rename from fastagency_studio/studio/models/llms/anthropic.py
rename to fastagency_studio/models/llms/anthropic.py
diff --git a/fastagency_studio/studio/models/llms/azure.py b/fastagency_studio/models/llms/azure.py
similarity index 100%
rename from fastagency_studio/studio/models/llms/azure.py
rename to fastagency_studio/models/llms/azure.py
diff --git a/fastagency_studio/studio/models/llms/openai.py b/fastagency_studio/models/llms/openai.py
similarity index 100%
rename from fastagency_studio/studio/models/llms/openai.py
rename to fastagency_studio/models/llms/openai.py
diff --git a/fastagency_studio/studio/models/llms/together.py b/fastagency_studio/models/llms/together.py
similarity index 99%
rename from fastagency_studio/studio/models/llms/together.py
rename to fastagency_studio/models/llms/together.py
index c34789c9..14d28bc8 100644
--- a/fastagency_studio/studio/models/llms/together.py
+++ b/fastagency_studio/models/llms/together.py
@@ -36,21 +36,21 @@
     "Gemma-2 Instruct (9B)": "google/gemma-2-9b-it",
     "Meta Llama 3 8B Instruct Reference": "meta-llama/Llama-3-8b-chat-hf",
     "Meta Llama 3.1 70B Instruct Turbo": "albert/meta-llama-3-1-70b-instruct-turbo",
-    "Meta Llama 3.1 8B Instruct Turbo": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
     "WizardLM-2 (8x22B)": "microsoft/WizardLM-2-8x22B",
     "Mixtral-8x7B Instruct v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "Meta Llama 3.1 405B Instruct Turbo": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
     "Meta Llama 3 70B Instruct Reference": "meta-llama/Llama-3-70b-chat-hf",
+    "LLaVa-Next (Mistral-7B)": "llava-hf/llava-v1.6-mistral-7b-hf",
     "DBRX Instruct": "databricks/dbrx-instruct",
     "Nous Hermes 2 - Mixtral 8x7B-DPO ": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
     "Meta Llama 3 8B Instruct Turbo": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
     "Meta Llama 3 8B Instruct Lite": "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
     "Meta Llama 3.1 8B Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",
+    "Meta Llama 3.1 8B Instruct Turbo": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
     "Mixtral-8x22B Instruct v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
     "Gryphe MythoMax L2 Lite (13B)": "Gryphe/MythoMax-L2-13b-Lite",
     "Hermes 3 - Llama-3.1 405B": "NousResearch/Hermes-3-Llama-3.1-405B-Turbo",
     "LLaMA-2 Chat (7B)": "togethercomputer/llama-2-7b-chat",
-    "LLaVa-Next (Mistral-7B)": "llava-hf/llava-v1.6-mistral-7b-hf",
     "WizardLM v1.2 (13B)": "WizardLM/WizardLM-13B-V1.2",
     "Koala (7B)": "togethercomputer/Koala-7B",
     "Qwen 2 Instruct (1.5B)": "Qwen/Qwen2-1.5B-Instruct",
@@ -96,6 +96,7 @@
     "Meta Llama 3 70B Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
     "Code Llama Instruct (70B)": "codellama/CodeLlama-70b-Instruct-hf",
     "Hermes 2 Theta Llama-3 70B": "NousResearch/Hermes-2-Theta-Llama-3-70B",
+    "Test 11": "test/test11",
     "Qwen 1.5 Chat (7B)": "Qwen/Qwen1.5-7B-Chat",
     "Dolphin 2.5 Mixtral 8x7b": "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
     "LLaMA-2 Chat (70B)": "meta-llama/Llama-2-70b-chat-hf",
diff --git a/fastagency_studio/studio/models/registry.py b/fastagency_studio/models/registry.py
similarity index 100%
rename from fastagency_studio/studio/models/registry.py
rename to fastagency_studio/models/registry.py
diff --git a/fastagency_studio/studio/models/secrets/__init__.py b/fastagency_studio/models/secrets/__init__.py
similarity index 100%
rename from fastagency_studio/studio/models/secrets/__init__.py
rename to fastagency_studio/models/secrets/__init__.py
diff --git a/fastagency_studio/studio/models/secrets/fly_token.py b/fastagency_studio/models/secrets/fly_token.py
similarity index 100%
rename from fastagency_studio/studio/models/secrets/fly_token.py
rename to fastagency_studio/models/secrets/fly_token.py
diff --git a/fastagency_studio/studio/models/secrets/github_token.py b/fastagency_studio/models/secrets/github_token.py
similarity index 100%
rename from fastagency_studio/studio/models/secrets/github_token.py
rename to fastagency_studio/models/secrets/github_token.py
diff --git a/fastagency_studio/studio/models/teams/__init__.py b/fastagency_studio/models/teams/__init__.py
similarity index 100%
rename from fastagency_studio/studio/models/teams/__init__.py
rename to fastagency_studio/models/teams/__init__.py
diff --git a/fastagency_studio/studio/models/teams/base.py b/fastagency_studio/models/teams/base.py
similarity index 100%
rename from fastagency_studio/studio/models/teams/base.py
rename to fastagency_studio/models/teams/base.py
diff --git a/fastagency_studio/studio/models/teams/multi_agent_team.py b/fastagency_studio/models/teams/multi_agent_team.py
similarity index 100%
rename from fastagency_studio/studio/models/teams/multi_agent_team.py
rename to fastagency_studio/models/teams/multi_agent_team.py
diff --git a/fastagency_studio/studio/models/teams/two_agent_teams.py b/fastagency_studio/models/teams/two_agent_teams.py
similarity index 100%
rename from fastagency_studio/studio/models/teams/two_agent_teams.py
rename to fastagency_studio/models/teams/two_agent_teams.py
diff --git a/fastagency_studio/studio/models/toolboxes/__init__.py b/fastagency_studio/models/toolboxes/__init__.py
similarity index 100%
rename from fastagency_studio/studio/models/toolboxes/__init__.py
rename to fastagency_studio/models/toolboxes/__init__.py
diff --git a/fastagency_studio/studio/models/toolboxes/toolbox.py b/fastagency_studio/models/toolboxes/toolbox.py
similarity index 100%
rename from fastagency_studio/studio/models/toolboxes/toolbox.py
rename to fastagency_studio/models/toolboxes/toolbox.py
diff --git a/fastagency_studio/studio/saas_app_generator.py b/fastagency_studio/saas_app_generator.py
similarity index 100%
rename from fastagency_studio/studio/saas_app_generator.py
rename to fastagency_studio/saas_app_generator.py
diff --git a/fastagency_studio/studio/models/agents/web_surfer_autogen.py b/fastagency_studio/studio/models/agents/web_surfer_autogen.py
deleted file mode 100644
index 6843d436..00000000
--- a/fastagency_studio/studio/models/agents/web_surfer_autogen.py
+++ /dev/null
@@ -1,327 +0,0 @@
-import os
-from typing import Annotated, Any, Optional
-
-from asyncer import asyncify
-from autogen.agentchat import AssistantAgent as AutoGenAssistantAgent
-from autogen.agentchat.chat import ChatResult
-from autogen.agentchat.contrib.web_surfer import WebSurferAgent as AutoGenWebSurferAgent
-from pydantic import BaseModel, Field, HttpUrl
-
-__all__ = ["WebSurferAnswer", "WebSurferChat"]
-
-
-class WebSurferAnswer(BaseModel):
-    task: Annotated[str, Field(..., description="The task to be completed")]
-    is_successful: Annotated[
-        bool, Field(..., description="Whether the task was successful")
-    ]
-    short_answer: Annotated[
-        str,
-        Field(
-            ...,
-            description="The short answer to the task without any explanation",
-        ),
-    ]
-    long_answer: Annotated[
-        str,
-        Field(..., description="The long answer to the task with explanation"),
-    ]
-    visited_links: Annotated[
-        list[HttpUrl],
-        Field(..., description="The list of visited links to generate the answer"),
-    ]
-
-    @staticmethod
-    def get_example_answer() -> "WebSurferAnswer":
-        return WebSurferAnswer(
-            task="What is the most popular QLED TV to buy on amazon.com?",
-            is_successful=True,
-            short_answer='Amazon Fire TV 55" Omni QLED Series 4K UHD smart TV',
-            long_answer='Amazon has the best selling page by different categories and there is a category for QLED TVs under electroincs. The most popular QLED TV is Amazon Fire TV 55" Omni QLED Series 4K UHD smart TV, Dolby Vision IQ, Fire TV Ambient Experience, local dimming, hands-free with Alexa. It is the best selling QLED TV on Amazon.',
-            visited_links=[
-                "https://www.amazon.com/Best-Sellers/",
-                "https://www.amazon.com/Best-Sellers-Electronics-QLED-TVs/",
-            ],
-        )
-
-
-class WebSurferChat:
-    def __init__(
-        self,
-        name_prefix: str,
-        llm_config: dict[str, Any],
-        summarizer_llm_config: dict[str, Any],
-        viewport_size: int,
-        bing_api_key: Optional[str],
-        max_consecutive_auto_reply: int = 30,
-        max_links_to_click: int = 10,
-        websurfer_kwargs: dict[str, Any] = {},  # noqa: B006
-        assistant_kwargs: dict[str, Any] = {},  # noqa: B006
-    ):
-        """Create a new WebSurferChat instance.
-
-        Args:
-            name_prefix (str): The name prefix of the inner AutoGen agents
-            llm_config (Dict[str, Any]): The LLM configuration
-            summarizer_llm_config (Dict[str, Any]): The summarizer LLM configuration
-            viewport_size (int): The viewport size of the browser
-            bing_api_key (Optional[str]): The Bing API key for the browser
-            max_consecutive_auto_reply (int, optional): The maximum consecutive auto reply. Defaults to 30.
-            max_links_to_click (int, optional): The maximum links to click. Defaults to 10.
-            websurfer_kwargs (Dict[str, Any], optional): The keyword arguments for the websurfer. Defaults to {}.
-            assistant_kwargs (Dict[str, Any], optional): The keyword arguments for the assistant. Defaults to {}.
-
-        """
-        self.name_prefix = name_prefix
-        self.llm_config = llm_config
-        self.summarizer_llm_config = summarizer_llm_config
-        self.viewport_size = viewport_size
-        self.bing_api_key = (
-            bing_api_key if bing_api_key is not None else os.getenv("BING_API_KEY")
-        )
-        self.max_consecutive_auto_reply = max_consecutive_auto_reply
-        self.max_links_to_click = max_links_to_click
-        self.websurfer_kwargs = websurfer_kwargs
-        self.assistant_kwargs = assistant_kwargs
-
-        self.task = "not set yet"
-        self.last_is_termination_msg_error = ""
-
-        self.browser_config = {
-            "viewport_size": self.viewport_size,
-            "bing_api_key": self.bing_api_key,
-            "request_kwargs": {
-                "headers": {
-                    "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
-                }
-            },
-        }
-
-        if "human_input_mode" in self.websurfer_kwargs:
-            self.websurfer_kwargs.pop("human_input_mode")
-
-        self.websurfer = AutoGenWebSurferAgent(
-            name=f"{self.name_prefix}_inner_websurfer",
-            llm_config=self.llm_config,
-            summarizer_llm_config=self.summarizer_llm_config,
-            browser_config=self.browser_config,
-            human_input_mode="NEVER",
-            is_termination_msg=self.is_termination_msg,
-            **self.websurfer_kwargs,
-        )
-
-        if "human_input_mode" in self.assistant_kwargs:
-            self.assistant_kwargs.pop("human_input_mode")
-
-        self.assistant = AutoGenAssistantAgent(
-            name=f"{self.name_prefix}_inner_assistant",
-            llm_config=self.llm_config,
-            human_input_mode="NEVER",
-            system_message=self.system_message,
-            max_consecutive_auto_reply=self.max_consecutive_auto_reply,
-            # is_termination_msg=self.is_termination_msg,
-            **self.assistant_kwargs,
-        )
-
-    def is_termination_msg(self, msg: dict[str, Any]) -> bool:
-        # print(f"is_termination_msg({msg=})")
-        if (
-            "content" in msg
-            and msg["content"] is not None
-            and "TERMINATE" in msg["content"]
-        ):
-            return True
-        try:
-            WebSurferAnswer.model_validate_json(msg["content"])
-            return True
-        except Exception as e:
-            self.last_is_termination_msg_error = str(e)
-            return False
-
-    def _get_error_message(self, chat_result: ChatResult) -> Optional[str]:
-        messages = [msg["content"] for msg in chat_result.chat_history]
-        last_message = messages[-1]
-        if "TERMINATE" in last_message:
-            return self.error_message
-
-        try:
-            WebSurferAnswer.model_validate_json(last_message)
-        except Exception:
-            return self.error_message
-
-        return None
-
-    def _get_answer(self, chat_result: ChatResult) -> WebSurferAnswer:
-        messages = [msg["content"] for msg in chat_result.chat_history]
-        last_message = messages[-1]
-        return WebSurferAnswer.model_validate_json(last_message)
-
-    def _chat_with_websurfer(
-        self, message: str, clear_history: bool, **kwargs: Any
-    ) -> WebSurferAnswer:
-        msg: Optional[str] = message
-
-        while msg is not None:
-            chat_result = self.websurfer.initiate_chat(
-                self.assistant,
-                clear_history=clear_history,
-                message=msg,
-            )
-            msg = self._get_error_message(chat_result)
-            clear_history = False
-
-        return self._get_answer(chat_result)
-
-    def _get_error_from_exception(self, task: str, e: Exception) -> str:
-        answer = WebSurferAnswer(
-            task=task,
-            is_successful=False,
-            short_answer="unexpected error occurred",
-            long_answer=str(e),
-            visited_links=[],
-        )
-
-        return self.create_final_reply(task, answer)
-
-    def create_final_reply(self, task: str, message: WebSurferAnswer) -> str:
-        retval = (
-            "We have successfully completed the task:\n\n"
-            if message.is_successful
-            else "We have failed to complete the task:\n\n"
-        )
-        retval += f"{task}\n\n"
-        retval += f"Short answer: {message.short_answer}\n\n"
-        retval += f"Explanation: {message.long_answer}\n\n"
-        retval += "Visited links:\n"
-        for link in message.visited_links:
-            retval += f"  - {link}\n"
-
-        return retval
-
-    async def create_new_task(self, task: str) -> str:
-        self.task = task
-        try:
-            answer = await asyncify(self._chat_with_websurfer)(
-                message=self.initial_message,
-                clear_history=True,
-            )
-        except Exception as e:
-            return self._get_error_from_exception(task, e)
-
-        return self.create_final_reply(task, answer)
-
-    async def continue_task_with_additional_instructions(self, message: str) -> str:
-        try:
-            answer = await asyncify(self._chat_with_websurfer)(
-                message=message,
-                clear_history=False,
-            )
-        except Exception as e:
-            return self._get_error_from_exception(message, e)
-
-        return self.create_final_reply(message, answer)
-
-    @property
-    def example_answer(self) -> WebSurferAnswer:
-        return WebSurferAnswer.get_example_answer()
-
-    @property
-    def initial_message(self) -> str:
-        return f"""We are tasked with the following task:
-
-{self.task}
-
-If no link is provided in the task, you should search the internet first to find the relevant information.
-
-The focus is on the provided url and its subpages, we do NOT care about the rest of the website i.e. parent pages.
-e.g. If the url is 'https://www.example.com/products/air-conditioners', we are interested ONLY in the 'air-conditioners' and its subpages.
-
-AFTER visiting the home page, create a step-by-step plan BEFORE visiting the other pages.
-You can click on MAXIMUM {self.max_links_to_click} links. Do NOT try to click all the links on the page, but only the ones which are most relevant for the task (MAX {self.max_links_to_click})!
-Do NOT visit the same page multiple times, but only once!
-If your co-speaker repeats the same message, inform him that you have already answered to that message and ask him to proceed with the task.
-e.g. "I have already answered to that message, please proceed with the task or you will be penalized!"
-"""
-
-    @property
-    def error_message(self) -> str:
-        return f"""Please output the JSON-encoded answer only in the following message before trying to terminate the chat.
-
-IMPORTANT:
- - NEVER enclose JSON-encoded answer in any other text or formatting including '```json' ... '```' or similar!
- - NEVER write TERMINATE in the same message as the JSON-encoded answer!
-
-EXAMPLE:
-
-{self.example_answer.model_dump_json()}
-
-NEGATIVE EXAMPLES:
-
-1. Do NOT include 'TERMINATE' in the same message as the JSON-encoded answer!
-
-{self.example_answer.model_dump_json()}
-
-TERMINATE
-
-2. Do NOT include triple backticks or similar!
-
-```json
-{self.example_answer.model_dump_json()}
-```
-
-THE LAST ERROR MESSAGE:
-
-{self.last_is_termination_msg_error}
-
-"""
-
-    @property
-    def system_message(self) -> str:
-        return f"""You are in charge of navigating the web_surfer agent to scrape the web.
-web_surfer is able to CLICK on links, SCROLL down, and scrape the content of the web page. e.g. you cen tell him: "Click the 'Getting Started' result".
-Each time you receive a reply from web_surfer, you need to tell him what to do next. e.g. "Click the TV link" or "Scroll down".
-It is very important that you explore ONLY the page links relevant for the task!
-
-GUIDELINES:
-- Once you retrieve the content from the received url, you can tell web_surfer to CLICK on links, SCROLL down...
-By using these capabilities, you will be able to retrieve MUCH BETTER information from the web page than by just scraping the given URL!
-You MUST use these capabilities when you receive a task for a specific category/product etc.
-- do NOT try to create a summary without clicking on any link, because you will be missing a lot of information!
-- if needed, you can instruct web surfer to SEARCH THE WEB for information.
-
-Examples:
-"Click the 'TVs' result" - This way you will navigate to the TVs section of the page and you will find more information about TVs.
-"Click 'Electronics' link" - This way you will navigate to the Electronics section of the page and you will find more information about Electronics.
-"Click the 'Next' button"
-"Search the internet for the best TV to buy" - this will get links to initial pages to start the search
-
-- Do NOT try to click all the links on the page, but only the ones which are RELEVANT for the task! Web pages can be very long and you will be penalized if spend too much time on this task!
-- Your final goal is to summarize the findings for the given task. The summary must be in English!
-- Create a summary after you successfully retrieve the information from the web page.
-- It is useful to include in the summary relevant links where more information can be found.
-e.g. If the page is offering to sell TVs, you can include a link to the TV section of the page.
-- If you get some 40x error, please do NOT give up immediately, but try to navigate to another page and continue with the task.
-Give up only if you get 40x error on ALL the pages which you tried to navigate to.
-
-
-FINAL MESSAGE:
-Once you have retrieved he wanted information, YOU MUST create JSON-encoded string. Summary created by the web_surfer is not enough!
-You MUST not include any other text or formatting in the message, only JSON-encoded summary!
-
-An example of the JSON-encoded summary:
-{self.example_answer.model_dump_json()}
-
-TERMINATION:
-When YOU are finished and YOU have created JSON-encoded answer, write a single 'TERMINATE' to end the task.
-
-OFTEN MISTAKES:
-- Web surfer expects you to tell him what LINK NAME to click next, not the relative link. E.g. in case of '[Hardware](/Hardware), the proper command would be 'Click into 'Hardware''.
-- Links presented are often RELATIVE links, so you need to ADD the DOMAIN to the link to make it work. E.g. link '/products/air-conditioners' should be 'https://www.example.com/products/air-conditioners'
-- You do NOT need to click on MAX number of links. If you have enough information from the first xy links, you do NOT need to click on the rest of the links!
-- Do NOT repeat the steps you have already completed!
-- ALWAYS include the NEXT steps in the message!
-- Do NOT instruct web_surfer to click on the same link multiple times. If there are some problems with the link, MOVE ON to the next one!
-- Also, if web_surfer does not understand your message, just MOVE ON to the next link!
-- NEVER REPEAT the same instructions to web_surfer! If he does not understand the first time, MOVE ON to the next link!
-- NEVER enclose JSON-encoded answer in any other text or formatting including '```json' ... '```' or similar!
-"""
diff --git a/scripts/lint.sh b/scripts/lint.sh
index 77f15cb0..4e15facc 100755
--- a/scripts/lint.sh
+++ b/scripts/lint.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
 echo "Running pyup_dirs..."
-pyup_dirs --py38-plus --recursive fastagency examples tests docs
+pyup_dirs --py38-plus --recursive fastagency_studio examples tests docs
 
 echo "Running ruff linter (isort, flake, pyupgrade, etc. replacement)..."
 ruff check
diff --git a/scripts/run-server.sh b/scripts/run-server.sh
index c7e19bdd..6233e5f6 100755
--- a/scripts/run-server.sh
+++ b/scripts/run-server.sh
@@ -10,6 +10,6 @@ fi
 prisma migrate deploy
 prisma generate --schema=schema.prisma --generator=pyclient
 
-faststream run fastagency.studio.io.ionats:app --workers $workers > faststream.log 2>&1 &
+faststream run fastagency_studio.io.ionats:app --workers $workers > faststream.log 2>&1 &
 
-uvicorn fastagency.studio.app:app --workers $workers --host 0.0.0.0 --proxy-headers
+uvicorn fastagency_studio.app:app --workers $workers --host 0.0.0.0 --proxy-headers
diff --git a/tests/app/test_get_schemas.py b/tests/app/test_get_schemas.py
new file mode 100644
index 00000000..56ce5675
--- /dev/null
+++ b/tests/app/test_get_schemas.py
@@ -0,0 +1,50 @@
+from fastapi.testclient import TestClient
+
+from fastagency_studio.app import app
+from fastagency_studio.models.registry import Schemas
+
+client = TestClient(app)
+
+
+class TestGetSchema:
+    def test_return_all(self) -> None:
+        response = client.get("/models/schemas")
+        assert response.status_code == 200
+
+        schemas = Schemas(**response.json())
+
+        types = {schemas.name: schemas.schemas for schemas in schemas.list_of_schemas}
+        assert set(types.keys()) == {
+            "secret",
+            "llm",
+            "agent",
+            "team",
+            "toolbox",
+            "deployment",
+        }
+
+        model_names = {
+            type_name: {model.name for model in model_schema_list}
+            for type_name, model_schema_list in types.items()
+        }
+        expected = {
+            "secret": {
+                "AnthropicAPIKey",
+                "AzureOAIAPIKey",
+                "OpenAIAPIKey",
+                "BingAPIKey",
+                "FlyToken",
+                "GitHubToken",
+                "OpenAPIAuth",
+                "OpenAPIAuthToken",
+                "TogetherAIAPIKey",
+            },
+            "llm": {"Anthropic", "AzureOAI", "OpenAI", "TogetherAI"},
+            "agent": {"AssistantAgent", "WebSurferAgent", "UserProxyAgent"},
+            # "team": {"TwoAgentTeam", "MultiAgentTeam"},
+            "team": {"TwoAgentTeam"},
+            "toolbox": {"Toolbox"},
+            "deployment": {"Deployment"},
+        }
+        # print(model_names)
+        assert model_names == expected, f"{model_names}!={expected}"
diff --git a/tests/app/test_model_routes.py b/tests/app/test_model_routes.py
new file mode 100644
index 00000000..fc3b8872
--- /dev/null
+++ b/tests/app/test_model_routes.py
@@ -0,0 +1,744 @@
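+# Descriptive note (added by the editor, not part of the original commit): this module
+# exercises the per-user model routes of fastagency_studio.app (create/read/update/
+# delete, validation errors, deployment creation with mocked background tasks, the
+# chat endpoints, and the ping route).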
+import random
+import uuid
+from typing import Any, Optional
+from unittest.mock import AsyncMock, patch
+
+import pytest
+from fastapi.testclient import TestClient
+
+from fastagency_studio.app import app, mask
+from fastagency_studio.db.base import DefaultDB
+from fastagency_studio.models.llms.azure import AzureOAIAPIKey
+from fastagency_studio.saas_app_generator import SaasAppGenerator
+
+client = TestClient(app)
+
+
+class Function:
+    def __init__(self, arguments: str, name: str):
+        """Function class."""
+        self.arguments = arguments
+        self.name = name
+
+
+class ChatCompletionMessageToolCall:
+    def __init__(self, id: str, function: Function, type: str):
+        """ChatCompletionMessageToolCall class."""
+        self.id = id
+        self.function = function
+        self.type = type
+
+
+class ChatCompletionMessage:
+    def __init__(
+        self,
+        content: Optional[str],
+        role: str,
+        function_call: Optional[str],
+        tool_calls: list[ChatCompletionMessageToolCall],
+    ):
+        """ChatCompletionMessage class."""
+        self.content = content
+        self.role = role
+        self.function_call = function_call
+        self.tool_calls = tool_calls
+
+
+class Choice:
+    def __init__(self, message: ChatCompletionMessage):
+        """Choice class."""
+        self.message = message
+
+
+class MockChatCompletion:
+    def __init__(self, id: str, choices: list[Choice]):
+        """MockChatCompletion class."""
+        self.id = id
+        self.choices = choices
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "api_key,expected",  # noqa: PT006
+    [
+        ("whatever", "wha*ever"),
+        ("some_other_key", "som*******_key"),  # codespell:ignore som
+    ],
+)
+async def test_mask(api_key: str, expected: str) -> None:
+    assert await mask(api_key) == expected
+
+
+@pytest.mark.db
+class TestModelRoutes:
+    @pytest.mark.asyncio
+    async def test_get_all_models(
+        self, user_uuid: str, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        key_uuid = str(uuid.uuid4())
+        azure_oai_api_key = AzureOAIAPIKey(api_key="whatever", name="whatever")
+        type_name = "secret"
+        model_name = "AzureOAIAPIKey"
+
+        # Create model
+        response = client.post(
+            f"/user/{user_uuid}/models/{type_name}/{model_name}/{key_uuid}",
+            json=azure_oai_api_key.model_dump(),
+        )
+        assert response.status_code == 200
+
+        response = client.get(f"/user/{user_uuid}/models")
+        assert response.status_code == 200
+
+        expected = [
+            {
+                "json_str": {
+                    "api_key": "wha*ever",  # pragma: allowlist secret
+                    "name": "whatever",
+                },
+                "uuid": key_uuid,
+                "type_name": "secret",
+                "model_name": "AzureOAIAPIKey",
+                "user_uuid": user_uuid,
+            }
+        ]
+        actual = response.json()
+        assert len(actual) >= len(expected)
+
+        matched_item_from_actual = [item for item in actual if item["uuid"] == key_uuid]
+
+        assert len(matched_item_from_actual) == len(expected)
+        for i in range(len(expected)):
+            for key in expected[i]:
+                assert matched_item_from_actual[i][key] == expected[i][key]
+
+    @pytest.mark.asyncio
+    async def test_setup_user(self) -> None:
+        random_id = random.randint(1, 1_000_000)
+        user_uuid = await DefaultDB.frontend()._create_user(
+            user_uuid=uuid.uuid4(),
+            email=f"user{random_id}@airt.ai",
+            username=f"user{random_id}",
+        )
+        # Call setup route for user
+        response = client.get(f"/user/{user_uuid}/setup")
+        assert response.status_code == 200, response.text
+        expected_setup = {
+            "name": "WeatherToolbox",
+            "openapi_url": "https://weather.tools.staging.fastagency.ai/openapi.json",
+            "openapi_auth": None,
+        }
+        actual = response.json()
+        assert actual == expected_setup
+
+        # Call get all models route to check for the newly added weather toolbox
+        response = client.get(
+            f"/user/{user_uuid}/models", params={"type_name": "toolbox"}
+        )
+        assert response.status_code == 200
+        expected_toolbox_model = {
+            "user_uuid": str(user_uuid),
+            "type_name": "toolbox",
+            "model_name": "Toolbox",
+            "json_str": {
+                "name": "WeatherToolbox",
+                "openapi_url": "https://weather.tools.staging.fastagency.ai/openapi.json",
+                "openapi_auth": None,
+            },
+        }
+        actual_toolbox_model = next(
+            iter(
+                [
+                    model
+                    for model in response.json()
+                    if model["json_str"]["name"] == "WeatherToolbox"
+                ]
+            )
+        )
+
+        for key, value in expected_toolbox_model.items():
+            assert actual_toolbox_model[key] == value
+
+        # Call the setup route again and check the response
+        response = client.get(f"/user/{user_uuid}/setup")
+        assert response.status_code == 400
+        expected_setup_again = {"detail": "Weather toolbox already exists"}
+        actual = response.json()
+        assert actual == expected_setup_again
+
+    @pytest.mark.asyncio
+    async def test_add_model(self, user_uuid: str) -> None:
+        model_uuid = str(uuid.uuid4())
+        azure_oai_api_key = AzureOAIAPIKey(api_key="whatever", name="who cares?")
+        response = client.post(
+            f"/user/{user_uuid}/models/secret/AzureOAIAPIKey/{model_uuid}",
+            json=azure_oai_api_key.model_dump(),
+        )
+
+        assert response.status_code == 200
+        expected = {
+            "api_key": "whatever",  # pragma: allowlist secret
+            "name": "who cares?",
+        }
+        actual = response.json()
+        assert actual == expected
+
+    @pytest.mark.asyncio
+    async def test_add_model_with_duplicate_name(self, user_uuid: str) -> None:
+        model_uuid = str(uuid.uuid4())
+        name = f"model_name_{model_uuid}"
+        azure_oai_api_key = AzureOAIAPIKey(api_key="whatever", name=name)
+        response = client.post(
+            f"/user/{user_uuid}/models/secret/AzureOAIAPIKey/{model_uuid}",
+            json=azure_oai_api_key.model_dump(),
+        )
+
+        assert response.status_code == 200
+        expected = {
+            "api_key": "whatever",  # pragma: allowlist secret
+            "name": name,
+        }
+        actual = response.json()
+        assert actual == expected
+
+        response = client.get(f"/user/{user_uuid}/models")
+        assert response.status_code == 200
+
+        existing_name = name
+        new_model_uuid = str(uuid.uuid4())
+        new_azure_oai_api_key = AzureOAIAPIKey(
+            api_key="whatever",  # pragma: allowlist secret
+            name=existing_name,
+        )
+        response = client.post(
+            f"/user/{user_uuid}/models/secret/AzureOAIAPIKey/{new_model_uuid}",
+            json=new_azure_oai_api_key.model_dump(),
+        )
+        assert response.status_code == 422
+        expected_error_response: dict[str, list[dict[str, Any]]] = {
+            "detail": [
+                {
+                    "loc": ["name"],
+                    "msg": "Name already exists. Please enter a different name",
+                }
+            ]
+        }
+        actual = response.json()
+        assert actual == expected_error_response
+
+    @pytest.mark.asyncio
+    async def test_update_model_with_duplicate_name(self, user_uuid: str) -> None:
+        models = [
+            {"uuid": str(uuid.uuid4()), "name": f"model_name_{i}"} for i in range(2)
+        ]
+        # Add two models
+        for model in models:
+            model_uuid = model["uuid"]
+            name = model["name"]
+            azure_oai_api_key = AzureOAIAPIKey(api_key="whatever", name=name)
+            response = client.post(
+                f"/user/{user_uuid}/models/secret/AzureOAIAPIKey/{model_uuid}",
+                json=azure_oai_api_key.model_dump(),
+            )
+            assert response.status_code == 200
+            expected = {
+                "api_key": "whatever",  # pragma: allowlist secret
+                "name": name,
+            }
+            actual = response.json()
+            assert actual == expected
+
+        # update name of the second model
+        new_name = f"updated_{models[1]['name']}"
+        model_uuid = models[1]["uuid"]
+        updated_model = AzureOAIAPIKey(api_key="new_key", name=new_name)
+        response = client.put(
+            f"/user/{user_uuid}/models/secret/AzureOAIAPIKey/{model_uuid}",
+            json=updated_model.model_dump(),
+        )
+        assert response.status_code == 200
+        expected = {
+            "api_key": "new_key",  # pragma: allowlist secret
+            "name": new_name,
+        }
+        actual = response.json()
+        assert actual == expected
+
+        # Try to update the second model name with the first model name (should fail)
+        first_model_name = models[0]["name"]
+        updated_model = AzureOAIAPIKey(api_key="new_key", name=first_model_name)
+        response = client.put(
+            f"/user/{user_uuid}/models/secret/AzureOAIAPIKey/{model_uuid}",
+            json=updated_model.model_dump(),
+        )
+        assert response.status_code == 422
+        expected_error_response: dict[str, list[dict[str, Any]]] = {
+            "detail": [
+                {
+                    "loc": ["name"],
+                    "msg": "Name already exists. Please enter a different name",
+                }
+            ]
+        }
+        actual = response.json()
+        assert actual == expected_error_response
+
+    @pytest.mark.asyncio
+    async def test_add_model_deployment(self, user_uuid: str) -> None:
+        team_uuid = str(uuid.uuid4())
+        deployment_uuid = str(uuid.uuid4())
+        gh_token_uuid = str(uuid.uuid4())
+        fly_token_uuid = str(uuid.uuid4())
+
+        model = {
+            "name": "name",
+            "repo_name": "repo_name",
+            "fly_app_name": "test the deployment name char",  # within the character limit. Max 30
+            "team": {"uuid": team_uuid, "type": "team", "name": "TwoAgentTeam"},
+            "gh_token": {
+                "uuid": gh_token_uuid,
+                "type": "secret",
+                "name": "GitHubToken",
+            },
+            "fly_token": {"uuid": fly_token_uuid, "type": "secret", "name": "FlyToken"},
+            "uuid": deployment_uuid,
+            "type_name": "deployment",
+            "model_name": "Deployment",
+        }
+        type_name = "deployment"
+        model_name = "Deployment"
+        model_uuid = str(uuid.uuid4())
+
+        # Mock the background task
+        fly_api_token = "some-token"
+        fastagency_deployment_uuid = "some-uuid"
+        github_token = "some-github-token"
+        app_name = "test fastagency template"
+        repo_name = "test-fastagency-template"
+        fly_app_name = "test-fastagency-template"
+        saas_app = SaasAppGenerator(
+            fly_api_token,
+            fastagency_deployment_uuid,
+            github_token,
+            app_name,
+            repo_name,
+            fly_app_name,
+        )
+        saas_app.gh_repo_url = "https://some-git-url"
+        with (
+            patch(
+                "fastagency_studio.helpers.validate_tokens_and_create_gh_repo",
+                return_value=saas_app,
+            ) as mock_task,
+            patch("fastagency_studio.helpers.deploy_saas_app"),
+        ):
+            response = client.post(
+                f"/user/{user_uuid}/models/{type_name}/{model_name}/{model_uuid}",
+                json=model,
+            )
+            mock_task.assert_called_once()
+
+        assert response.status_code == 200
+        expected = {
+            "name": "name",
+            "repo_name": "repo_name",
+            "fly_app_name": "test the deployment name char",
+            "team": {"type": "team", "name": "TwoAgentTeam", "uuid": team_uuid},
+            "gh_token": {
+                "type": "secret",
+                "name": "GitHubToken",
+                "uuid": gh_token_uuid,
+            },
+            "fly_token": {"type": "secret", "name": "FlyToken", "uuid": fly_token_uuid},
+            "app_deploy_status": "inprogress",
+            "gh_repo_url": "https://some-git-url",
+        }
+
+        actual = response.json()
+        assert actual == expected
+
+    @pytest.mark.asyncio
+    async def test_add_model_deployment_with_long_name(self, user_uuid: str) -> None:
+        team_uuid = str(uuid.uuid4())
+        deployment_uuid = str(uuid.uuid4())
+        gh_token_uuid = str(uuid.uuid4())
+        fly_token_uuid = str(uuid.uuid4())
+
+        model = {
+            "name": "name",
+            "repo_name": "repo_name",
+            "fly_app_name": "test the deployment name charc",  # beyond the character limit. Max 30
+            "team": {"uuid": team_uuid, "type": "team", "name": "TwoAgentTeam"},
+            "gh_token": {
+                "uuid": gh_token_uuid,
+                "type": "secret",
+                "name": "GitHubToken",
+            },
+            "fly_token": {"uuid": fly_token_uuid, "type": "secret", "name": "FlyToken"},
+            "uuid": deployment_uuid,
+            "type_name": "deployment",
+            "model_name": "Deployment",
+        }
+        type_name = "deployment"
+        model_name = "Deployment"
+        model_uuid = str(uuid.uuid4())
+
+        response = client.post(
+            f"/user/{user_uuid}/models/{type_name}/{model_name}/{model_uuid}",
+            json=model,
+        )
+
+        assert response.status_code != 200
+
+    @pytest.mark.asyncio
+    async def test_background_task_not_called_on_error(self, user_uuid: str) -> None:
+        team_uuid = str(uuid.uuid4())
+        deployment_uuid = str(uuid.uuid4())
+        gh_token_uuid = str(uuid.uuid4())
+        fly_token_uuid = str(uuid.uuid4())
+
+        model = {
+            "name": "name",
+            "repo_name": "repo_name",
+            "fly_app_name": "Test",
+            "team": {"uuid": team_uuid, "type": "team", "name": "TwoAgentTeam"},
+            "gh_token": {
+                "uuid": gh_token_uuid,
+                "type": "secret",
+                "name": "GitHubToken",
+            },
+            "fly_token": {"uuid": fly_token_uuid, "type": "secret", "name": "FlyToken"},
+            "uuid": deployment_uuid,
+            "type_name": "deployment",
+            "model_name": "Deployment",
+        }
+        type_name = "deployment"
+        model_name = "Deployment"
+        model_uuid = uuid.uuid4()
+
+        with (
+            patch(
+                "fastagency_studio.app.DefaultDB._frontend_db.get_user",
+                side_effect=Exception(),
+            ),
+            patch(
+                "fastagency_studio.db.prisma.PrismaBackendDB._get_db_connection",
+                side_effect=Exception(),
+            ),
+            patch("fastagency_studio.helpers.deploy_saas_app") as mock_task,
+        ):
+            response = client.post(
+                f"/user/{user_uuid}/models/{type_name}/{model_name}/{model_uuid}",
+                json=model,
+            )
+
+        mock_task.assert_not_called()
+        assert response.status_code != 200
+
+    @pytest.mark.asyncio
+    async def test_update_model(
+        self, user_uuid: str, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        key_uuid = str(uuid.uuid4())
+        unique_name = f"unique_name_{key_uuid}"
+        azure_oai_api_key = AzureOAIAPIKey(api_key="who cares", name=unique_name)
+        type_name = "secret"
+        model_name = "AzureOAIAPIKey"
+
+        # Create model
+        response = client.post(
+            f"/user/{user_uuid}/models/{type_name}/{model_name}/{key_uuid}",
+            json=azure_oai_api_key.model_dump(),
+        )
+        assert response.status_code == 200
+
+        response = client.put(
+            f"/user/{user_uuid}/models/secret/AzureOAIAPIKey/{key_uuid}",
+            json=azure_oai_api_key.model_dump(),
+        )
+
+        assert response.status_code == 200
+        expected = {
+            "api_key": "who cares",  # pragma: allowlist secret
+            "name": unique_name,
+        }
+        actual = response.json()
+        assert actual == expected
+
+    @pytest.mark.asyncio
+    async def test_update_model_deployment(self, user_uuid: str) -> None:
+        team_uuid = str(uuid.uuid4())
+        deployment_uuid = str(uuid.uuid4())
+        gh_token_uuid = str(uuid.uuid4())
+        fly_token_uuid = str(uuid.uuid4())
+        unique_name = f"unique_name_{deployment_uuid}"
+        model = {
+            "name": unique_name,
+            "repo_name": "repo_name",
+            "fly_app_name": "Test",
+            "team": {"uuid": team_uuid, "type": "team", "name": "TwoAgentTeam"},
+            "gh_token": {
+                "uuid": gh_token_uuid,
+                "type": "secret",
+                "name": "GitHubToken",
+            },
+            "fly_token": {"uuid": fly_token_uuid, "type": "secret", "name": "FlyToken"},
+            "uuid": deployment_uuid,
+            "type_name": "deployment",
+            "model_name": "Deployment",
+        }
+        type_name = "deployment"
+        model_name = "Deployment"
+
+        model_uuid = str(uuid.uuid4())
+        # Mock the background task
+        fly_api_token = "some-token"
+        fastagency_deployment_uuid = "some-uuid"
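+        # Placeholder credentials/names only; the generator is returned by the mocked
+        # validate_tokens_and_create_gh_repo below, so no real tokens are required.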
+        github_token = "some-github-token"
+        app_name = "test fastagency template"
+        repo_name = "test-fastagency-template"
+        fly_app_name = "test-fastagency-template"
+        saas_app = SaasAppGenerator(
+            fly_api_token,
+            fastagency_deployment_uuid,
+            github_token,
+            app_name,
+            repo_name,
+            fly_app_name,
+        )
+        saas_app.gh_repo_url = "https://some-git-url"
+        with (
+            patch(
+                "fastagency_studio.helpers.validate_tokens_and_create_gh_repo",
+                return_value=saas_app,
+            ) as mock_task,
+            patch("fastagency_studio.helpers.deploy_saas_app"),
+        ):
+            response = client.post(
+                f"/user/{user_uuid}/models/{type_name}/{model_name}/{model_uuid}",
+                json=model,
+            )
+            mock_task.assert_called_once()
+
+        assert response.status_code == 200
+        # Update deployment
+        new_gh_token_uuid = str(uuid.uuid4())
+        model = {
+            "name": unique_name,
+            "repo_name": "repo_name",
+            "fly_app_name": "Test",
+            "team": {"uuid": team_uuid, "type": "team", "name": "TwoAgentTeam"},
+            "gh_token": {
+                "uuid": new_gh_token_uuid,
+                "type": "secret",
+                "name": "GitHubToken",
+            },
+            "fly_token": {"uuid": fly_token_uuid, "type": "secret", "name": "FlyToken"},
+            "uuid": deployment_uuid,
+            "type_name": "deployment",
+            "model_name": "Deployment",
+        }
+        response = client.put(
+            f"/user/{user_uuid}/models/deployment/Deployment/{model_uuid}",
+            json=model,
+        )
+
+        assert response.status_code == 200
+        expected = {
+            "name": unique_name,
+            "repo_name": "repo_name",
+            "fly_app_name": "Test",
+            "team": {
+                "type": "team",
+                "name": "TwoAgentTeam",
+                "uuid": team_uuid,
+            },
+            "gh_token": {
+                "type": "secret",
+                "name": "GitHubToken",
+                "uuid": new_gh_token_uuid,
+            },
+            "fly_token": {
+                "type": "secret",
+                "name": "FlyToken",
+                "uuid": fly_token_uuid,
+            },
+        }
+
+        actual = response.json()
+        assert actual == expected
+
+    @pytest.mark.asyncio
+    async def test_delete_model(
+        self, user_uuid: str, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        key_uuid = str(uuid.uuid4())
+        unique_name = f"unique_name_{key_uuid}"
+        azure_oai_api_key = AzureOAIAPIKey(api_key="whatever", name=unique_name)
+        type_name = "secret"
+        model_name = "AzureOAIAPIKey"
+
+        # Create model
+        response = client.post(
+            f"/user/{user_uuid}/models/{type_name}/{model_name}/{key_uuid}",
+            json=azure_oai_api_key.model_dump(),
+        )
+        assert response.status_code == 200
+
+        response = client.delete(f"/user/{user_uuid}/models/secret/{key_uuid}")
+
+        assert response.status_code == 200
+        expected = {
+            "api_key": "whatever",  # pragma: allowlist secret
+            "name": unique_name,
+        }
+        actual = response.json()
+        assert actual == expected
+
+    @pytest.mark.llm
+    @pytest.mark.asyncio
+    async def test_chat_with_no_function_calling(
+        self, user_uuid: str, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        model_uuid = str(uuid.uuid4())
+        model_name = "MultiAgentTeam"
+        # Mocking the aclient.chat.completions.create function
+        mock_create = AsyncMock()
+        monkeypatch.setattr(
+            "fastagency_studio.app.aclient.chat.completions.create", mock_create
+        )
+
+        # Define the mock return value
+        mock_create.return_value = AsyncMock(
+            choices=[AsyncMock(message=AsyncMock(tool_calls=None))]
+        )
+
+        # Define the request body
+        request_body = {
+            "message": [{"role": "user", "content": "Hello"}],
+            "chat_id": 123,
+            "user_id": 456,
+        }
+
+        # Define the expected response
+        expected_response = {
+            "team_status": "inprogress",
+            "team_name": "456_123",
+            "team_id": 123,
+            "customer_brief": "Some customer brief",
+            "conversation_name": "Hello",
+        }
+
+        response = client.post(
+            f"/user/{user_uuid}/chat/{model_name}/{model_uuid}", json=request_body
+        )
+
+        # Assert the status code and the response body
+        assert response.status_code == 200
+        assert response.json() == expected_response
+
+        # Assert the mock was called with the correct arguments
+        mock_create.assert_called_once()
+
+    @pytest.mark.llm
+    @pytest.mark.asyncio
+    async def test_chat_error(
+        self, user_uuid: str, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        model_uuid = str(uuid.uuid4())
+        model_name = "MultiAgentTeam"
+
+        mock_create = AsyncMock()
+        monkeypatch.setattr(
+            "fastagency_studio.app.aclient.chat.completions.create", mock_create
+        )
+        mock_create.side_effect = Exception("Error creating chat completion")
+
+        # Define the request body
+        request_body = {
+            "message": [{"role": "user", "content": "Hello"}],
+            "chat_id": 123,
+            "user_id": 456,
+        }
+
+        # Define the expected response
+        expected_response = {
+            "team_status": "inprogress",
+            "team_name": "456_123",
+            "team_id": 123,
+            "customer_brief": "Some customer brief",
+            "conversation_name": "Hello",
+        }
+
+        response = client.post(
+            f"/user/{user_uuid}/chat/{model_name}/{model_uuid}", json=request_body
+        )
+
+        # Assert the status code and the response body
+        assert response.status_code == 200
+        assert response.json() == expected_response
+
+        # Assert the mock was called with the correct arguments
+        mock_create.assert_called_once()
+
+    @pytest.mark.llm
+    @pytest.mark.asyncio
+    async def test_chat_with_function_calling(
+        self, user_uuid: str, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        model_uuid = str(uuid.uuid4())
+        model_name = "MultiAgentTeam"
+
+        mock_create = AsyncMock()
+        monkeypatch.setattr(
+            "fastagency_studio.app.aclient.chat.completions.create", mock_create
+        )
+
+        function = Function(
+            arguments='{\n "chat_name": "Calculate 2 * 2"\n}',
+            name="generate_chat_name",
+        )
+        tool_call = ChatCompletionMessageToolCall(
+            id="1", function=function, type="function"
+        )
+        message = ChatCompletionMessage(
+            content=None, role="assistant", function_call=None, tool_calls=[tool_call]
+        )
+        choice = Choice(message=message)
+        chat_completion = MockChatCompletion(id="1", choices=[choice])
+
+        mock_create.return_value = chat_completion
+
+        # Define the request body
+        request_body = {
+            "message": [{"role": "user", "content": "Hello"}],
+            "chat_id": 123,
+            "user_id": 456,
+        }
+
+        # Define the expected response
+        expected_response = {
+            "team_status": "inprogress",
+            "team_name": "456_123",
+            "team_id": 123,
+            "customer_brief": "Some customer brief",
+            "conversation_name": "Calculate 2 * 2",
+        }
+
+        response = client.post(
+            f"/user/{user_uuid}/chat/{model_name}/{model_uuid}", json=request_body
+        )
+
+        # Assert the status code and the response body
+        assert response.status_code == 200
+        assert response.json() == expected_response
+
+        # Assert the mock was called with the correct arguments
+        mock_create.assert_called_once()
+
+    @pytest.mark.asyncio
+    async def test_ping(self) -> None:
+        deployment_uuid = str(uuid.uuid4())
+        response = client.get(f"/deployment/{deployment_uuid}/ping")
+        assert response.status_code == 200
+        assert response.json() == {"status": "ok"}
diff --git a/tests/app/test_openai_extensively.py b/tests/app/test_openai_extensively.py
new file mode 100644
index 00000000..374b74de
--- /dev/null
+++ b/tests/app/test_openai_extensively.py
@@ -0,0 +1,356 @@
+import json
+import uuid
+from typing import Any
+
+import pytest
+from fastapi import BackgroundTasks, HTTPException
+from fastapi.testclient import TestClient
+
+from fastagency_studio.app import add_model, app, validate_toolbox
+from fastagency_studio.models.llms.openai import OpenAI, OpenAIAPIKey
+from fastagency_studio.models.registry import Schemas
+from fastagency_studio.models.toolboxes.toolbox import OpenAPIAuth, Toolbox
+
+client = TestClient(app)
+
+
+class TestValidateOpenAIKey:
+    @pytest.fixture
+    def model_dict(self) -> dict[str, Any]:
+        model = OpenAIAPIKey(
+            api_key="sk-sUeBP9asw6GiYHXqtg70T3BlbkFJJuLwJFco90bOpU0Ntest",  # pragma: allowlist secret
+            name="Hello World!",
+        )
+
+        return json.loads(model.model_dump_json())  # type: ignore[no-any-return]
+
+    def test_validate_success(self, model_dict: dict[str, Any]) -> None:
+        response = client.post(
+            "/models/secret/OpenAIAPIKey/validate",
+            json=model_dict,
+        )
+        assert response.status_code == 200
+
+    def test_validate_incorrect_api_key(self, model_dict: dict[str, Any]) -> None:
+        model_dict["api_key"] = "whatever"  # pragma: allowlist secret
+
+        response = client.post(
+            "/models/secret/OpenAIAPIKey/validate",
+            json=model_dict,
+        )
+        assert response.status_code == 422
+        msg_dict = response.json()["detail"][0]
+        msg_dict.pop("input")
+        msg_dict.pop("url")
+        expected = {
+            "type": "value_error",
+            "loc": ["api_key"],
+            "msg": "Value error, Invalid OpenAI API Key",
+            "ctx": {"error": "Invalid OpenAI API Key"},
+        }
+        assert msg_dict == expected
+
+    @pytest.mark.db
+    @pytest.mark.asyncio
+    async def test_validate_secret_model(
+        self,
+        model_dict: dict[str, Any],
+        user_uuid: str,
+    ) -> None:
+        api_key = OpenAIAPIKey(**model_dict)
+        api_key_model_uuid = str(uuid.uuid4())
+        await add_model(
+            user_uuid=user_uuid,
+            type_name="secret",
+            model_name=OpenAIAPIKey.__name__,  # type: ignore [attr-defined]
+            model_uuid=api_key_model_uuid,
+            model=api_key.model_dump(),
+            background_tasks=BackgroundTasks(),
+        )
+
+        # Pass only the name in the request; this should update the name and retain the existing api_key.
+        model_dict_with_updated_name = {"name": "Hello World! Updated"}
+
+        response = client.post(
+            f"/user/{user_uuid}/models/secret/OpenAIAPIKey/{api_key_model_uuid}/validate",
+            json=model_dict_with_updated_name,
+        )
+        assert response.status_code == 200
+
+        expected = {"name": "Hello World! Updated", "api_key": model_dict["api_key"]}
+        assert response.json() == expected
+
+        # Pass both name and api_key in the request; this should update both.
+        model_dict_with_updated_name_and_api_key = {
+            "name": "Hello World! Updated Again",
+            "api_key": "sk-proj-SomeLengthStringWhichCanHave-and_inItAndTheLengthCanBeChangedAtAnyTime",  # pragma: allowlist secret
+        }
+        response = client.post(
+            f"/user/{user_uuid}/models/secret/OpenAIAPIKey/{api_key_model_uuid}/validate",
+            json=model_dict_with_updated_name_and_api_key,
+        )
+        assert response.status_code == 200
+        assert response.json() == model_dict_with_updated_name_and_api_key
+
+
+# We test this extensively for OpenAI only; the remaining LLM classes share the same validation logic.
+class TestValidateOpenAI:
+    @pytest.fixture
+    def model_dict(self) -> dict[str, Any]:
+        key_uuid = uuid.uuid4()
+        OpenAIAPIKeyRef = OpenAIAPIKey.get_reference_model()  # noqa: N806
+        api_key = OpenAIAPIKeyRef(uuid=key_uuid)
+
+        model = OpenAI(api_key=api_key, name="Hello World!")
+
+        return json.loads(model.model_dump_json())  # type: ignore[no-any-return]
+
+    def test_get_openai_schema(self) -> None:
+        response = client.get("/models/schemas")
+        assert response.status_code == 200
+
+        schemas = Schemas(**response.json())
+        llm_schema = next(
+            schema for schema in schemas.list_of_schemas if schema.name == "llm"
+        )
+
+        openai_schema = next(
+            schema for schema in llm_schema.schemas if schema.name == OpenAI.__name__
+        )
+
+        assert len(openai_schema.json_schema) > 0
+
+    def test_validate_success(self, model_dict: dict[str, Any]) -> None:
+        response = client.post(
+            "/models/llm/OpenAI/validate",
+            json=model_dict,
+        )
+        assert response.status_code == 200
+
+    def test_validate_missing_key(self, model_dict: dict[str, Any]) -> None:
+        model_dict.pop("api_key")
+
+        response = client.post(
+            "/models/llm/OpenAI/validate",
+            json=model_dict,
+        )
+        assert response.status_code == 422
+        msg_dict = response.json()["detail"][0]
+        msg_dict.pop("input")
+        msg_dict.pop("url")
+        expected = {
+            "type": "missing",
+            "loc": ["api_key"],
+            "msg": "Field required",
+        }
+        assert msg_dict == expected
+
+    def test_validate_incorrect_model(self, model_dict: dict[str, Any]) -> None:
+        model_dict["model"] = model_dict["model"] + "_turbo_diezel"
+
+        response = client.post(
+            "/models/llm/OpenAI/validate",
+            json=model_dict,
+        )
+        assert response.status_code == 422
+        msg_dict = response.json()["detail"][0]
+        msg_dict.pop("input")
+        msg_dict.pop("url")
+        expected = {
+            "type": "literal_error",
+            "loc": ["model"],
+            "msg": "Input should be 'gpt-4o-2024-08-06', 'gpt-4-1106-preview', 'gpt-4-0613', 'gpt-4', 'chatgpt-4o-latest', 'gpt-4-turbo-preview', 'gpt-4-0125-preview', 'gpt-3.5-turbo', 'gpt-3.5-turbo-1106', 'gpt-4o-mini-2024-07-18', 'gpt-3.5-turbo-0125', 'gpt-4o-mini', 'gpt-3.5-turbo-16k', 'gpt-4-turbo-2024-04-09', 'gpt-3.5-turbo-instruct-0914', 'gpt-3.5-turbo-instruct', 'gpt-4o', 'gpt-4o-2024-05-13' or 'gpt-4-turbo'",
+            "ctx": {
+                "expected": "'gpt-4o-2024-08-06', 'gpt-4-1106-preview', 'gpt-4-0613', 'gpt-4', 'chatgpt-4o-latest', 'gpt-4-turbo-preview', 'gpt-4-0125-preview', 'gpt-3.5-turbo', 'gpt-3.5-turbo-1106', 'gpt-4o-mini-2024-07-18', 'gpt-3.5-turbo-0125', 'gpt-4o-mini', 'gpt-3.5-turbo-16k', 'gpt-4-turbo-2024-04-09', 'gpt-3.5-turbo-instruct-0914', 'gpt-3.5-turbo-instruct', 'gpt-4o', 'gpt-4o-2024-05-13' or 'gpt-4-turbo'"
+            },
+        }
+        assert msg_dict == expected
+
+    def test_validate_incorrect_base_url(self, model_dict: dict[str, Any]) -> None:
+        model_dict["base_url"] = "mailto://api.openai.com/v1"
+
+        response = client.post(
+            "/models/llm/OpenAI/validate",
+            json=model_dict,
+        )
+        assert response.status_code == 422
+        msg_dict = response.json()["detail"][0]
+        msg_dict.pop("input")
+        msg_dict.pop("url")
+        expected = {
+            "ctx": 
{"expected_schemes": "'http' or 'https'"}, + "loc": ["base_url"], + "msg": "URL scheme should be 'http' or 'https'", + "type": "url_scheme", + } + assert msg_dict == expected + + +def test_get_schemas() -> None: + response = client.get("/models/schemas") + assert response.status_code == 200 + + schemas = Schemas(**response.json()) + assert len(schemas.list_of_schemas) >= 2 + + +class TestToolbox: + @pytest.mark.db + @pytest.mark.asyncio + async def test_add_toolbox(self, user_uuid: str, fastapi_openapi_url: str) -> None: + openapi_auth = OpenAPIAuth( + name="openapi_auth_secret", + username="test", + password="password", # pragma: allowlist secret + ) + openapi_auth_model_uuid = str(uuid.uuid4()) + response = client.post( + f"/user/{user_uuid}/models/secret/OpenAPIAuth/{openapi_auth_model_uuid}", + json=openapi_auth.model_dump(), + ) + assert response.status_code == 200 + + model_uuid = str(uuid.uuid4()) + toolbox = Toolbox( + name="test_toolbox_constructor", + openapi_url=fastapi_openapi_url, + openapi_auth=openapi_auth.get_reference_model()( + uuid=openapi_auth_model_uuid + ), + ) + toolbox_dump = toolbox.model_dump() + toolbox_dump["openapi_auth"]["uuid"] = str(toolbox_dump["openapi_auth"]["uuid"]) + + response = client.post( + f"/user/{user_uuid}/models/toolbox/Toolbox/{model_uuid}", + json=toolbox_dump, + ) + + assert response.status_code == 200 + expected = { + "name": "test_toolbox_constructor", + "openapi_url": fastapi_openapi_url, + "openapi_auth": { + "type": "secret", + "name": "OpenAPIAuth", + "uuid": str(openapi_auth_model_uuid), + }, + } + actual = response.json() + assert actual == expected + + @pytest.mark.asyncio + async def test_validate_toolbox(self, fastapi_openapi_url: str) -> None: + openapi_auth = OpenAPIAuth( + name="openapi_auth_secret", + username="test", + password="password", # pragma: allowlist secret + ) + openapi_auth_model_uuid = str(uuid.uuid4()) + + toolbox = Toolbox( + name="test_toolbox_constructor", + openapi_url=fastapi_openapi_url, + openapi_auth=openapi_auth.get_reference_model()( + uuid=openapi_auth_model_uuid + ), + ) + + await validate_toolbox(toolbox) + + @pytest.mark.asyncio + async def test_validate_toolbox_route(self, fastapi_openapi_url: str) -> None: + openapi_auth = OpenAPIAuth( + name="openapi_auth_secret", + username="test", + password="password", # pragma: allowlist secret + ) + openapi_auth_model_uuid = str(uuid.uuid4()) + + toolbox = Toolbox( + name="test_toolbox_constructor", + openapi_url=fastapi_openapi_url, + openapi_auth=openapi_auth.get_reference_model()( + uuid=openapi_auth_model_uuid + ), + ) + toolbox_dump = toolbox.model_dump() + toolbox_dump["openapi_auth"]["uuid"] = str(toolbox_dump["openapi_auth"]["uuid"]) + + response = client.post( + "/models/toolbox/Toolbox/validate", + json=toolbox_dump, + ) + assert response.status_code == 200 + + @pytest.mark.asyncio + async def test_validate_toolbox_with_404_url(self) -> None: + invalid_url = "http://i.dont.exist.airt.ai/openapi.json" + + openapi_auth = OpenAPIAuth( + name="openapi_auth_secret", + username="test", + password="password", # pragma: allowlist secret + ) + openapi_auth_model_uuid = str(uuid.uuid4()) + + toolbox = Toolbox( + name="test_toolbox_constructor", + openapi_url=invalid_url, + openapi_auth=openapi_auth.get_reference_model()( + uuid=openapi_auth_model_uuid + ), + ) + + with pytest.raises(HTTPException) as e: + await validate_toolbox(toolbox) + + assert e.value.status_code == 422 + assert e.value.detail == "OpenAPI URL is invalid" + + @pytest.mark.asyncio + async 
def test_validate_toolbox_with_invalid_openapi_spec(self) -> None:
+        invalid_url = "http://echo.jsontest.com/key/value/one/two"
+
+        openapi_auth = OpenAPIAuth(
+            name="openapi_auth_secret",
+            username="test",
+            password="password",  # pragma: allowlist secret
+        )
+        openapi_auth_model_uuid = str(uuid.uuid4())
+
+        toolbox = Toolbox(
+            name="test_toolbox_constructor",
+            openapi_url=invalid_url,
+            openapi_auth=openapi_auth.get_reference_model()(
+                uuid=openapi_auth_model_uuid
+            ),
+        )
+
+        with pytest.raises(HTTPException) as e:
+            await validate_toolbox(toolbox)
+
+        assert e.value.status_code == 422
+        assert e.value.detail == "OpenAPI URL does not contain a valid OpenAPI spec"
+
+    @pytest.mark.asyncio
+    async def test_validate_toolbox_with_yaml_openapi_spec(self) -> None:
+        yaml_url = "https://raw.githubusercontent.com/OAI/OpenAPI-Specification/main/examples/v3.0/petstore.yaml"
+
+        openapi_auth = OpenAPIAuth(
+            name="openapi_auth_secret",
+            username="test",
+            password="password",  # pragma: allowlist secret
+        )
+        openapi_auth_model_uuid = str(uuid.uuid4())
+
+        toolbox = Toolbox(
+            name="test_toolbox_constructor",
+            openapi_url=yaml_url,
+            openapi_auth=openapi_auth.get_reference_model()(
+                uuid=openapi_auth_model_uuid
+            ),
+        )
+
+        await validate_toolbox(toolbox)
diff --git a/fastagency_studio/studio/io/__init__.py b/tests/auth_token/__init__.py
similarity index 100%
rename from fastagency_studio/studio/io/__init__.py
rename to tests/auth_token/__init__.py
diff --git a/tests/auth_token/test_auth_token.py b/tests/auth_token/test_auth_token.py
new file mode 100644
index 00000000..b82a6086
--- /dev/null
+++ b/tests/auth_token/test_auth_token.py
@@ -0,0 +1,232 @@
+import uuid
+from datetime import datetime
+from typing import Any, Union
+from uuid import UUID
+
+import pytest
+from fastapi import HTTPException
+from fastapi.testclient import TestClient
+
+import fastagency_studio.app
+import fastagency_studio.auth_token.auth
+import fastagency_studio.db
+import fastagency_studio.db.inmemory
+import fastagency_studio.db.prisma
+from fastagency_studio.app import app
+from fastagency_studio.auth_token.auth import (
+    create_deployment_auth_token,
+    generate_auth_token,
+    hash_auth_token,
+    parse_expiry,
+    verify_auth_token,
+)
+
+client = TestClient(app)
+
+
+def test_generate_auth_token() -> None:
+    token = generate_auth_token()
+    assert isinstance(token, str)
+    assert len(token) == 32
+
+
+def test_hash_auth_token() -> None:
+    token = generate_auth_token()
+    hashed_token = hash_auth_token(token)
+    assert isinstance(hashed_token, str)
+    assert len(hashed_token) == 97
+    assert ":" in hashed_token
+
+
+def test_verify_auth_token() -> None:
+    token = generate_auth_token()
+    hashed_token = hash_auth_token(token)
+    assert verify_auth_token(token, hashed_token)
+    assert not verify_auth_token(token, "wrong_hash")
+    assert not verify_auth_token("wrong_token", hashed_token)
+    assert not verify_auth_token("wrong_token", "wrong_hash")
+
+
+@pytest.mark.asyncio
+async def test_parse_expiry() -> None:
+    expiry = await parse_expiry("1d")
+    assert expiry is not None
+    assert isinstance(expiry, datetime)
+    assert expiry > datetime.utcnow()
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "expiry_str, expected",  # noqa: PT006
+    [
+        ("1", "Invalid expiry format - 1; expected format: d"),
+        ("1h", "Invalid expiry format - 1h; expected format: d"),
+        ("1w", "Invalid expiry format - 1w; expected format: d"),
+        ("1m", "Invalid expiry format - 1m; expected format: d"),
+        ("1y", "Invalid expiry format - 1y; expected 
format: d"), + ("0d", "Expiry date cannot be in the past"), + ("-1d", "Invalid expiry format - -1d; expected format: d"), + ], +) +async def test_parse_expiry_with_invalid_expiry(expiry_str: str, expected: str) -> None: + with pytest.raises(HTTPException) as e: + await parse_expiry(expiry_str) + assert e.value.status_code == 400 + assert e.value.detail == expected + + +@pytest.mark.db +@pytest.mark.asyncio +async def test_create_deployment_token( + user_uuid: str, monkeypatch: pytest.MonkeyPatch +) -> None: + deployment_uuid = uuid.uuid4() + + async def mock_find_model(*args: Any, **kwargs: Any) -> dict[str, Union[str, UUID]]: + return { + "user_uuid": user_uuid, + "uuid": deployment_uuid, + } + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + + token = await create_deployment_auth_token(user_uuid, deployment_uuid) + assert isinstance(token.auth_token, str) + assert len(token.auth_token) == 32, token.auth_token + + +@pytest.mark.db +@pytest.mark.asyncio +async def test_create_deployment_token_with_wrong_user_uuid( + user_uuid: str, monkeypatch: pytest.MonkeyPatch +) -> None: + deployment_uuid = uuid.uuid4() + + async def mock_find_model(*args: Any, **kwargs: Any) -> dict[str, Union[str, UUID]]: + return { + "user_uuid": "random_wrong_uuid", + "uuid": deployment_uuid, + } + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + + with pytest.raises(HTTPException) as e: + await create_deployment_auth_token(user_uuid, deployment_uuid) + + assert e.value.status_code == 403 + assert e.value.detail == "User does not have access to this deployment" + + +@pytest.mark.db +@pytest.mark.asyncio +async def test_create_deployment_auth_token_route( + user_uuid: str, monkeypatch: pytest.MonkeyPatch +) -> None: + deployment_uuid = uuid.uuid4() + + async def mock_find_model(*args: Any, **kwargs: Any) -> dict[str, Union[str, UUID]]: + return { + "user_uuid": user_uuid, + "uuid": deployment_uuid, + } + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + + response = client.post( + f"/user/{user_uuid}/deployment/{deployment_uuid}", + json={"name": "Test token", "expiry": "99d"}, + ) + assert response.status_code == 200 + assert "auth_token" in response.json() + assert response.json()["auth_token"] is not None + + +@pytest.mark.db +@pytest.mark.asyncio +async def test_get_all_deployment_auth_tokens( + user_uuid: str, monkeypatch: pytest.MonkeyPatch +) -> None: + deployment_uuid = uuid.uuid4() + + async def mock_find_model(*args: Any, **kwargs: Any) -> dict[str, Union[str, UUID]]: + return { + "user_uuid": user_uuid, + "uuid": deployment_uuid, + } + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + + response = client.post( + f"/user/{user_uuid}/deployment/{deployment_uuid}", + json={"name": "Test token", "expiry": "99d"}, + ) + assert response.status_code == 200 + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + response = client.get(f"/user/{user_uuid}/deployment/{deployment_uuid}") + assert response.status_code == 200 + response_json = response.json() + assert len(response_json) == 1 + assert "uuid" in response_json[0] + assert response_json[0]["name"] == "Test token" + assert response_json[0]["expiry"] == "99d" + + +@pytest.mark.db +@pytest.mark.asyncio +async def 
test_delete_deployment_auth_token( + user_uuid: str, monkeypatch: pytest.MonkeyPatch +) -> None: + deployment_uuid = uuid.uuid4() + + async def mock_find_model(*args: Any, **kwargs: Any) -> dict[str, Union[str, UUID]]: + return { + "user_uuid": user_uuid, + "uuid": deployment_uuid, + } + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + + response = client.post( + f"/user/{user_uuid}/deployment/{deployment_uuid}", + json={"name": "Test token", "expiry": "99d"}, + ) + assert response.status_code == 200 + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + response = client.get(f"/user/{user_uuid}/deployment/{deployment_uuid}") + assert len(response.json()) == 1 + auth_token_uuid = str(response.json()[0]["uuid"]) + + response = client.delete( + url=f"/user/{user_uuid}/deployment/{deployment_uuid}/{auth_token_uuid}" + ) + assert response.status_code == 200 diff --git a/tests/conftest.py b/tests/conftest.py index e69de29b..05c1d627 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -0,0 +1,819 @@ +import contextlib +import os +import random +import socket +import threading +import time +import uuid +from collections.abc import AsyncGenerator, AsyncIterator, Iterator +from platform import system +from typing import ( + Annotated, + Any, + Callable, + Optional, + TypeVar, +) +from unittest.mock import MagicMock + +import fastapi +import openai +import pytest +import pytest_asyncio +import uvicorn +from fastagency.runtime.autogen.tools.web_surfer import WebSurferTool +from fastapi import FastAPI, Path +from pydantic import BaseModel +from pydantic import __version__ as version_of_pydantic + +from fastagency_studio.db.base import DefaultDB +from fastagency_studio.db.inmemory import InMemoryBackendDB, InMemoryFrontendDB +from fastagency_studio.helpers import create_autogen, create_model_ref, get_model_by_ref +from fastagency_studio.models.agents.assistant import AssistantAgent +from fastagency_studio.models.agents.user_proxy import UserProxyAgent +from fastagency_studio.models.agents.web_surfer import BingAPIKey, WebSurferAgent +from fastagency_studio.models.base import ObjectReference +from fastagency_studio.models.llms.anthropic import Anthropic, AnthropicAPIKey +from fastagency_studio.models.llms.azure import AzureOAI, AzureOAIAPIKey +from fastagency_studio.models.llms.openai import OpenAI, OpenAIAPIKey +from fastagency_studio.models.llms.together import TogetherAI, TogetherAIAPIKey +from fastagency_studio.models.teams.two_agent_teams import TwoAgentTeam +from fastagency_studio.models.toolboxes.toolbox import OpenAPIAuth, Toolbox + +from .helpers import add_random_suffix, expand_fixture, get_by_tag, tag, tag_list + +F = TypeVar("F", bound=Callable[..., Any]) + + +@pytest_asyncio.fixture(scope="session", autouse=True) # type: ignore[misc] +async def set_default_db() -> AsyncGenerator[None, None]: + backend_db = InMemoryBackendDB() + frontend_db = InMemoryFrontendDB() + + with ( + DefaultDB.set(backend_db=backend_db, frontend_db=frontend_db), + ): + yield + + +@pytest_asyncio.fixture(scope="session") # type: ignore[misc] +async def user_uuid() -> AsyncIterator[str]: + try: + random_id = random.randint(1, 1_000_000) + generated_uuid = uuid.uuid4() + email = f"user{random_id}@airt.ai" + username = f"user{random_id}" + + await DefaultDB.frontend()._create_user( + user_uuid=generated_uuid, email=email, username=username + ) + user = await 
DefaultDB.frontend().get_user(user_uuid=generated_uuid) + + yield user["uuid"] + finally: + pass + + +################################################################################ +### +# Fixtures for LLMs +### +################################################################################ + + +def azure_model_llm_config(model_env_name: str) -> dict[str, Any]: + api_key = os.getenv("AZURE_OPENAI_API_KEY", default="*" * 64) + api_base = os.getenv( + "AZURE_API_ENDPOINT", default="https://my-deployment.openai.azure.com" + ) + + def get_default_model_name(model_env_name: str) -> str: + if model_env_name == "AZURE_GPT35_MODEL": + return "gpt-35-turbo-16k" + elif model_env_name == "AZURE_GPT4_MODEL": + return "gpt-4" + elif model_env_name == "AZURE_GPT4o_MODEL": + return "gpt-4o" + else: + raise ValueError(f"Unknown model_env_name: {model_env_name}") + + default_model_env_name = get_default_model_name(model_env_name) + gpt_model_name = os.getenv(model_env_name, default=default_model_env_name) + + openai.api_type = "azure" + openai.api_version = os.getenv("AZURE_API_VERSION", default="2024-02-01") + + config_list = [ + { + "model": gpt_model_name, + "api_key": api_key, + "base_url": api_base, + "api_type": openai.api_type, + "api_version": openai.api_version, + } + ] + + llm_config = { + "config_list": config_list, + "temperature": 0.0, + } + + return llm_config + + +@tag("llm_config") +@pytest.fixture +def azure_gpt35_turbo_16k_llm_config() -> dict[str, Any]: + return azure_model_llm_config("AZURE_GPT35_MODEL") + + +@tag("llm_config") +@pytest.fixture +def azure_gpt4_llm_config() -> dict[str, Any]: + return azure_model_llm_config("AZURE_GPT4_MODEL") + + +@tag("llm_config") +@pytest.fixture +def azure_gpt4o_llm_config() -> dict[str, Any]: + return azure_model_llm_config("AZURE_GPT4o_MODEL") + + +def openai_llm_config(model: str) -> dict[str, Any]: + zeros = "0" * 20 + api_key = os.getenv("OPENAI_API_KEY", default=f"sk-{zeros}T3BlbkFJ{zeros}") + + config_list = [ + { + "model": model, + "api_key": api_key, + } + ] + + llm_config = { + "config_list": config_list, + "temperature": 0.0, + } + + return llm_config + + +@tag("llm_config") +@pytest.fixture +def openai_gpt35_turbo_16k_llm_config() -> dict[str, Any]: + return openai_llm_config("gpt-3.5-turbo") + + +@tag("llm_config") +@pytest.fixture +def openai_gpt4o_llm_config() -> dict[str, Any]: + return openai_llm_config("gpt-4o") + + +@tag("llm_config") +@pytest.fixture +def openai_gpt4o_mini_llm_config() -> dict[str, Any]: + return openai_llm_config("gpt-4o-mini") + + +# @tag("llm_config") +# @pytest.fixture() +# def openai_gpt4_llm_config() -> Dict[str, Any]: +# return openai_llm_config("gpt-4") + + +@tag("llm-key") +@pytest_asyncio.fixture() +async def azure_oai_key_ref( + user_uuid: str, azure_gpt35_turbo_16k_llm_config: dict[str, Any] +) -> ObjectReference: + api_key = azure_gpt35_turbo_16k_llm_config["config_list"][0]["api_key"] + return await create_model_ref( + AzureOAIAPIKey, + "secret", + user_uuid=user_uuid, + name=add_random_suffix("azure_oai_key"), + api_key=api_key, + ) + + +@tag("llm", "noapi", "weather-llm") +@pytest_asyncio.fixture() +async def azure_oai_gpt35_ref( + user_uuid: str, + azure_gpt35_turbo_16k_llm_config: dict[str, Any], + azure_oai_key_ref: ObjectReference, +) -> ObjectReference: + kwargs = azure_gpt35_turbo_16k_llm_config["config_list"][0].copy() + kwargs.pop("api_key") + temperature = azure_gpt35_turbo_16k_llm_config["temperature"] + return await create_model_ref( + AzureOAI, + "llm", + user_uuid=user_uuid, + 
name=add_random_suffix("azure_oai"),
+        api_key=azure_oai_key_ref,
+        temperature=temperature,
+        **kwargs,
+    )
+
+
+@tag("llm")
+@pytest_asyncio.fixture()
+async def azure_oai_gpt4_ref(
+    user_uuid: str,
+    azure_gpt4_llm_config: dict[str, Any],
+    azure_oai_key_ref: ObjectReference,
+) -> ObjectReference:
+    kwargs = azure_gpt4_llm_config["config_list"][0].copy()
+    kwargs.pop("api_key")
+    temperature = azure_gpt4_llm_config["temperature"]
+    return await create_model_ref(
+        AzureOAI,
+        "llm",
+        user_uuid=user_uuid,
+        name=add_random_suffix("azure_oai"),
+        api_key=azure_oai_key_ref,
+        temperature=temperature,
+        **kwargs,
+    )
+
+
+@tag("llm", "websurfer-llm")
+@pytest_asyncio.fixture()
+async def azure_oai_gpt4o_ref(
+    user_uuid: str,
+    azure_gpt4o_llm_config: dict[str, Any],
+    azure_oai_key_ref: ObjectReference,
+) -> ObjectReference:
+    kwargs = azure_gpt4o_llm_config["config_list"][0].copy()
+    kwargs.pop("api_key")
+    temperature = azure_gpt4o_llm_config["temperature"]
+    return await create_model_ref(
+        AzureOAI,
+        "llm",
+        user_uuid=user_uuid,
+        name=add_random_suffix("azure_oai"),
+        api_key=azure_oai_key_ref,
+        temperature=temperature,
+        **kwargs,
+    )
+
+
+async def openai_oai_key_ref(
+    user_uuid: str, openai_llm_config: dict[str, Any]
+) -> ObjectReference:
+    api_key = openai_llm_config["config_list"][0]["api_key"]
+    model = openai_llm_config["config_list"][0]["model"]
+    return await create_model_ref(
+        OpenAIAPIKey,
+        "secret",
+        user_uuid=user_uuid,
+        name=add_random_suffix("openai_oai_key"),
+        api_key=api_key,
+        model=model,
+    )
+
+
+@tag("llm-key")
+@pytest_asyncio.fixture()
+async def openai_oai_key_gpt35_ref(
+    user_uuid: str, openai_gpt35_turbo_16k_llm_config: dict[str, Any]
+) -> ObjectReference:
+    return await openai_oai_key_ref(user_uuid, openai_gpt35_turbo_16k_llm_config)
+
+
+# @tag("llm-key")
+# @pytest_asyncio.fixture()
+# async def openai_oai_key_gpt4_ref(
+#     user_uuid: str, openai_gpt4_llm_config: Dict[str, Any]
+# ) -> ObjectReference:
+#     return await openai_oai_key_ref(user_uuid, openai_gpt4_llm_config)
+
+
+async def openai_oai_ref(
+    user_uuid: str,
+    openai_llm_config: dict[str, Any],
+    openai_oai_key_ref: ObjectReference,
+) -> ObjectReference:
+    kwargs = openai_llm_config["config_list"][0].copy()
+    kwargs.pop("api_key")
+    temperature = openai_llm_config["temperature"]
+    return await create_model_ref(
+        OpenAI,
+        "llm",
+        user_uuid=user_uuid,
+        name=add_random_suffix("openai_oai"),
+        api_key=openai_oai_key_ref,
+        temperature=temperature,
+        **kwargs,
+    )
+
+
+@tag("llm", "noapi", "weather-llm", "openai-llm")
+@pytest_asyncio.fixture()
+async def openai_oai_gpt35_ref(
+    user_uuid: str,
+    openai_gpt35_turbo_16k_llm_config: dict[str, Any],
+    openai_oai_key_gpt35_ref: ObjectReference,
+) -> ObjectReference:
+    return await openai_oai_ref(
+        user_uuid, openai_gpt35_turbo_16k_llm_config, openai_oai_key_gpt35_ref
+    )
+
+
+# @tag("openai-llm")
+# @pytest_asyncio.fixture()
+# async def openai_oai_gpt4_ref(
+#     user_uuid: str,
+#     openai_gpt4_llm_config: Dict[str, Any],
+#     openai_oai_key_gpt4_ref: ObjectReference,
+# ) -> ObjectReference:
+#     return await openai_oai_ref(
+#         user_uuid, openai_gpt4_llm_config, openai_oai_key_gpt4_ref
+#     )
+
+
+@tag("llm-key")
+@pytest_asyncio.fixture()
+async def anthropic_key_ref(user_uuid: str) -> ObjectReference:
+    api_key = os.getenv(
+        "ANTHROPIC_API_KEY",
+        default="sk-ant-api03-" + "_" * 95,
+    )
+
+    return await create_model_ref(
+        AnthropicAPIKey,
+        "secret",
+        user_uuid=user_uuid,
+        name=add_random_suffix("anthropic_api_key"),
+        api_key=api_key,
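+        # NOTE: the ANTHROPIC_API_KEY fallback above is a placeholder-shaped
+        # key, so fixture collection works without real credentials.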
+ ) + + +@tag("llm", "weather-llm") +@pytest_asyncio.fixture() +async def anthropic_ref( + user_uuid: str, + anthropic_key_ref: ObjectReference, +) -> ObjectReference: + return await create_model_ref( + Anthropic, + "llm", + user_uuid=user_uuid, + name=add_random_suffix("anthropic_api"), + api_key=anthropic_key_ref, + temperature=0.0, + ) + + +@tag("llm-key") +@pytest_asyncio.fixture() +async def together_ai_key_ref(user_uuid: str) -> ObjectReference: + api_key = os.getenv( + "TOGETHER_API_KEY", + default="*" * 64, + ) + + return await create_model_ref( + TogetherAIAPIKey, + "secret", + user_uuid=user_uuid, + name=add_random_suffix("togetherai_api_key"), + api_key=api_key, + ) + + +@tag("llm", "noapi") +@pytest_asyncio.fixture() +async def togetherai_ref( + user_uuid: str, + together_ai_key_ref: ObjectReference, +) -> ObjectReference: + return await create_model_ref( + TogetherAI, + "llm", + user_uuid=user_uuid, + name=add_random_suffix("togetherai"), + api_key=together_ai_key_ref, + model="Mixtral-8x7B Instruct v0.1", + temperature=0.0, + ) + + +################################################################################ +### +# Fixtures for Toolkit +### +################################################################################ + + +class Item(BaseModel): + name: str + description: Optional[str] = None + price: float + tax: Optional[float] = None + + +def create_fastapi_app(host: str, port: int) -> FastAPI: + app = FastAPI( + servers=[ + {"url": f"http://{host}:{port}", "description": "Local development server"} + ] + ) + + @app.get("/") + def read_root() -> dict[str, str]: + return {"Hello": "World"} + + @app.get("/items/{item_id}") + def read_item(item_id: int, q: Optional[str] = None) -> dict[str, Any]: + return {"item_id": item_id, "q": q} + + @app.post("/items") + async def create_item(item: Item) -> Item: + return item + + return app + + +def create_weather_fastapi_app(host: str, port: int) -> FastAPI: + app = FastAPI( + title="Weather", + servers=[ + {"url": f"http://{host}:{port}", "description": "Local development server"} + ], + ) + + @app.get("/forecast/{city}", description="Get the weather forecast for a city") + def forecast( + city: Annotated[str, Path(description="name of the city")], + ) -> str: + return f"Weather in {city} is sunny" + + return app + + +def create_gify_fastapi_app(host: str, port: int) -> FastAPI: + class Gif(BaseModel): + id: int + title: str + url: str + + app = FastAPI( + title="Gify", + servers=[ + {"url": f"http://{host}:{port}", "description": "Local development server"} + ], + ) + + @app.get("/gifs", response_model=list[Gif], tags=["gifs"]) + # TODO: API is failing if Query alias contains uppercase letters e.g. 
alias="Topic" + def get_gifs_for_topic(topic: str = fastapi.Query(..., alias="topic")) -> list[Gif]: + """Get GIFs for a topic.""" + return [ + Gif(id=1, title="Gif 1", url=f"https://gif.example.com/gif1?topic={topic}"), + Gif(id=2, title="Gif 2", url=f"https://gif.example.com/gif2?topic={topic}"), + ] + + @app.get("/gifs/{gifId}", response_model=Gif, tags=["gifs"]) + def get_gif_by_id(gif_id: int = fastapi.Path(..., alias="gifId")) -> Gif: + """Get GIF by Id.""" + return Gif(id=gif_id, title="Gif 1", url="https://gif.example.com/gif1") + + return app + + +def find_free_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) + return s.getsockname()[1] # type: ignore [no-any-return] + + +def run_server(app: FastAPI, host: str = "127.0.0.1", port: int = 8000) -> None: + uvicorn.run(app, host=host, port=port) + + +class Server(uvicorn.Server): # type: ignore [misc] + def install_signal_handlers(self) -> None: + pass + + @contextlib.contextmanager + def run_in_thread(self) -> Iterator[None]: + thread = threading.Thread(target=self.run) + thread.start() + try: + while not self.started: + time.sleep(1e-3) + yield + finally: + self.should_exit = True + thread.join() + + +@pytest.fixture(scope="session") +def fastapi_openapi_url() -> Iterator[str]: + host = "127.0.0.1" + port = find_free_port() + app = create_fastapi_app(host, port) + openapi_url = f"http://{host}:{port}/openapi.json" + + config = uvicorn.Config(app, host=host, port=port, log_level="info") + server = Server(config=config) + with server.run_in_thread(): + time.sleep(1 if system() != "Windows" else 5) # let the server start + + yield openapi_url + + +@pytest.fixture(scope="session") +def weather_fastapi_openapi_url() -> Iterator[str]: + host = "127.0.0.1" + port = find_free_port() + app = create_weather_fastapi_app(host, port) + openapi_url = f"http://{host}:{port}/openapi.json" + + config = uvicorn.Config(app, host=host, port=port, log_level="info") + server = Server(config=config) + with server.run_in_thread(): + time.sleep(1 if system() != "Windows" else 5) # let the server start + + yield openapi_url + + +@pytest.fixture(scope="session") +def gify_fastapi_openapi_url() -> Iterator[str]: + host = "127.0.0.1" + port = find_free_port() + app = create_gify_fastapi_app(host, port) + + openapi_url = f"http://{host}:{port}/openapi.json" + config = uvicorn.Config(app, host=host, port=port, log_level="info") + server = Server(config=config) + with server.run_in_thread(): + time.sleep(1 if system() != "Windows" else 5) # let the server start + + yield openapi_url + + +@tag("toolbox", "items") +@pytest_asyncio.fixture() # type: ignore[misc] +async def toolbox_ref(user_uuid: str, fastapi_openapi_url: str) -> ObjectReference: + openapi_auth = await create_model_ref( + OpenAPIAuth, + "secret", + user_uuid, + name="openapi_auth_secret", + username="test", + password="password", # pragma: allowlist secret + ) + + toolbox = await create_model_ref( + Toolbox, + "toolbox", + user_uuid, + name="test_toolbox", + openapi_url=fastapi_openapi_url, + openapi_auth=openapi_auth, + ) + + return toolbox + + +@tag("toolbox", "weather") +@pytest_asyncio.fixture() # type: ignore[misc] +async def weather_toolbox_ref( + user_uuid: str, weather_fastapi_openapi_url: str +) -> ObjectReference: + openapi_auth = await create_model_ref( + OpenAPIAuth, + "secret", + user_uuid, + name="openapi_auth_secret", + username="test", + password="password", # pragma: allowlist secret + ) + + toolbox = await create_model_ref( + Toolbox, + 
"toolbox", + user_uuid, + name="test_toolbox", + openapi_url=weather_fastapi_openapi_url, + openapi_auth=openapi_auth, + ) + + return toolbox + + +@pytest.fixture +def pydantic_version() -> float: + return float(".".join(version_of_pydantic.split(".")[:2])) + + +################################################################################ +### +# Fixtures for Agents +### +################################################################################ + + +@tag_list("assistant", "noapi") +@expand_fixture( + dst_fixture_prefix="assistant_noapi", + src_fixtures_names=get_by_tag("llm", "noapi"), + placeholder_name="llm_ref", +) +async def placeholder_assistant_noapi_ref( + user_uuid: str, llm_ref: ObjectReference +) -> ObjectReference: + return await create_model_ref( + AssistantAgent, + "agent", + user_uuid=user_uuid, + name=add_random_suffix("assistant"), + llm=llm_ref, + ) + + +# @pytest_asyncio.fixture() +# async def assistant_noapi_openai_oai_gpt4_ref( +# user_uuid: str, openai_oai_gpt4_ref: ObjectReference +# ) -> ObjectReference: +# return await create_model_ref( +# AssistantAgent, +# "agent", +# user_uuid=user_uuid, +# name=add_random_suffix("assistant"), +# llm=openai_oai_gpt4_ref, +# ) + + +@tag_list("assistant", "weather") +@expand_fixture( + dst_fixture_prefix="assistant_weather", + src_fixtures_names=get_by_tag("weather-llm"), + placeholder_name="llm_ref", +) +async def placeholder_assistant_weatherapi_ref( + user_uuid: str, llm_ref: ObjectReference, weather_toolbox_ref: ObjectReference +) -> ObjectReference: + return await create_model_ref( + AssistantAgent, + "agent", + user_uuid=user_uuid, + name=add_random_suffix("assistant_weather"), + llm=llm_ref, + toolbox_1=weather_toolbox_ref, + system_message="You are a helpful assistant with access to Weather API. 
After you successfully answer the question asked and there are no new questions, terminate the chat by outputting 'TERMINATE' (in all caps, e.g.'Terminate' will be ignored).", + ) + + +@pytest_asyncio.fixture() +async def bing_api_key_ref(user_uuid: str) -> ObjectReference: + api_key = os.getenv( + "BING_API_KEY", + default="*" * 64, + ) + return await create_model_ref( + BingAPIKey, + "secret", + user_uuid=user_uuid, + name=add_random_suffix("bing_api_key"), + api_key=api_key, + ) + + +@tag_list("websurfer") +@expand_fixture( + dst_fixture_prefix="websurfer", + src_fixtures_names=get_by_tag("websurfer-llm"), + placeholder_name="llm_ref", +) +async def placeholder_websurfer_ref( + user_uuid: str, llm_ref: ObjectReference, bing_api_key_ref: ObjectReference +) -> ObjectReference: + return await create_model_ref( + WebSurferAgent, + "agent", + user_uuid=user_uuid, + name=add_random_suffix("websurfer"), + llm=llm_ref, + summarizer_llm=llm_ref, + bing_api_key=bing_api_key_ref, + ) + + +@tag_list("websurfer-chat") +@expand_fixture( + dst_fixture_prefix="websurfer_chat", + src_fixtures_names=get_by_tag("websurfer"), + placeholder_name="websurfer_ref", +) +async def placeholder_websurfer_chat( + user_uuid: str, websurfer_ref: ObjectReference, bing_api_key_ref: ObjectReference +) -> WebSurferTool: + websurfer_model: WebSurferAgent = await get_model_by_ref(websurfer_ref) # type: ignore [assignment] + llm_config = await create_autogen(websurfer_model.llm, user_uuid) + summarizer_llm_config = await create_autogen( + websurfer_model.summarizer_llm, user_uuid + ) + + bing_api_key = ( + await create_autogen(websurfer_model.bing_api_key, user_uuid) + if websurfer_model.bing_api_key + else None + ) + + viewport_size = websurfer_model.viewport_size + + return WebSurferTool( + name_prefix=websurfer_model.name, + llm_config=llm_config, + summarizer_llm_config=summarizer_llm_config, + viewport_size=viewport_size, + bing_api_key=bing_api_key, + ) + + +@pytest_asyncio.fixture() +async def user_proxy_agent_ref(user_uuid: str) -> ObjectReference: + return await create_model_ref( + UserProxyAgent, + "agent", + user_uuid=user_uuid, + name=add_random_suffix("user_proxy_agent"), + max_consecutive_auto_reply=10, + human_input_mode="NEVER", + ) + + +class InputMock: + def __init__(self, responses: list[str]) -> None: + """Initialize the InputMock.""" + self.responses = responses + self.mock = MagicMock() + + def __call__(self, *args: Any, **kwargs: Any) -> str: + self.mock(*args, **kwargs) + return self.responses.pop(0) + + +################################################################################ +### +# Fixtures for Two Agent Teams +### +################################################################################ + + +@tag_list("team", "noapi") +@expand_fixture( + dst_fixture_prefix="two_agent_team_noapi", + src_fixtures_names=get_by_tag("assistant", "noapi"), + placeholder_name="assistant_ref", +) +async def placeholder_team_noapi_ref( + user_uuid: str, + assistant_ref: ObjectReference, + user_proxy_agent_ref: ObjectReference, +) -> ObjectReference: + return await create_model_ref( + TwoAgentTeam, + "team", + user_uuid=user_uuid, + name=add_random_suffix("two_agent_team_noapi"), + initial_agent=user_proxy_agent_ref, + secondary_agent=assistant_ref, + human_input_mode="NEVER", + ) + + +@tag_list("team", "weather") +@expand_fixture( + dst_fixture_prefix="two_agent_team_weatherapi", + src_fixtures_names=get_by_tag("assistant", "weather"), + placeholder_name="assistant_ref", +) +async def 
placeholder_team_weatherapi_ref( + user_uuid: str, + assistant_ref: ObjectReference, + user_proxy_agent_ref: ObjectReference, +) -> ObjectReference: + return await create_model_ref( + TwoAgentTeam, + "team", + user_uuid=user_uuid, + name=add_random_suffix("two_agent_team_weather"), + initial_agent=user_proxy_agent_ref, + secondary_agent=assistant_ref, + human_input_mode="NEVER", + ) + + +# FastAPI app for testing + +################################################################################ +### +# Fixtures for application +### +################################################################################ diff --git a/tests/db/__init__.py b/tests/db/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/db/test_inmemory.py b/tests/db/test_inmemory.py new file mode 100644 index 00000000..1aeeb615 --- /dev/null +++ b/tests/db/test_inmemory.py @@ -0,0 +1,185 @@ +import random +import uuid +from datetime import datetime, timedelta +from typing import Any, Union +from uuid import UUID + +import pytest + +import fastagency_studio.db +import fastagency_studio.db.inmemory +from fastagency_studio.db.base import DefaultDB, KeyNotFoundError +from fastagency_studio.db.inmemory import InMemoryBackendDB, InMemoryFrontendDB +from fastagency_studio.models.llms.azure import AzureOAIAPIKey + + +@pytest.mark.asyncio +class TestInMemoryFrontendDB: + async def test_set(self) -> None: + frontend_db = InMemoryFrontendDB() + backend_db = InMemoryBackendDB() + with DefaultDB.set(backend_db=backend_db, frontend_db=frontend_db): + assert DefaultDB._frontend_db == frontend_db + assert DefaultDB._backend_db == backend_db + + async def test_db(self) -> None: + frontend_db = InMemoryFrontendDB() + backend_db = InMemoryBackendDB() + with DefaultDB.set(backend_db=backend_db, frontend_db=frontend_db): + assert DefaultDB.frontend() == frontend_db + assert DefaultDB.backend() == backend_db + + async def test_create_user_get_user(self) -> None: + frontend_db = InMemoryFrontendDB() + + random_id = random.randint(1, 1_000_000) + generated_uuid = uuid.uuid4() + email = f"user{random_id}@airt.ai" + username = f"user{random_id}" + + user_uuid = await frontend_db._create_user(generated_uuid, email, username) + assert user_uuid == generated_uuid + + user = await frontend_db.get_user(user_uuid) + assert user["uuid"] == str(user_uuid) + assert user["email"] == email + assert user["username"] == username + + async def test_user_exception(self) -> None: + frontend_db = InMemoryFrontendDB() + user_uuid = uuid.uuid4() + with pytest.raises(KeyNotFoundError) as e: + await frontend_db.get_user(user_uuid) + assert f"user_uuid {user_uuid} not found" == str(e.value) + + +@pytest.mark.db +@pytest.mark.asyncio +class TestInMemoryBackendDB: + async def test_model_CRUD(self) -> None: # noqa: N802 + # Setup + frontend_db = InMemoryFrontendDB() + backend_db = InMemoryBackendDB() + random_id = random.randint(1, 1_000_000) + user_uuid = await frontend_db._create_user( + uuid.uuid4(), f"user{random_id}@airt.ai", f"user{random_id}" + ) + model_uuid = uuid.uuid4() + azure_oai_api_key = AzureOAIAPIKey(api_key="whatever", name="who cares?") + + # Tests + model = await backend_db.create_model( + user_uuid=user_uuid, + model_uuid=model_uuid, + type_name="secret", + model_name="AzureOAIAPIKey", + json_str=azure_oai_api_key.model_dump_json(), + ) + assert model["uuid"] == str(model_uuid) + assert model["user_uuid"] == str(user_uuid) + assert model["type_name"] == "secret" + assert model["model_name"] == "AzureOAIAPIKey" + assert 
model["json_str"] == azure_oai_api_key.model_dump() + + found_model = await backend_db.find_model(model_uuid) + assert found_model["uuid"] == str(model_uuid) + + many_model = await backend_db.find_many_model(user_uuid) + assert len(many_model) == 1 + assert many_model[0]["uuid"] == str(model_uuid) + + updated_model = await backend_db.update_model( + model_uuid=model_uuid, + user_uuid=user_uuid, + type_name="secret", + model_name="AzureOAIAPIKey2", + json_str=azure_oai_api_key.model_dump_json(), + ) + assert updated_model["uuid"] == str(model_uuid) + assert updated_model["model_name"] == "AzureOAIAPIKey2" + + deleted_model = await backend_db.delete_model(model_uuid) + assert deleted_model["uuid"] == str(model_uuid) + + async def test_auth_token_CRUD(self, monkeypatch: pytest.MonkeyPatch) -> None: # noqa: N802 + # Setup + frontend_db = InMemoryFrontendDB() + backend_db = InMemoryBackendDB() + random_id = random.randint(1, 1_000_000) + user_uuid = await frontend_db._create_user( + uuid.uuid4(), f"user{random_id}@airt.ai", f"user{random_id}" + ) + deployment_uuid = uuid.uuid4() + auth_token_uuid = uuid.uuid4() + + async def mock_find_model( + *args: Any, **kwargs: Any + ) -> dict[str, Union[str, UUID]]: + return { + "user_uuid": user_uuid, + "uuid": deployment_uuid, + } + + monkeypatch.setattr( + fastagency_studio.db.inmemory.InMemoryBackendDB, + "find_model", + mock_find_model, + ) + + # Tests + auth_token = await backend_db.create_auth_token( + auth_token_uuid=auth_token_uuid, + name="Test token", + user_uuid=user_uuid, + deployment_uuid=deployment_uuid, + hashed_auth_token="whatever", + expiry="99d", + expires_at=datetime.utcnow() + timedelta(days=99), + ) + assert auth_token["uuid"] == str(auth_token_uuid) + assert auth_token["name"] == "Test token" + + many_auth_token = await backend_db.find_many_auth_token( + user_uuid, deployment_uuid + ) + assert len(many_auth_token) == 1 + assert many_auth_token[0]["uuid"] == str(auth_token_uuid) + + deleted_auth_token = await backend_db.delete_auth_token( + auth_token_uuid, deployment_uuid, user_uuid + ) + assert deleted_auth_token["uuid"] == str(auth_token_uuid) + + async def test_model_exception(self) -> None: + backend_db = InMemoryBackendDB() + model_uuid = uuid.uuid4() + user_uuid = uuid.uuid4() + with pytest.raises(KeyNotFoundError) as e: + await backend_db.find_model(model_uuid) + assert f"model_uuid {model_uuid} not found" == str(e.value) + + with pytest.raises(KeyNotFoundError) as e: + await backend_db.update_model( + model_uuid=model_uuid, + user_uuid=user_uuid, + type_name="secret", + model_name="AzureOAIAPIKey2", + json_str="[]", + ) + assert f"model_uuid {model_uuid} not found" == str(e.value) + + with pytest.raises(KeyNotFoundError) as e: + await backend_db.delete_model(model_uuid) + assert f"model_uuid {model_uuid} not found" == str(e.value) + + async def test_auth_token_exception(self) -> None: + backend_db = InMemoryBackendDB() + auth_token_uuid = uuid.uuid4() + deployment_uuid = uuid.uuid4() + user_uuid = uuid.uuid4() + + with pytest.raises(KeyNotFoundError) as e: + await backend_db.delete_auth_token( + auth_token_uuid, deployment_uuid, user_uuid + ) + assert f"auth_token_uuid {auth_token_uuid} not found" == str(e.value) diff --git a/tests/db/test_prisma.py b/tests/db/test_prisma.py new file mode 100644 index 00000000..b5a0596e --- /dev/null +++ b/tests/db/test_prisma.py @@ -0,0 +1,186 @@ +import random +import uuid +from datetime import datetime, timedelta +from typing import Any, Union +from uuid import UUID + +import pytest + 
+import fastagency_studio.db +import fastagency_studio.db.prisma +from fastagency_studio.db.base import DefaultDB, KeyNotFoundError +from fastagency_studio.db.prisma import PrismaBackendDB, PrismaFrontendDB +from fastagency_studio.models.llms.azure import AzureOAIAPIKey + + +@pytest.mark.db +@pytest.mark.asyncio +class TestPrismaFrontendDB: + async def test_set(self) -> None: + frontend_db = PrismaFrontendDB() + backend_db = PrismaBackendDB() + with DefaultDB.set(backend_db=backend_db, frontend_db=frontend_db): + assert DefaultDB._frontend_db == frontend_db + assert DefaultDB._backend_db == backend_db + + async def test_db(self) -> None: + frontend_db = PrismaFrontendDB() + backend_db = PrismaBackendDB() + with DefaultDB.set(backend_db=backend_db, frontend_db=frontend_db): + assert DefaultDB.frontend() == frontend_db + assert DefaultDB.backend() == backend_db + + async def test_create_user_get_user(self) -> None: + frontend_db = PrismaFrontendDB() + + random_id = random.randint(1, 1_000_000) + generated_uuid = uuid.uuid4() + email = f"user{random_id}@airt.ai" + username = f"user{random_id}" + + user_uuid = await frontend_db._create_user(generated_uuid, email, username) + assert user_uuid == generated_uuid + + user = await frontend_db.get_user(user_uuid) + assert user["uuid"] == str(user_uuid) + assert user["email"] == email + assert user["username"] == username + + async def test_user_exception(self) -> None: + frontend_db = PrismaFrontendDB() + user_uuid = uuid.uuid4() + with pytest.raises(KeyNotFoundError) as e: + await frontend_db.get_user(user_uuid) + assert f"user_uuid {user_uuid} not found" == str(e.value) + + +@pytest.mark.db +@pytest.mark.asyncio +class TestPrismaBackendDB: + async def test_model_CRUD(self) -> None: # noqa: N802 + # Setup + frontend_db = PrismaFrontendDB() + backend_db = PrismaBackendDB() + random_id = random.randint(1, 1_000_000) + user_uuid = await frontend_db._create_user( + uuid.uuid4(), f"user{random_id}@airt.ai", f"user{random_id}" + ) + model_uuid = uuid.uuid4() + azure_oai_api_key = AzureOAIAPIKey(api_key="whatever", name="who cares?") + + # Tests + model = await backend_db.create_model( + user_uuid=user_uuid, + model_uuid=model_uuid, + type_name="secret", + model_name="AzureOAIAPIKey", + json_str=azure_oai_api_key.model_dump_json(), + ) + assert model["uuid"] == str(model_uuid) + assert model["user_uuid"] == str(user_uuid) + assert model["type_name"] == "secret" + assert model["model_name"] == "AzureOAIAPIKey" + assert model["json_str"] == azure_oai_api_key.model_dump() + + found_model = await backend_db.find_model(model_uuid) + assert found_model["uuid"] == str(model_uuid) + + many_model = await backend_db.find_many_model(user_uuid) + assert len(many_model) == 1 + assert many_model[0]["uuid"] == str(model_uuid) + + updated_model = await backend_db.update_model( + model_uuid=model_uuid, + user_uuid=user_uuid, + type_name="secret", + model_name="AzureOAIAPIKey2", + json_str=azure_oai_api_key.model_dump_json(), + ) + assert updated_model["uuid"] == str(model_uuid) + assert updated_model["model_name"] == "AzureOAIAPIKey2" + + deleted_model = await backend_db.delete_model(model_uuid) + assert deleted_model["uuid"] == str(model_uuid) + + async def test_auth_token_CRUD(self, monkeypatch: pytest.MonkeyPatch) -> None: # noqa: N802 + # Setup + frontend_db = PrismaFrontendDB() + backend_db = PrismaBackendDB() + random_id = random.randint(1, 1_000_000) + user_uuid = await frontend_db._create_user( + uuid.uuid4(), f"user{random_id}@airt.ai", f"user{random_id}" + ) + 
deployment_uuid = uuid.uuid4() + auth_token_uuid = uuid.uuid4() + + async def mock_find_model( + *args: Any, **kwargs: Any + ) -> dict[str, Union[str, UUID]]: + return { + "user_uuid": user_uuid, + "uuid": deployment_uuid, + } + + monkeypatch.setattr( + fastagency_studio.db.prisma.PrismaBackendDB, + "find_model", + mock_find_model, + ) + + # Tests + auth_token = await backend_db.create_auth_token( + auth_token_uuid=auth_token_uuid, + name="Test token", + user_uuid=user_uuid, + deployment_uuid=deployment_uuid, + hashed_auth_token="whatever", + expiry="99d", + expires_at=datetime.utcnow() + timedelta(days=99), + ) + assert auth_token["uuid"] == str(auth_token_uuid) + assert auth_token["name"] == "Test token" + + many_auth_token = await backend_db.find_many_auth_token( + user_uuid, deployment_uuid + ) + assert len(many_auth_token) == 1 + assert many_auth_token[0]["uuid"] == str(auth_token_uuid) + + deleted_auth_token = await backend_db.delete_auth_token( + auth_token_uuid, deployment_uuid, user_uuid + ) + assert deleted_auth_token["uuid"] == str(auth_token_uuid) + + async def test_model_exception(self) -> None: + backend_db = PrismaBackendDB() + model_uuid = uuid.uuid4() + user_uuid = uuid.uuid4() + with pytest.raises(KeyNotFoundError) as e: + await backend_db.find_model(model_uuid) + assert f"model_uuid {model_uuid} not found" == str(e.value) + + with pytest.raises(KeyNotFoundError) as e: + await backend_db.update_model( + model_uuid=model_uuid, + user_uuid=user_uuid, + type_name="secret", + model_name="AzureOAIAPIKey2", + json_str="[]", + ) + assert f"model_uuid {model_uuid} not found" == str(e.value) + + with pytest.raises(KeyNotFoundError) as e: + await backend_db.delete_model(model_uuid) + assert f"model_uuid {model_uuid} not found" == str(e.value) + + async def test_auth_token_exception(self) -> None: + backend_db = PrismaBackendDB() + auth_token_uuid = uuid.uuid4() + deployment_uuid = uuid.uuid4() + user_uuid = uuid.uuid4() + + with pytest.raises(KeyNotFoundError) as e: + await backend_db.delete_auth_token( + auth_token_uuid, deployment_uuid, user_uuid + ) + assert f"auth_token_uuid {auth_token_uuid} not found" == str(e.value) diff --git a/tests/faststream_app/__init__.py b/tests/faststream_app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/faststream_app/test_faststream_app.py b/tests/faststream_app/test_faststream_app.py new file mode 100644 index 00000000..fcd5ec23 --- /dev/null +++ b/tests/faststream_app/test_faststream_app.py @@ -0,0 +1,97 @@ +import asyncio +import json +import random + +import pytest +from faststream.nats import TestNatsBroker + +from fastagency_studio.faststream_app import broker, register_handler, stream + + +@pytest.mark.nats +@pytest.mark.asyncio +async def test_register_handler() -> None: + client_id = random.randint(1, 1000) + async with TestNatsBroker(broker, with_real=True) as br: + # br._connection = br.stream = AsyncMock() + await br.publish( + {"client_id": client_id}, + f"register.{client_id}", + ) + await register_handler.wait_call(timeout=3) + + register_handler.mock.assert_called_once_with({"client_id": client_id}) # type: ignore[union-attr] + + # Later I will send a message to "ping.*" and will await for "pong.*" message + + +@pytest.mark.nats +@pytest.mark.asyncio +async def test_ping_handler() -> None: + client_id = random.randint(1, 1000) + + msg_queue: asyncio.Queue = asyncio.Queue(maxsize=1) # type: ignore [type-arg] + + @broker.subscriber(f"pong.{client_id}", stream=stream) + async def pong_handler(msg: str) -> 
None: + await msg_queue.put(msg) + + async with TestNatsBroker(broker, with_real=True) as br: + await br.publish( + {"client_id": client_id}, + f"register.{client_id}", + ) + await register_handler.wait_call(timeout=3) + + register_handler.mock.assert_called_once_with({"client_id": client_id}) # type: ignore[union-attr] + + await br.publish({"msg": "ping"}, f"ping.{client_id}") + # await ping_handler.wait_call(timeout=3) + + # ping_handler.mock.assert_called_once_with({"msg": "ping"}) # type: ignore[union-attr] + + result_set, _ = await asyncio.wait( + (asyncio.create_task(msg_queue.get()),), timeout=3 + ) + assert len(result_set) == 1 + result = json.loads(result_set.pop().result()) + assert result["msg"] == "pong" + assert "process_id" in result + + +@pytest.mark.nats +@pytest.mark.asyncio +async def test_ping_handler_with_wrong_message() -> None: + client_id = random.randint(1, 1000) + + msg_queue: asyncio.Queue = asyncio.Queue(maxsize=1) # type: ignore [type-arg] + + @broker.subscriber(f"pong.{client_id}", stream=stream) + async def pong_handler(msg: str) -> None: + await msg_queue.put(msg) + + async with TestNatsBroker(broker, with_real=True) as br: + await br.publish( + {"client_id": client_id}, + f"register.{client_id}", + ) + await register_handler.wait_call(timeout=3) + + register_handler.mock.assert_called_once_with({"client_id": client_id}) # type: ignore[union-attr] + + msg_to_send = {"msg": "This is a random message"} + await br.publish(msg_to_send, f"ping.{client_id}") # type: ignore[arg-type] + # await ping_handler.wait_call(timeout=3) + + # ping_handler.mock.assert_called_once_with({"msg": "ping"}) # type: ignore[union-attr] + + result_set, _ = await asyncio.wait( + (asyncio.create_task(msg_queue.get()),), timeout=3 + ) + assert len(result_set) == 1 + result = json.loads(result_set.pop().result()) + expected_msg = ( + f"Unknown message: {msg_to_send}, please send 'ping' in body['msg']" + ) + assert result["msg"] == expected_msg + assert "process_id" in result diff --git a/tests/helpers.py b/tests/helpers.py new file mode 100644 index 00000000..92bd4d62 --- /dev/null +++ b/tests/helpers.py @@ -0,0 +1,172 @@ +import functools +import inspect +import random +import types +from asyncio import iscoroutinefunction +from typing import Any, Callable, TypeVar + +import pytest +import pytest_asyncio + +__all__ = ["add_random_suffix", "parametrize_fixtures", "tag", "tag_list"] + + +def add_random_suffix(prefix: str) -> str: + return f"{prefix}_{random.randint(0, 1_000_000_000):09d}" + + +F = TypeVar("F", bound=Callable[..., Any]) + +_tags: dict[str, list[str]] = {} + + +def tag(*args: str) -> Callable[[F], F]: + def decorator(f: F, args: tuple[str, ...] = args) -> F: + global _tags + if not hasattr(f, "_pytestfixturefunction"): + raise ValueError(f"function {f.__name__} is not a fixture") + + name = f._pytestfixturefunction.name + if name is None: + name = f.__name__ + + for my_tag in args: + if my_tag in _tags: + _tags[my_tag].append(name) + else: + _tags[my_tag] = [name] + + return f + + return decorator + + +def tag_list(*args: str) -> Callable[[list[F]], list[F]]: + def decorator(fs: list[F], args: tuple[str, ...] 
= args) -> list[F]: + return [tag(*args)(f) for f in fs] + + return decorator + + +def get_by_tag(*args: str) -> list[str]: + xs = [_tags.get(my_tag, []) for my_tag in args] + return list(functools.reduce(set.intersection, map(set, xs))) # type: ignore[arg-type] + + +def get_tags() -> list[str]: + return list(_tags.keys()) + + +def get_caller_globals() -> dict[str, Any]: + # Get the caller's frame + caller_frame = inspect.stack()[2].frame + + # Set the global variable in the caller's module + caller_globals = caller_frame.f_globals + + return caller_globals + + +def parametrize_fixtures( + parameter_name: str, src_fixtures: list[str] +) -> Callable[[F], F]: + def decorator(f: F, parameter_name: str = parameter_name) -> F: + f = pytest.mark.parametrize(parameter_name, src_fixtures, indirect=True)(f) + + # this is needed to make the fixture available in the caller's module + @pytest.fixture(name=parameter_name) + def wrapper(request: Any) -> Any: + return request.getfixturevalue(request.param) + + caller_globals = get_caller_globals() + + var_name = add_random_suffix(f"parametrized_fixtures_{parameter_name}") + caller_globals[var_name] = wrapper + + return f + + return decorator + + +def rename_parameter(src_name: str, dst_name: str) -> Callable[[F], F]: + def decorator(f: F) -> F: + # Get the original signature of the function + sig = inspect.signature(f) + + # Create a new parameter list with src_name replaced by dst_name + params = [ + inspect.Parameter( + dst_name if param.name == src_name else param.name, + param.kind, + default=param.default, + annotation=param.annotation, + ) + for param in sig.parameters.values() + ] + + # Create a new signature with the modified parameters + new_sig = sig.replace(parameters=params) + + # Define the body of the new function + if iscoroutinefunction(f): + + async def wrapper(*args, **kwargs): # type: ignore[no-untyped-def] + bound_args = new_sig.bind(*args, **kwargs) + bound_args.apply_defaults() + arguments = bound_args.arguments + + if dst_name in arguments: + arguments[src_name] = arguments.pop(dst_name) + + return await f(**arguments) + else: + + def wrapper(*args, **kwargs): # type: ignore[no-untyped-def] + bound_args = new_sig.bind(*args, **kwargs) + bound_args.apply_defaults() + arguments = bound_args.arguments + + if dst_name in arguments: + arguments[src_name] = arguments.pop(dst_name) + + return f(**arguments) + + # Create the new function with the modified signature + new_func = types.FunctionType( + wrapper.__code__, + globals(), + name=f.__name__, + argdefs=wrapper.__defaults__, + closure=wrapper.__closure__, + ) + new_func.__signature__ = new_sig # type: ignore[attr-defined] + functools.update_wrapper(new_func, f) + return new_func # type: ignore + + return decorator + + +def expand_fixture( + dst_fixture_prefix: str, + src_fixtures_names: list[str], + placeholder_name: str, +) -> Callable[[F], list[F]]: + def decorator(f: F) -> list[F]: + retval: list[F] = [] + for src_type in src_fixtures_names: + name = f"{dst_fixture_prefix}_{src_type}" + + f_renamed = rename_parameter(placeholder_name, src_type)(f) + if iscoroutinefunction(f): + f_fixture = pytest_asyncio.fixture(name=name)(f_renamed) + else: + f_fixture = pytest.fixture(name=name)(f_renamed) + + caller_globals = get_caller_globals() + caller_globals[name] = f_fixture + + retval.append(f_fixture) + + return retval + + return decorator diff --git a/tests/models/__init__.py b/tests/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/tests/models/agents/__init__.py b/tests/models/agents/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/models/agents/test_assistant.py b/tests/models/agents/test_assistant.py new file mode 100644 index 00000000..eb11bc5a --- /dev/null +++ b/tests/models/agents/test_assistant.py @@ -0,0 +1,289 @@ +from typing import Any + +import autogen +import pytest +from fastagency.api.openapi import OpenAPI + +from fastagency_studio.helpers import create_autogen +from fastagency_studio.models.agents.assistant import AssistantAgent +from fastagency_studio.models.base import ObjectReference + +from ...helpers import get_by_tag, parametrize_fixtures + + +class TestAssistantAgent: + @pytest.mark.asyncio + @pytest.mark.db + @pytest.mark.llm + @parametrize_fixtures("assistant_ref", get_by_tag("assistant")) + async def test_assistant_construction( + self, + user_uuid: str, + assistant_ref: ObjectReference, + ) -> None: + print(f"test_assistant_construction({user_uuid=}, {assistant_ref=})") # noqa: T201 + + def test_assistant_model_schema(self) -> None: + schema = AssistantAgent.model_json_schema() + expected = { + "$defs": { + "AnthropicRef": { + "properties": { + "type": { + "const": "llm", + "default": "llm", + "description": "The name of the type of the data", + "enum": ["llm"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "Anthropic", + "default": "Anthropic", + "description": "The name of the data", + "enum": ["Anthropic"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "AnthropicRef", + "type": "object", + }, + "AzureOAIRef": { + "properties": { + "type": { + "const": "llm", + "default": "llm", + "description": "The name of the type of the data", + "enum": ["llm"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "AzureOAI", + "default": "AzureOAI", + "description": "The name of the data", + "enum": ["AzureOAI"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "AzureOAIRef", + "type": "object", + }, + "OpenAIRef": { + "properties": { + "type": { + "const": "llm", + "default": "llm", + "description": "The name of the type of the data", + "enum": ["llm"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "OpenAI", + "default": "OpenAI", + "description": "The name of the data", + "enum": ["OpenAI"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "OpenAIRef", + "type": "object", + }, + "TogetherAIRef": { + "properties": { + "type": { + "const": "llm", + "default": "llm", + "description": "The name of the type of the data", + "enum": ["llm"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "TogetherAI", + "default": "TogetherAI", + "description": "The name of the data", + "enum": ["TogetherAI"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "TogetherAIRef", + "type": "object", + }, + "ToolboxRef": { + "properties": { + "type": { + "const": "toolbox", + "default": "toolbox", + "description": "The 
name of the type of the data", + "enum": ["toolbox"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "Toolbox", + "default": "Toolbox", + "description": "The name of the data", + "enum": ["Toolbox"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "ToolboxRef", + "type": "object", + }, + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "llm": { + "anyOf": [ + {"$ref": "#/$defs/AnthropicRef"}, + {"$ref": "#/$defs/AzureOAIRef"}, + {"$ref": "#/$defs/OpenAIRef"}, + {"$ref": "#/$defs/TogetherAIRef"}, + ], + "description": "LLM used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the LLM the agent will use to generate responses." + }, + "title": "LLM", + }, + "toolbox_1": { + "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}], + "default": None, + "description": "Toolbox used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries." + }, + "title": "Toolbox", + }, + "toolbox_2": { + "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}], + "default": None, + "description": "Toolbox used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries." + }, + "title": "Toolbox", + }, + "toolbox_3": { + "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}], + "default": None, + "description": "Toolbox used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries." + }, + "title": "Toolbox", + }, + "system_message": { + "default": "You are a helpful assistant. After you successfully answer all questions and there are no new questions asked after your response (e.g. there is no specific direction or question asked after you give a response), terminate the chat by outputting 'TERMINATE' (IMPORTANT: use all caps)", + "description": "The system message of the agent. This message is used to inform the agent about his role in the conversation", + "metadata": { + "tooltip_message": "The system message defines the agent's role and influences its responses. For example, telling the agent 'You are an expert in travel advice' will make its responses focus on travel." 
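+                        # note: this default message instructs the agent to finish with
+                        # "TERMINATE", which the is_termination_msg helpers below detect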
+                    },
+                    "title": "System Message",
+                    "type": "string",
+                },
+            },
+            "required": ["name", "llm"],
+            "title": "AssistantAgent",
+            "type": "object",
+        }
+        # print(f"{schema=}")
+        assert schema == expected
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @parametrize_fixtures("assistant_ref", get_by_tag("assistant", "weather"))
+    async def test_assistant_create_autogen(
+        self,
+        user_uuid: str,
+        assistant_ref: ObjectReference,
+    ) -> None:
+        def is_termination_msg(msg: dict[str, Any]) -> bool:
+            # the chat is over once the assistant's reply contains "TERMINATE"
+            return "TERMINATE" in (msg["content"] or "")
+
+        ag_assistant, ag_toolkits = await create_autogen(
+            model_ref=assistant_ref,
+            user_uuid=user_uuid,
+            is_termination_msg=is_termination_msg,
+        )
+        assert isinstance(ag_assistant, autogen.agentchat.AssistantAgent)
+        assert isinstance(ag_toolkits[0], OpenAPI)
+        assert len(ag_toolkits) == 1
+        assert ag_assistant._is_termination_msg == is_termination_msg
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @pytest.mark.llm
+    @parametrize_fixtures("assistant_ref", get_by_tag("assistant", "weather"))
+    async def test_assistant_weather_end2end(
+        self,
+        user_uuid: str,
+        assistant_ref: ObjectReference,
+    ) -> None:
+        ag_assistant, ag_toolkits = await create_autogen(
+            model_ref=assistant_ref,
+            user_uuid=user_uuid,
+        )
+
+        user_proxy = autogen.agentchat.UserProxyAgent(
+            name="user_proxy",
+            human_input_mode="NEVER",
+            max_consecutive_auto_reply=10,
+        )
+        weather_tool: OpenAPI = ag_toolkits[0]
+        weather_tool._register_for_execution(user_proxy)
+        weather_tool._register_for_llm(ag_assistant)
+        chat_result = user_proxy.initiate_chat(
+            ag_assistant, message="What is the weather in New York?"
+        )
+
+        messages = [msg["content"] for msg in chat_result.chat_history]
+        for w in ["New York", "sunny", "TERMINATE"]:
+            assert any(msg is not None and w in msg for msg in messages), (w, messages)
diff --git a/tests/models/agents/test_user_proxy.py b/tests/models/agents/test_user_proxy.py
new file mode 100644
index 00000000..6e05bf54
--- /dev/null
+++ b/tests/models/agents/test_user_proxy.py
@@ -0,0 +1,37 @@
+import uuid
+
+import autogen
+import pytest
+from fastapi import BackgroundTasks
+
+from fastagency_studio.app import add_model
+from fastagency_studio.models.agents.user_proxy import UserProxyAgent
+
+
+class TestUserProxyAgent:
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    async def test_user_proxy_model_create_autogen(
+        self,
+        user_uuid: str,
+    ) -> None:
+        user_proxy_model = UserProxyAgent(
+            name="User proxy",
+            system_message="test system message",
+        )
+        user_proxy_model_uuid = str(uuid.uuid4())
+        await add_model(
+            user_uuid=user_uuid,
+            type_name="agent",
+            model_name=UserProxyAgent.__name__,
+            model_uuid=user_proxy_model_uuid,
+            model=user_proxy_model.model_dump(),
+            background_tasks=BackgroundTasks(),
+        )
+
+        agent, functions = await UserProxyAgent.create_autogen(
+            model_id=uuid.UUID(user_proxy_model_uuid),
+            user_id=uuid.UUID(user_uuid),
+        )
+        assert isinstance(agent, autogen.agentchat.UserProxyAgent)
+        assert functions == []
diff --git a/tests/models/agents/test_web_surfer.py b/tests/models/agents/test_web_surfer.py
new file mode 100644
index 00000000..4bdbd846
--- /dev/null
+++ b/tests/models/agents/test_web_surfer.py
@@ -0,0 +1,490 @@
+import uuid
+from typing import Any
+
+import autogen.agentchat.contrib.web_surfer
+import pytest
+from asyncer import asyncify
+from fastagency.runtime.autogen.tools.web_surfer import WebSurferAnswer
+from fastapi import BackgroundTasks
+
+from fastagency_studio.app import add_model
+from fastagency_studio.helpers import create_autogen, get_model_by_ref
+from fastagency_studio.models.agents.web_surfer import BingAPIKey, WebSurferAgent
+from fastagency_studio.models.base import ObjectReference
+from fastagency_studio.models.llms.azure import AzureOAIAPIKey
+
+from ...helpers import get_by_tag, parametrize_fixtures
+
+
+class TestWebSurferAgent:
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @pytest.mark.llm
+    @parametrize_fixtures("websurfer_ref", get_by_tag("websurfer"))
+    async def test_websurfer_construction(
+        self,
+        user_uuid: str,
+        websurfer_ref: ObjectReference,
+    ) -> None:
+        websurfer: WebSurferAgent = await get_model_by_ref(websurfer_ref)  # type: ignore [assignment]
+        print(f"test_websurfer_construction({user_uuid=}, {websurfer=})")  # noqa: T201
+        assert isinstance(websurfer, WebSurferAgent)
+        assert websurfer.bing_api_key is not None
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @pytest.mark.llm
+    @parametrize_fixtures("llm_ref", get_by_tag("websurfer-llm"))
+    async def test_websurfer_llm_construction(
+        self,
+        user_uuid: str,
+        llm_ref: ObjectReference,
+    ) -> None:
+        llm = await get_model_by_ref(llm_ref)
+        print(f"test_websurfer_llm_construction({user_uuid=}, {llm=})")  # noqa: T201
+
+    def test_web_surfer_model_schema(self) -> None:
+        schema = WebSurferAgent.model_json_schema()
+        expected = {
+            "$defs": {
+                "AnthropicRef": {
+                    "properties": {
+                        "type": {
+                            "const": "llm",
+                            "default": "llm",
+                            "description": "The name of the type of the data",
+                            "enum": ["llm"],
+                            "title": "Type",
+                            "type": "string",
+                        },
+                        "name": {
+                            "const": "Anthropic",
+                            "default": "Anthropic",
+                            "description": "The name of the data",
+                            "enum": ["Anthropic"],
+                            "title": "Name",
+                            "type": "string",
+                        },
+                        "uuid": {
+                            "description": "The unique identifier",
+                            "format": "uuid",
+                            "title": "UUID",
+                            "type": "string",
+                        },
+                    },
+                    "required": ["uuid"],
+                    "title": "AnthropicRef",
+                    "type": "object",
+                },
+                "AzureOAIRef": {
+                    "properties": {
+                        "type": {
+                            "const": "llm",
+                            "default": "llm",
+                            "description": "The name of the type of the data",
+                            "enum": ["llm"],
+                            "title": "Type",
+                            "type": "string",
+                        },
+                        "name": {
+                            "const": "AzureOAI",
+                            "default": "AzureOAI",
+                            "description": "The name of the data",
+                            "enum": ["AzureOAI"],
+                            "title": "Name",
+                            "type": "string",
+                        },
+                        "uuid": {
+                            "description": "The unique identifier",
+                            "format": "uuid",
+                            "title": "UUID",
+                            "type": "string",
+                        },
+                    },
+                    "required": ["uuid"],
+                    "title": "AzureOAIRef",
+                    "type": "object",
+                },
+                "BingAPIKeyRef": {
+                    "properties": {
+                        "type": {
+                            "const": "secret",
+                            "default": "secret",
+                            "description": "The name of the type of the data",
+                            "enum": ["secret"],
+                            "title": "Type",
+                            "type": "string",
+                        },
+                        "name": {
+                            "const": "BingAPIKey",
+                            "default": "BingAPIKey",
+                            "description": "The name of the data",
+                            "enum": ["BingAPIKey"],
+                            "title": "Name",
+                            "type": "string",
+                        },
+                        "uuid": {
+                            "description": "The unique identifier",
+                            "format": "uuid",
+                            "title": "UUID",
+                            "type": "string",
+                        },
+                    },
+                    "required": ["uuid"],
+                    "title": "BingAPIKeyRef",
+                    "type": "object",
+                },
+                "OpenAIRef": {
+                    "properties": {
+                        "type": {
+                            "const": "llm",
+                            "default": "llm",
+                            "description": "The name of the type of the data",
+                            "enum": ["llm"],
+                            "title": "Type",
+                            "type": "string",
+                        },
+                        "name": {
+                            "const": "OpenAI",
+                            "default": "OpenAI",
+                            "description": "The name of the data",
+                            "enum": ["OpenAI"],
+                            "title": "Name",
+                            "type": "string",
+                        },
+                        "uuid": {
+                            "description": "The unique identifier",
+                            "format": "uuid",
+                            "title": "UUID",
+                            "type": "string",
+                        },
+                    },
+                    "required": ["uuid"],
+                    "title":
"OpenAIRef", + "type": "object", + }, + "TogetherAIRef": { + "properties": { + "type": { + "const": "llm", + "default": "llm", + "description": "The name of the type of the data", + "enum": ["llm"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "TogetherAI", + "default": "TogetherAI", + "description": "The name of the data", + "enum": ["TogetherAI"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "TogetherAIRef", + "type": "object", + }, + "ToolboxRef": { + "properties": { + "type": { + "const": "toolbox", + "default": "toolbox", + "description": "The name of the type of the data", + "enum": ["toolbox"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "Toolbox", + "default": "Toolbox", + "description": "The name of the data", + "enum": ["Toolbox"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "ToolboxRef", + "type": "object", + }, + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "llm": { + "anyOf": [ + {"$ref": "#/$defs/AnthropicRef"}, + {"$ref": "#/$defs/AzureOAIRef"}, + {"$ref": "#/$defs/OpenAIRef"}, + {"$ref": "#/$defs/TogetherAIRef"}, + ], + "description": "LLM used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the LLM the agent will use to generate responses." + }, + "title": "LLM", + }, + "toolbox_1": { + "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}], + "default": None, + "description": "Toolbox used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries." + }, + "title": "Toolbox", + }, + "toolbox_2": { + "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}], + "default": None, + "description": "Toolbox used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries." + }, + "title": "Toolbox", + }, + "toolbox_3": { + "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}], + "default": None, + "description": "Toolbox used by the agent for producing responses", + "metadata": { + "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries." + }, + "title": "Toolbox", + }, + "summarizer_llm": { + "anyOf": [ + {"$ref": "#/$defs/AnthropicRef"}, + {"$ref": "#/$defs/AzureOAIRef"}, + {"$ref": "#/$defs/OpenAIRef"}, + {"$ref": "#/$defs/TogetherAIRef"}, + ], + "description": "This LLM will be used to generated summary of all pages visited", + "metadata": { + "tooltip_message": "Select the summarizer LLM, which is used for generating precise and accurate summaries of web pages, while the LLM chosen above is used for handling regular web searches." + }, + "title": "Summarizer LLM", + }, + "viewport_size": { + "default": 4096, + "description": "The viewport size of the browser", + "metadata": { + "tooltip_message": "Viewport size refers to the visible area of a webpage in the browser. Default is 4096. Modify only if a custom size is needed." 
+                    },
+                    "title": "Viewport Size",
+                    "type": "integer",
+                },
+                "bing_api_key": {
+                    "anyOf": [{"$ref": "#/$defs/BingAPIKeyRef"}, {"type": "null"}],
+                    "default": None,
+                    "description": "The Bing API key for the browser",
+                    "metadata": {
+                        "tooltip_message": "Choose a Bing API key to allow the browser to access Bing's search and data services, improving information retrieval."
+                    },
+                    "title": "Bing API Key",
+                },
+            },
+            "required": ["name", "llm", "summarizer_llm"],
+            "title": "WebSurferAgent",
+            "type": "object",
+        }
+        # print(f"{schema=}")
+        assert schema == expected
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @parametrize_fixtures("websurfer_ref", get_by_tag("websurfer"))
+    async def test_websurfer_create_autogen(
+        self,
+        user_uuid: str,
+        websurfer_ref: ObjectReference,
+    ) -> None:
+        def is_termination_msg(msg: dict[str, Any]) -> bool:
+            return "TERMINATE" in (msg["content"] or "")
+
+        ag_assistant, ag_toolkits = await create_autogen(
+            model_ref=websurfer_ref,
+            user_uuid=user_uuid,
+            is_termination_msg=is_termination_msg,
+        )
+        assert isinstance(ag_assistant, autogen.agentchat.AssistantAgent)
+        assert len(ag_toolkits) == 1
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @pytest.mark.llm
+    @parametrize_fixtures("websurfer_ref", get_by_tag("websurfer"))
+    @pytest.mark.parametrize(
+        "task",
+        [
+            # "Visit https://en.wikipedia.org/wiki/Zagreb and tell me when Zagreb became a free royal city.",
+            # "What is the most expensive NVIDIA GPU on https://www.alternate.de/ and how much it costs?",
+            "Compile a list of news headlines under section 'Politika i kriminal' on telegram.hr.",
+            # "What is the most newsworthy story today?",
+            # "Given that weather forecast today is warm and sunny, what would be the best way to spend an evening in Zagreb according to the weather forecast?",
+        ],
+    )
+    @pytest.mark.skip(reason="This test is not working properly in CI")
+    async def test_websurfer_end2end(
+        self,
+        user_uuid: str,
+        websurfer_ref: ObjectReference,
+        # assistant_noapi_azure_oai_gpt4o_ref: ObjectReference,
+        task: str,
+    ) -> None:
+        ag_websurfer, ag_toolboxes = await create_autogen(
+            model_ref=websurfer_ref,
+            user_uuid=user_uuid,
+        )
+        ag_user_proxy = autogen.agentchat.UserProxyAgent(
+            name="user_proxy",
+            human_input_mode="NEVER",
+            max_consecutive_auto_reply=4,
+        )
+
+        ag_toolbox = ag_toolboxes[0]
+        ag_toolbox.register_for_llm(ag_websurfer)
+        ag_toolbox.register_for_execution(ag_user_proxy)
+
+        chat_result = await asyncify(ag_user_proxy.initiate_chat)(
+            recipient=ag_websurfer,
+            message=task,
+        )
+
+        messages = [
+            msg["content"]
+            for msg in chat_result.chat_history
+            if msg["content"] is not None
+        ]
+        assert messages != []
+
+        # one common error message if there is a bug with syncify
+        assert not any(
+            "Error: This function can only be run from an AnyIO worker thread" in msg
+            for msg in messages
+        ), messages
+
+        # extract final message from web surfer
+        websurfer_replies = []
+        for msg in messages:
+            try:
+                model = WebSurferAnswer.model_validate_json(msg)
+                websurfer_replies.append(model)
+            except Exception:  # noqa: PERF203
+                pass
+
+        # we have at least one successful reply
+        websurfer_successful_replies = [
+            reply for reply in websurfer_replies if reply.is_successful
+        ]
+        assert websurfer_successful_replies != []
+
+    # @pytest.mark.skip()
+    # @pytest.mark.asyncio()
+    # @pytest.mark.db()
+    # @pytest.mark.llm()
+    # @parametrize_fixtures("websurfer_ref", get_by_tag("websurfer"))
+    # async def test_websurfer_and_toolkit_end2end(
+    #     self,
+    #     user_uuid: str,
+    #     websurfer_ref: ObjectReference,
+ # assistant_weather_openai_oai_gpt35_ref: ObjectReference, + # openai_gpt35_turbo_16k_llm_config: Dict[str, Any], + # ) -> None: + # ag_websurfer, _ = await create_autogen( + # model_ref=websurfer_ref, + # user_uuid=user_uuid, + # ) + + # ag_assistant, ag_toolboxes = await create_autogen( + # model_ref=assistant_weather_openai_oai_gpt35_ref, + # user_uuid=user_uuid, + # ) + + # ag_user_proxy = autogen.agentchat.UserProxyAgent( + # name="user_proxy", + # human_input_mode="NEVER", + # max_consecutive_auto_reply=4, + # ) + + # ag_toolbox = ag_toolboxes[0] + # ag_toolbox.register_for_llm(ag_assistant) + # ag_toolbox.register_for_execution(ag_user_proxy) + + # groupchat = autogen.GroupChat( + # agents=[ag_assistant, ag_websurfer, ag_user_proxy], + # messages=[], + # ) + + # manager = autogen.GroupChatManager( + # groupchat=groupchat, + # llm_config=openai_gpt35_turbo_16k_llm_config, + # ) + # chat_result = manager.initiate_chat( + # recipient=manager, + # message="Find out what's the weather in Zagreb today and then visit https://www.infozagreb.hr/hr/dogadanja and check what would be the best way to spend an evening in Zagreb according to the weather forecast.", + # ) + + # messages = [msg["content"] for msg in chat_result.chat_history] + # assert messages is not [] + + # # print("*" * 80) + # # print() + # # for msg in messages: + # # print(msg) + # # print() + # # print("*" * 80) + + # # for w in ["sunny", "Zagreb", ]: + # # assert any(msg is not None and w in msg for msg in messages), (w, messages) + + +# todo +class TestBingAPIKey: + @pytest.mark.asyncio + @pytest.mark.db + async def test_bing_api_key_model_create_autogen( + self, + azure_gpt35_turbo_16k_llm_config: dict[str, Any], + user_uuid: str, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + # Add secret to database + api_key = BingAPIKey( # type: ignore [operator] + api_key="dummy_bing_api_key", # pragma: allowlist secret + name="api_key_model_name", + ) + api_key_model_uuid = str(uuid.uuid4()) + await add_model( + user_uuid=user_uuid, + type_name="secret", + model_name=BingAPIKey.__name__, # type: ignore [attr-defined] + model_uuid=api_key_model_uuid, + model=api_key.model_dump(), + background_tasks=BackgroundTasks(), + ) + + # Call create_autogen + actual_api_key = await AzureOAIAPIKey.create_autogen( + model_id=uuid.UUID(api_key_model_uuid), + user_id=uuid.UUID(user_uuid), + ) + assert isinstance(actual_api_key, str) + assert actual_api_key == api_key.api_key diff --git a/tests/models/deployments/__init__.py b/tests/models/deployments/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/models/deployments/test_deployment.py b/tests/models/deployments/test_deployment.py new file mode 100644 index 00000000..4d49ffbe --- /dev/null +++ b/tests/models/deployments/test_deployment.py @@ -0,0 +1,291 @@ +import json +import uuid + +import jsondiff +import pytest +from pydantic import ValidationError + +from fastagency_studio.models.base import Model +from fastagency_studio.models.deployments.deployment import Deployment +from fastagency_studio.models.secrets.fly_token import FlyToken +from fastagency_studio.models.secrets.github_token import GitHubToken +from fastagency_studio.models.teams.multi_agent_team import MultiAgentTeam +from fastagency_studio.models.teams.two_agent_teams import TwoAgentTeam + + +class TestDeployment: + @pytest.mark.parametrize( + "team_model", + [TwoAgentTeam, pytest.param(MultiAgentTeam, marks=pytest.mark.skip)], + ) + @pytest.mark.parametrize("gh_token_model", [(GitHubToken)]) + 
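+    # note: stacking parametrize decorators runs this test over the cartesian
+    # product of team, GitHub-token, and Fly-token models; the MultiAgentTeam
+    # case is skipped via pytest.param above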
@pytest.mark.parametrize("fly_token_model", [(FlyToken)]) + def test_deployment_constructor( + self, team_model: Model, gh_token_model: Model, fly_token_model: Model + ) -> None: + team_uuid = uuid.uuid4() + team = team_model.get_reference_model()(uuid=team_uuid) + + gh_token_uuid = uuid.uuid4() + gh_token = gh_token_model.get_reference_model()(uuid=gh_token_uuid) + + fly_token_uuid = uuid.uuid4() + fly_token = fly_token_model.get_reference_model()(uuid=fly_token_uuid) + + try: + deployment = Deployment( + team=team, + name="Test Deployment", + repo_name="test-deployment", + fly_app_name="test-deployment", + gh_token=gh_token, + fly_token=fly_token, + ) + except ValidationError: + # print(f"{e.errors()=}") + raise + + assert deployment.team == team + + def test_deployment_model_schema(self, pydantic_version: float) -> None: + schema = Deployment.model_json_schema() + expected = { + "$defs": { + "FlyTokenRef": { + "properties": { + "type": { + "const": "secret", + "default": "secret", + "description": "The name of the type of the data", + "enum": ["secret"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "FlyToken", + "default": "FlyToken", + "description": "The name of the data", + "enum": ["FlyToken"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "FlyTokenRef", + "type": "object", + }, + "GitHubTokenRef": { + "properties": { + "type": { + "const": "secret", + "default": "secret", + "description": "The name of the type of the data", + "enum": ["secret"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "GitHubToken", + "default": "GitHubToken", + "description": "The name of the data", + "enum": ["GitHubToken"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "GitHubTokenRef", + "type": "object", + }, + "TwoAgentTeamRef": { + "properties": { + "type": { + "const": "team", + "default": "team", + "description": "The name of the type of the data", + "enum": ["team"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "TwoAgentTeam", + "default": "TwoAgentTeam", + "description": "The name of the data", + "enum": ["TwoAgentTeam"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "TwoAgentTeamRef", + "type": "object", + }, + }, + "properties": { + "name": { + "description": "The name of the SaaS application.", + "metadata": { + "tooltip_message": "The application name to be used in the deployed SaaS application." + }, + "minLength": 1, + "title": "Name", + "type": "string", + }, + "repo_name": { + "description": "The name of the GitHub repository.", + "metadata": { + "immutable_after_creation": True, + "tooltip_message": "The GitHub repository to be created. If the name contains spaces or special characters, GitHub will adjust it according to its naming rules. 
A random suffix will be added if the repository name already exists.", + }, + "minLength": 1, + "title": "Repo Name", + "type": "string", + }, + "fly_app_name": { + "description": "The name of the Fly.io application.", + "maxLength": 30, + "metadata": { + "immutable_after_creation": True, + "tooltip_message": "The Fly.io application. This will be used to create and deploy your React, Node.js, and PostgreSQL apps to Fly.io.", + }, + "minLength": 1, + "title": "Fly App Name", + "type": "string", + }, + "team": { + "$ref": "#/$defs/TwoAgentTeamRef", + "description": "The team that is used in the deployment", + "metadata": { + "tooltip_message": "Choose the team to be used for deployment. User messages are sent to the Initial agent of the chosen team, and the agent's responses are sent back to the user. This field can be updated anytime to switch teams, with changes reflected in real-time in your deployments." + }, + "title": "Team Name", + }, + "gh_token": { + "$ref": "#/$defs/GitHubTokenRef", + "description": "The GitHub token to use for creating a new repository", + "metadata": { + "immutable_after_creation": True, + "tooltip_message": "Choose the GitHub token used for authenticating and managing access to your GitHub account.", + }, + "title": "GH Token", + }, + "fly_token": { + "$ref": "#/$defs/FlyTokenRef", + "description": "The Fly.io token to use for deploying the deployment", + "metadata": { + "immutable_after_creation": True, + "tooltip_message": "Choose the Fly.io token used for authenticating and managing access to your Fly.io account.", + }, + "title": "Fly Token", + }, + }, + "required": [ + "name", + "repo_name", + "fly_app_name", + "team", + "gh_token", + "fly_token", + ], + "title": "Deployment", + "type": "object", + } + # print(schema) + pydantic28_delta = '{"properties": {"team": {"allOf": [{"$$ref": "#/$defs/TwoAgentTeamRef"}], "$delete": ["$$ref"]}, "gh_token": {"allOf": [{"$$ref": "#/$defs/GitHubTokenRef"}], "$delete": ["$$ref"]}, "fly_token": {"allOf": [{"$$ref": "#/$defs/FlyTokenRef"}], "$delete": ["$$ref"]}}}' + if pydantic_version < 2.9: + # print(f"pydantic28_delta = '{jsondiff.diff(expected, schema, dump=True)}'") + expected = jsondiff.patch(json.dumps(expected), pydantic28_delta, load=True) + assert schema == expected + + @pytest.mark.parametrize( + "team_model", + [TwoAgentTeam, pytest.param(MultiAgentTeam, marks=pytest.mark.skip)], + ) + @pytest.mark.parametrize("gh_token_model", [(GitHubToken)]) + @pytest.mark.parametrize("fly_token_model", [(FlyToken)]) + def test_assistant_model_validation( + self, team_model: Model, gh_token_model: Model, fly_token_model: Model + ) -> None: + team_uuid = uuid.uuid4() + team = team_model.get_reference_model()(uuid=team_uuid) + + gh_token_uuid = uuid.uuid4() + gh_token = gh_token_model.get_reference_model()(uuid=gh_token_uuid) + + fly_token_uuid = uuid.uuid4() + fly_token = fly_token_model.get_reference_model()(uuid=fly_token_uuid) + + deployment = Deployment( + team=team, + name="Test Deployment", + repo_name="test-deployment", + fly_app_name="test-deployment", + gh_token=gh_token, + fly_token=fly_token, + ) + + deployment_json = deployment.model_dump_json() + # print(f"{deployment_json=}") + assert deployment_json is not None + + validated_deployment = Deployment.model_validate_json(deployment_json) + # print(f"{validated_agent=}") + assert validated_deployment is not None + assert validated_deployment == deployment + + @pytest.mark.parametrize( + "fly_app_name", ["", "app_name", "123-app-name", "2024-06-29"] + ) + def 
test_invalid_fly_io_app_name(self, fly_app_name: str) -> None: + with pytest.raises(ValidationError): + Deployment( + team=TwoAgentTeam.get_reference_model()(uuid=uuid.uuid4()), + name="Test Deployment", + repo_name="test-deployment", + fly_app_name=fly_app_name, + gh_token=GitHubToken.get_reference_model()(uuid=uuid.uuid4()), + fly_token=FlyToken.get_reference_model()(uuid=uuid.uuid4()), + ) + + @pytest.mark.parametrize("repo_name", ["repo name", "repo@name", "repo/name"]) + def test_invalid_repo_name(self, repo_name: str) -> None: + with pytest.raises( + ValueError, match="The repository name can only contain ASCII letters" + ): + Deployment( + team=TwoAgentTeam.get_reference_model()(uuid=uuid.uuid4()), + name="Test Deployment", + repo_name=repo_name, + fly_app_name="fly-app-name", + gh_token=GitHubToken.get_reference_model()(uuid=uuid.uuid4()), + fly_token=FlyToken.get_reference_model()(uuid=uuid.uuid4()), + ) + + @pytest.mark.parametrize("fly_app_name", ["app-name", "fa-123-app-name"]) + def test_valid_fly_io_app_name(self, fly_app_name: str) -> None: + Deployment( + team=TwoAgentTeam.get_reference_model()(uuid=uuid.uuid4()), + name="Test Deployment", + repo_name="test-deployment", + fly_app_name=fly_app_name, + gh_token=GitHubToken.get_reference_model()(uuid=uuid.uuid4()), + fly_token=FlyToken.get_reference_model()(uuid=uuid.uuid4()), + ) diff --git a/tests/models/llms/__init__.py b/tests/models/llms/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/models/llms/test_anthropic.py b/tests/models/llms/test_anthropic.py new file mode 100644 index 00000000..1e671d5e --- /dev/null +++ b/tests/models/llms/test_anthropic.py @@ -0,0 +1,187 @@ +import json +import uuid + +import jsondiff +import pytest + +from fastagency_studio.helpers import get_model_by_ref +from fastagency_studio.models.base import ObjectReference +from fastagency_studio.models.llms.anthropic import Anthropic, AnthropicAPIKey + + +def test_import(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False) + + from fastagency_studio.models.llms.anthropic import Anthropic + + assert Anthropic is not None + assert AnthropicAPIKey is not None + + +class TestAnthropic: + @pytest.mark.db + @pytest.mark.asyncio + async def test_anthropic_constructor(self, anthropic_ref: ObjectReference) -> None: + # create data + model = await get_model_by_ref(anthropic_ref) + assert isinstance(model, Anthropic) + + # dynamically created data + name = model.name + api_key_uuid = model.api_key.uuid # type: ignore [attr-defined] + + expected = { + "name": name, + "model": "claude-3-5-sonnet-20240620", + "api_key": { + "type": "secret", + "name": "AnthropicAPIKey", + "uuid": api_key_uuid, + }, + "base_url": "https://api.anthropic.com/v1", + "api_type": "anthropic", + "temperature": 0.0, + } + assert model.model_dump() == expected + + def test_anthropic_constructor_failure(self) -> None: + with pytest.raises(ValueError, match="Invalid Anthropic API Key"): + AnthropicAPIKey( + api_key="_sk-sUeBP9asw6GiYHXqtg70T3BlbkFJJuLwJFco90bOpU0Ntest", # pragma: allowlist secret + name="Hello World!", + ) + + def test_anthropic_model_schema(self, pydantic_version: float) -> None: + schema = Anthropic.model_json_schema() + expected = { + "$defs": { + "AnthropicAPIKeyRef": { + "properties": { + "type": { + "const": "secret", + "default": "secret", + "description": "The name of the type of the data", + "enum": ["secret"], + "title": "Type", + "type": "string", + }, + "name": { + "const": 
"AnthropicAPIKey", + "default": "AnthropicAPIKey", + "description": "The name of the data", + "enum": ["AnthropicAPIKey"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "AnthropicAPIKeyRef", + "type": "object", + } + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "model": { + "default": "claude-3-5-sonnet-20240620", + "description": "The model to use for the Anthropic API, e.g. 'claude-3-5-sonnet-20240620'", + "enum": [ + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + ], + "metadata": { + "tooltip_message": "Choose the model that the LLM should use to generate responses." + }, + "title": "Model", + "type": "string", + }, + "api_key": { + "$ref": "#/$defs/AnthropicAPIKeyRef", + "description": "The API Key from Anthropic", + "metadata": { + "tooltip_message": "Choose the API key that will be used to authenticate requests to Anthropic services." + }, + "title": "API Key", + }, + "base_url": { + "default": "https://api.anthropic.com/v1", + "description": "The base URL of the Anthropic API", + "format": "uri", + "maxLength": 2083, + "metadata": { + "tooltip_message": "The base URL that the LLM uses to interact with Anthropic services." + }, + "minLength": 1, + "title": "Base URL", + "type": "string", + }, + "api_type": { + "const": "anthropic", + "default": "anthropic", + "description": "The type of the API, must be 'anthropic'", + "enum": ["anthropic"], + "title": "API Type", + "type": "string", + }, + "temperature": { + "default": 0.8, + "description": "The temperature to use for the model, must be between 0 and 2", + "metadata": { + "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2." 
+ }, + "maximum": 2.0, + "minimum": 0.0, + "title": "Temperature", + "type": "number", + }, + }, + "required": ["name", "api_key"], + "title": "Anthropic", + "type": "object", + } + # print(schema) + pydantic28_delta = '{"properties": {"api_key": {"allOf": [{"$$ref": "#/$defs/AnthropicAPIKeyRef"}], "$delete": ["$$ref"]}}}' + if pydantic_version < 2.9: + # print(f"pydantic28_delta = '{jsondiff.diff(expected, schema, dump=True)}'") + expected = jsondiff.patch(json.dumps(expected), pydantic28_delta, load=True) + assert schema == expected + + @pytest.mark.asyncio + @pytest.mark.db + @pytest.mark.anthropic + async def test_anthropic_model_create_autogen( + self, + user_uuid: str, + anthropic_ref: ObjectReference, + ) -> None: + actual_llm_config = await Anthropic.create_autogen( + model_id=anthropic_ref.uuid, + user_id=uuid.UUID(user_uuid), + ) + assert isinstance(actual_llm_config, dict) + api_key = actual_llm_config["config_list"][0]["api_key"] + expected = { + "config_list": [ + { + "model": "claude-3-5-sonnet-20240620", + "api_key": api_key, + "base_url": "https://api.anthropic.com/v1", + "api_type": "anthropic", + } + ], + "temperature": 0.0, + } + + assert actual_llm_config == expected diff --git a/tests/models/llms/test_azure.py b/tests/models/llms/test_azure.py new file mode 100644 index 00000000..7c5573e9 --- /dev/null +++ b/tests/models/llms/test_azure.py @@ -0,0 +1,223 @@ +import json +from typing import Any + +import jsondiff +import pytest +from pydantic import ValidationError + +from fastagency_studio.helpers import create_autogen, get_model_by_ref +from fastagency_studio.models.base import ObjectReference +from fastagency_studio.models.llms.azure import ( + BASE_URL_ERROR_MESSAGE, + AzureOAI, + AzureOAIAPIKey, + UrlModel, +) + + +def test_import(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("AZURE_OAI_API_KEY", raising=False) + + from fastagency_studio.models.llms.azure import AzureOAI + + assert AzureOAI is not None + assert AzureOAIAPIKey is not None + + +class TestAzureOAI: + @pytest.mark.db + @pytest.mark.asyncio + async def test_azure_constructor( + self, azure_oai_gpt35_ref: ObjectReference + ) -> None: + # create data + model = await get_model_by_ref(azure_oai_gpt35_ref) + assert isinstance(model, AzureOAI) + + # dynamically created data + name = model.name + api_key_uuid = model.api_key.uuid # type: ignore [attr-defined] + base_url = model.base_url # type: ignore [attr-defined] + expected = { + "name": name, + "model": "gpt-35-turbo-16k", + "api_key": { + "type": "secret", + "name": "AzureOAIAPIKey", + "uuid": api_key_uuid, + }, + "base_url": base_url, + "api_type": "azure", + "api_version": "2024-02-01", + "temperature": 0.0, + } + assert model.model_dump() == expected + + @pytest.mark.parametrize( + "base_url", + [ + "https://{your-resource-name.openai.azure.com", + "https://your-resource-name}.openai.azure.com", + "https://{your-resource-name}.openai.azure.com", + ], + ) + @pytest.mark.db + @pytest.mark.asyncio + async def test_azure_constructor_with_invalid_base_url( + self, azure_oai_gpt35_ref: ObjectReference, base_url: str + ) -> None: + # create data + model = await get_model_by_ref(azure_oai_gpt35_ref) + assert isinstance(model, AzureOAI) + + # Construct a new AzureOAI model with the invalid base_url + with pytest.raises(ValidationError, match=BASE_URL_ERROR_MESSAGE): + AzureOAI( + name=model.name, + model=model.model, + api_key=model.api_key, + base_url=UrlModel(url=base_url).url, + api_type=model.api_type, + api_version=model.api_version, + 
temperature=model.temperature, + ) + + def test_azure_model_schema(self, pydantic_version: float) -> None: + schema = AzureOAI.model_json_schema() + expected = { + "$defs": { + "AzureOAIAPIKeyRef": { + "properties": { + "type": { + "const": "secret", + "default": "secret", + "description": "The name of the type of the data", + "enum": ["secret"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "AzureOAIAPIKey", + "default": "AzureOAIAPIKey", + "description": "The name of the data", + "enum": ["AzureOAIAPIKey"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "AzureOAIAPIKeyRef", + "type": "object", + } + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "model": { + "default": "gpt-3.5-turbo", + "description": "The model to use for the Azure OpenAI API, e.g. 'gpt-3.5-turbo'", + "metadata": { + "tooltip_message": "The model that the LLM uses to interact with Azure OpenAI services." + }, + "title": "Model", + "type": "string", + }, + "api_key": { + "$ref": "#/$defs/AzureOAIAPIKeyRef", + "description": "The API Key from Azure OpenAI", + "metadata": { + "tooltip_message": "Choose the API key that will be used to authenticate requests to Azure OpenAI services." + }, + "title": "API Key", + }, + "base_url": { + "default": "https://{your-resource-name}.openai.azure.com", + "description": "The base URL of the Azure OpenAI API", + "format": "uri", + "maxLength": 2083, + "metadata": { + "tooltip_message": "The base URL that the LLM uses to interact with Azure OpenAI services." + }, + "minLength": 1, + "title": "Base URL", + "type": "string", + }, + "api_type": { + "const": "azure", + "default": "azure", + "description": "The type of the API, must be 'azure'", + "enum": ["azure"], + "title": "API Type", + "type": "string", + }, + "api_version": { + "default": "2024-02-01", + "description": "The version of the Azure OpenAI API, e.g. '2024-02-01'", + "enum": [ + "2023-05-15", + "2023-06-01-preview", + "2023-10-01-preview", + "2024-02-15-preview", + "2024-03-01-preview", + "2024-04-01-preview", + "2024-05-01-preview", + "2024-02-01", + ], + "metadata": { + "tooltip_message": "The version of the Azure OpenAI API that the LLM uses to interact with Azure OpenAI services." + }, + "title": "API Version", + "type": "string", + }, + "temperature": { + "default": 0.8, + "description": "The temperature to use for the model, must be between 0 and 2", + "maximum": 2.0, + "metadata": { + "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2." 
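+                        # note: 0.8 is only the schema default; the fixtures checked by
+                        # test_azure_constructor above build models with temperature=0.0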
+                    },
+                    "minimum": 0.0,
+                    "title": "Temperature",
+                    "type": "number",
+                },
+            },
+            "required": ["name", "api_key"],
+            "title": "AzureOAI",
+            "type": "object",
+        }
+        # print(schema)
+        pydantic28_delta = '{"properties": {"api_key": {"allOf": [{"$$ref": "#/$defs/AzureOAIAPIKeyRef"}], "$delete": ["$$ref"]}}}'
+        if pydantic_version < 2.9:
+            # print(f"pydantic28_delta = '{jsondiff.diff(expected, schema, dump=True)}'")
+            expected = jsondiff.patch(json.dumps(expected), pydantic28_delta, load=True)
+
+        assert schema == expected
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    async def test_azure_model_create_autogen(
+        self,
+        user_uuid: str,
+        azure_oai_gpt35_ref: ObjectReference,
+        azure_gpt35_turbo_16k_llm_config: dict[str, Any],
+    ) -> None:
+        actual_llm_config = await create_autogen(
+            model_ref=azure_oai_gpt35_ref,
+            user_uuid=user_uuid,
+        )
+        assert isinstance(actual_llm_config, dict)
+        assert (
+            actual_llm_config["config_list"][0]
+            == azure_gpt35_turbo_16k_llm_config["config_list"][0]
+        )
+        assert actual_llm_config == azure_gpt35_turbo_16k_llm_config
diff --git a/tests/models/llms/test_end2end.py b/tests/models/llms/test_end2end.py
new file mode 100644
index 00000000..72179668
--- /dev/null
+++ b/tests/models/llms/test_end2end.py
@@ -0,0 +1,66 @@
+from typing import Any
+
+import pytest
+from autogen.agentchat import AssistantAgent
+
+from fastagency_studio.helpers import create_autogen
+from fastagency_studio.models.base import ObjectReference
+
+from ...helpers import add_random_suffix, get_by_tag, parametrize_fixtures
+
+
+@parametrize_fixtures("llm_ref", get_by_tag("llm"))
+@pytest.mark.asyncio
+@pytest.mark.db
+@pytest.mark.llm
+@pytest.mark.skip(reason="This test is not working properly in CI")
+async def test_end2end_simple_chat_with_two_agents(
+    user_uuid: str,
+    llm_ref: ObjectReference,
+) -> None:
+    llm_config = await create_autogen(model_ref=llm_ref, user_uuid=user_uuid)
+
+    flags: dict[str, bool] = {}
+
+    def is_termination_msg(msg: dict[str, Any]) -> bool:
+        flags["terminated"] = "TERMINATE" in msg["content"]
+        return flags["terminated"]
+
+    for question, answer_part in zip(
+        ["What is 2+2?", "What was the largest city in the world in 2000?"],
+        ["4", "Tokyo"],
+    ):
+        flags["terminated"] = False
+
+        assistant_agent = AssistantAgent(
+            name=add_random_suffix("assistant"),
+            llm_config=llm_config,
+            system_message="You are a helpful assistant.",
+            code_execution_config=False,
+            is_termination_msg=is_termination_msg,
+            human_input_mode="NEVER",
+            max_consecutive_auto_reply=10,
+        )
+
+        verifier_agent = AssistantAgent(
+            name=add_random_suffix("verifier"),
+            llm_config=llm_config,
+            system_message="""You are a verifier responsible for checking if other agents are giving correct answers. Please write a
+            few sentences with your thoughts about the answer before classifying it as correct or not.
+            If the answer is correct, please finalize your analysis with the word 'TERMINATE' to end the conversation.
+            Otherwise, give detailed feedback and help the agent in providing a correct answer.
+            """,
+            code_execution_config=False,
+            is_termination_msg=is_termination_msg,
+            human_input_mode="NEVER",
+            max_consecutive_auto_reply=10,
+        )
+
+        chat_result = verifier_agent.initiate_chat(
+            assistant_agent,
+            message=question,
+        )
+
+        messages = [msg["content"] for msg in chat_result.chat_history]
+        assert any(answer_part in msg for msg in messages), messages
+        assert flags["terminated"], messages
diff --git a/tests/models/llms/test_llm_keys.py b/tests/models/llms/test_llm_keys.py
new file mode 100644
index 00000000..c53cd9fb
--- /dev/null
+++ b/tests/models/llms/test_llm_keys.py
@@ -0,0 +1,35 @@
+import pytest
+
+from fastagency_studio.helpers import create_autogen, get_model_by_ref
+from fastagency_studio.models.base import Model, ObjectReference
+
+from ...helpers import get_by_tag, parametrize_fixtures
+
+
+class TestLLMKeys:
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @pytest.mark.llm
+    @parametrize_fixtures("llm_key_ref", get_by_tag("llm-key"))
+    async def test_llm_key_constructor(
+        self,
+        llm_key_ref: ObjectReference,
+    ) -> None:
+        model = await get_model_by_ref(llm_key_ref)
+        assert isinstance(model, Model)
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @pytest.mark.llm
+    @parametrize_fixtures("llm_key_ref", get_by_tag("llm-key"))
+    async def test_llm_key_create_autogen(
+        self,
+        user_uuid: str,
+        llm_key_ref: ObjectReference,
+    ) -> None:
+        # Call create_autogen
+        actual_api_key = await create_autogen(
+            model_ref=llm_key_ref,
+            user_uuid=user_uuid,
+        )
+        assert isinstance(actual_api_key, str)
diff --git a/tests/models/llms/test_openai.py b/tests/models/llms/test_openai.py
new file mode 100644
index 00000000..8d3ff488
--- /dev/null
+++ b/tests/models/llms/test_openai.py
@@ -0,0 +1,237 @@
+import json
+import uuid
+
+import jsondiff
+import openai
+import pytest
+
+from fastagency_studio.helpers import get_model_by_ref
+from fastagency_studio.models.base import ObjectReference
+from fastagency_studio.models.llms.openai import OpenAI, OpenAIAPIKey, OpenAIModels
+from tests.helpers import get_by_tag, parametrize_fixtures
+
+
+def test_import(monkeypatch: pytest.MonkeyPatch) -> None:
+    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+
+    from fastagency_studio.models.llms.openai import OpenAI, OpenAIAPIKey
+
+    assert OpenAI is not None
+    assert OpenAIAPIKey is not None
+
+
+class TestOpenAIAPIKey:
+    @pytest.mark.parametrize(
+        "openai_api_key",
+        [
+            "sk-sUeBP9asw6GiYHXqtg70T3BlbkFJJuLwJFco90bOpU0Ntest",  # pragma: allowlist secret
+            # OpenAI currently supports three prefixes for API keys:
+            # project-based API key format
+            "sk-proj-SomeLengthStringWhichCanHave-and_inItAndTheLengthCanBeChangedAtAnyTime",  # pragma: allowlist secret
+            # user-level API key format
+            "sk-None-SomeLengthStringWhichCanHave-and_inItAndTheLengthCanBeChangedAtAnyTime",  # pragma: allowlist secret
+            # service account API key format
+            "sk-svcacct-SomeLengthStringWhichCanHave-and_inItAndTheLengthCanBeChangedAtAnyTime",  # pragma: allowlist secret
+        ],
+    )
+    def test_constructor_success(self, openai_api_key: str) -> None:
+        api_key = OpenAIAPIKey(
+            api_key=openai_api_key,
+            name="Hello World!",
+        )  # pragma: allowlist secret
+        assert api_key.api_key == openai_api_key  # pragma: allowlist secret
+
+    def test_constructor_failure(self) -> None:
+        with pytest.raises(ValueError, match="Invalid OpenAI API Key"):
+            OpenAIAPIKey(
+                api_key="_sk-sUeBP9asw6GiYHXqtg70T3BlbkFJJuLwJFco90bOpU0Ntest",  # pragma: allowlist secret
+                name="Hello World!",
+            )  # pragma: allowlist secret
+
+
+class TestOpenAI:
+    @pytest.mark.db
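+    # note: parametrize_fixtures (tests/helpers.py) reruns this test once per
+    # fixture tagged "openai-llm", resolving each one via request.getfixturevalue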
@pytest.mark.asyncio + @parametrize_fixtures("openai_oai_ref", get_by_tag("openai-llm")) + async def test_openai_constructor(self, openai_oai_ref: ObjectReference) -> None: + # create data + model = await get_model_by_ref(openai_oai_ref) + assert isinstance(model, OpenAI) + + # dynamically created data + name = model.name + api_key_uuid = model.api_key.uuid # type: ignore [attr-defined] + + expected = { + "name": name, + "model": model.model, + "api_key": { + "type": "secret", + "name": "OpenAIAPIKey", + "uuid": api_key_uuid, + }, + "base_url": "https://api.openai.com/v1", + "api_type": "openai", + "temperature": 0.0, + } + assert model.model_dump() == expected + + @pytest.mark.openai + def test_openai_model_list(self) -> None: + client = openai.OpenAI() + + model_list = [m.id for m in client.models.list() if "gpt-" in m.id] + # print(f"{model_list=}") + + assert set(model_list) == set(OpenAIModels.__args__), OpenAIModels.__args__ # type: ignore[attr-defined] + + def test_openai_schema(self, pydantic_version: float) -> None: + schema = OpenAI.model_json_schema() + expected = { + "$defs": { + "OpenAIAPIKeyRef": { + "properties": { + "type": { + "const": "secret", + "default": "secret", + "description": "The name of the type of the data", + "enum": ["secret"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "OpenAIAPIKey", + "default": "OpenAIAPIKey", + "description": "The name of the data", + "enum": ["OpenAIAPIKey"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "OpenAIAPIKeyRef", + "type": "object", + } + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "model": { + "default": "gpt-3.5-turbo", + "description": "The model to use for the OpenAI API, e.g. 'gpt-3.5-turbo'", + "enum": [ + "gpt-4o-2024-08-06", + "gpt-4-1106-preview", + "gpt-4-0613", + "gpt-4", + "chatgpt-4o-latest", + "gpt-4-turbo-preview", + "gpt-4-0125-preview", + "gpt-3.5-turbo", + "gpt-3.5-turbo-1106", + "gpt-4o-mini-2024-07-18", + "gpt-3.5-turbo-0125", + "gpt-4o-mini", + "gpt-3.5-turbo-16k", + "gpt-4-turbo-2024-04-09", + "gpt-3.5-turbo-instruct-0914", + "gpt-3.5-turbo-instruct", + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4-turbo", + ], + "metadata": { + "tooltip_message": "Choose the model that the LLM uses to interact with OpenAI services." + }, + "title": "Model", + "type": "string", + }, + "api_key": { + "$ref": "#/$defs/OpenAIAPIKeyRef", + "description": "The API Key from OpenAI", + "metadata": { + "tooltip_message": "Choose the API key that will be used to authenticate requests to OpenAI services." + }, + "title": "API Key", + }, + "base_url": { + "default": "https://api.openai.com/v1", + "description": "The base URL of the OpenAI API", + "format": "uri", + "maxLength": 2083, + "metadata": { + "tooltip_message": "The base URL that the LLM uses to interact with OpenAI services." 
+ }, + "minLength": 1, + "title": "Base URL", + "type": "string", + }, + "api_type": { + "const": "openai", + "default": "openai", + "description": "The type of the API, must be 'openai'", + "enum": ["openai"], + "title": "API Type", + "type": "string", + }, + "temperature": { + "default": 0.8, + "description": "The temperature to use for the model, must be between 0 and 2", + "maximum": 2.0, + "minimum": 0.0, + "metadata": { + "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2." + }, + "title": "Temperature", + "type": "number", + }, + }, + "required": ["name", "api_key"], + "title": "OpenAI", + "type": "object", + } + # print(schema) + pydantic28_delta = '{"properties": {"api_key": {"allOf": [{"$$ref": "#/$defs/OpenAIAPIKeyRef"}], "$delete": ["$$ref"]}}}' + if pydantic_version < 2.9: + # print(f"pydantic28_delta = '{jsondiff.diff(expected, schema, dump=True)}'") + expected = jsondiff.patch(json.dumps(expected), pydantic28_delta, load=True) + assert schema == expected + + @pytest.mark.asyncio + @pytest.mark.db + @parametrize_fixtures("openai_oai_ref", get_by_tag("openai-llm")) + async def test_openai_model_create_autogen( + self, + user_uuid: str, + openai_oai_ref: ObjectReference, + ) -> None: + actual_llm_config = await OpenAI.create_autogen( + model_id=openai_oai_ref.uuid, + user_id=uuid.UUID(user_uuid), + ) + assert isinstance(actual_llm_config, dict) + api_key = actual_llm_config["config_list"][0]["api_key"] + model = actual_llm_config["config_list"][0]["model"] + expected = { + "config_list": [ + { + "model": model, + "api_key": api_key, + "base_url": "https://api.openai.com/v1", + "api_type": "openai", + } + ], + "temperature": 0.0, + } + + assert actual_llm_config == expected diff --git a/tests/models/llms/test_together.py b/tests/models/llms/test_together.py new file mode 100644 index 00000000..3b311a40 --- /dev/null +++ b/tests/models/llms/test_together.py @@ -0,0 +1,220 @@ +import json +import uuid + +import jsondiff +import pytest +import together + +from fastagency_studio.helpers import get_model_by_ref +from fastagency_studio.models.base import ObjectReference +from fastagency_studio.models.llms.together import ( + TogetherAI, + TogetherAIAPIKey, + together_model_string, +) + + +def test_import(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.delenv("TOGETHER_API_KEY", raising=False) + + from fastagency_studio.models.llms.together import TogetherAI, TogetherAIAPIKey + + assert TogetherAI is not None + assert TogetherAIAPIKey is not None + + +class TestTogetherAIAPIKey: + def test_constructor_success(self) -> None: + api_key = TogetherAIAPIKey( + api_key="*" * 64, # pragma: allowlist secret + name="Hello World!", + ) # pragma: allowlist secret + assert ( + api_key.api_key == "*" * 64 # pragma: allowlist secret + ) # pragma: allowlist secret + + def test_constructor_failure(self) -> None: + with pytest.raises( + ValueError, match="String should have at least 64 characters" + ): + TogetherAIAPIKey( + api_key="not a proper key", # pragma: allowlist secret + name="Hello World!", + ) # pragma: allowlist secret + + +class TestTogetherAI: + @pytest.mark.togetherai + def test_together_model_string(self) -> None: + # requires that environment variables TOGETHER_API_KEY is set + client = together.Together() + + expected_together_model_string: dict[str, str] = { + model.display_name: model.id + for model in 
client.models.list() + if model.type == "chat" + } + + # print(expected_together_model_string) + assert together_model_string == expected_together_model_string + + @pytest.mark.db + @pytest.mark.asyncio + async def test_togetherai_constructor( + self, + togetherai_ref: ObjectReference, + ) -> None: + # create data + model = await get_model_by_ref(togetherai_ref) + assert isinstance(model, TogetherAI) + + # dynamically created data + name = model.name + api_key_uuid = model.api_key.uuid # type: ignore [attr-defined] + + expected = { + "name": name, + "model": "Mixtral-8x7B Instruct v0.1", + "api_key": { + "type": "secret", + "name": "TogetherAIAPIKey", + "uuid": api_key_uuid, + }, + "base_url": "https://api.together.xyz/v1", + "api_type": "togetherai", + "temperature": 0.0, + } + assert model.model_dump() == expected + + def test_togetherai_schema(self, pydantic_version: float) -> None: + schema = TogetherAI.model_json_schema() + expected = { + "$defs": { + "TogetherAIAPIKeyRef": { + "properties": { + "type": { + "const": "secret", + "default": "secret", + "description": "The name of the type of the data", + "enum": ["secret"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "TogetherAIAPIKey", + "default": "TogetherAIAPIKey", + "description": "The name of the data", + "enum": ["TogetherAIAPIKey"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "TogetherAIAPIKeyRef", + "type": "object", + } + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "model": { + "default": "Meta Llama 3 70B Instruct Reference", + "description": "The model to use for the Together API", + "metadata": { + "tooltip_message": "Choose the model that the LLM uses to interact with Together AI services." + }, + "title": "Model", + "type": "string", + }, + "api_key": { + "$ref": "#/$defs/TogetherAIAPIKeyRef", + "description": "The API Key from Together.ai", + "metadata": { + "tooltip_message": "Choose the API key that will be used to authenticate requests to Together AI services." + }, + "title": "API Key", + }, + "base_url": { + "default": "https://api.together.xyz/v1", + "description": "The base URL of the OpenAI API", + "format": "uri", + "maxLength": 2083, + "minLength": 1, + "metadata": { + "tooltip_message": "The base URL that the LLM uses to interact with Together AI services." + }, + "title": "Base URL", + "type": "string", + }, + "api_type": { + "const": "togetherai", + "default": "togetherai", + "description": "The type of the API, must be 'togetherai'", + "enum": ["togetherai"], + "title": "API Type", + "type": "string", + }, + "temperature": { + "default": 0.8, + "description": "The temperature to use for the model, must be between 0 and 2", + "maximum": 2.0, + "metadata": { + "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2." 
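+                        # note: together_model_string maps display names to API model ids,
+                        # e.g. "Mixtral-8x7B Instruct v0.1" -> "mistralai/Mixtral-8x7B-Instruct-v0.1"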
+                    },
+                    "minimum": 0.0,
+                    "title": "Temperature",
+                    "type": "number",
+                },
+            },
+            "required": ["name", "api_key"],
+            "title": "TogetherAI",
+            "type": "object",
+        }
+        assert (
+            "Meta Llama 3 70B Instruct Reference"
+            in schema["properties"]["model"]["enum"]
+        )
+        schema["properties"]["model"].pop("enum")
+        # print(schema)
+        # pydantic < 2.9 wraps the single api_key $ref in an allOf; patch the
+        # expected schema with jsondiff so the assertion holds on older versions.
+        pydantic28_delta = '{"properties": {"api_key": {"allOf": [{"$$ref": "#/$defs/TogetherAIAPIKeyRef"}], "$delete": ["$$ref"]}}}'
+        if pydantic_version < 2.9:
+            # print(f"pydantic28_delta = '{jsondiff.diff(expected, schema, dump=True)}'")
+            expected = jsondiff.patch(json.dumps(expected), pydantic28_delta, load=True)
+        assert schema == expected
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    async def test_togetherai_model_create_autogen(
+        self,
+        user_uuid: str,
+        togetherai_ref: ObjectReference,
+    ) -> None:
+        actual_llm_config = await TogetherAI.create_autogen(
+            model_id=togetherai_ref.uuid,
+            user_id=uuid.UUID(user_uuid),
+        )
+        assert isinstance(actual_llm_config, dict)
+        api_key = actual_llm_config["config_list"][0]["api_key"]
+        expected = {
+            "config_list": [
+                {
+                    "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+                    "api_key": api_key,
+                    "base_url": "https://api.together.xyz/v1",
+                    "api_type": "togetherai",
+                }
+            ],
+            "temperature": 0.0,
+        }
+
+        assert actual_llm_config == expected
diff --git a/tests/models/secrets/__init__.py b/tests/models/secrets/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/models/secrets/test_fly_token.py b/tests/models/secrets/test_fly_token.py
new file mode 100644
index 00000000..fcdafe54
--- /dev/null
+++ b/tests/models/secrets/test_fly_token.py
@@ -0,0 +1,55 @@
+import uuid
+from typing import Any
+
+import pytest
+from fastapi import BackgroundTasks
+
+from fastagency_studio.app import add_model
+from fastagency_studio.models.base import Model
+from fastagency_studio.models.secrets.fly_token import FlyToken
+
+
+class TestFlyToken:
+    def test_constructor_success(self) -> None:
+        fly_token = FlyToken(
+            fly_token="*" * 64,  # pragma: allowlist secret
+            name="Hello World!",
+        )  # pragma: allowlist secret
+        assert (
+            fly_token.fly_token == "*" * 64  # pragma: allowlist secret
+        )  # pragma: allowlist secret
+
+    @pytest.mark.asyncio
+    @pytest.mark.db
+    @pytest.mark.parametrize("fly_token_model", [(FlyToken)])
+    async def test_fly_token_model_create_autogen(
+        self,
+        fly_token_model: Model,
+        azure_gpt35_turbo_16k_llm_config: dict[str, Any],
+        user_uuid: str,
+        monkeypatch: pytest.MonkeyPatch,
+    ) -> None:
+        dummy_fly_token = "*" * 64  # pragma: allowlist secret
+
+        # Add secret to database
+        fly_token = fly_token_model(  # type: ignore [operator]
+            fly_token=dummy_fly_token,
+            name="fly_token_model_name",
+        )
+        fly_token_model_uuid = str(uuid.uuid4())
+        await add_model(
+            user_uuid=user_uuid,
+            type_name="secret",
+            model_name=fly_token_model.__name__,  # type: ignore [attr-defined]
+            model_uuid=fly_token_model_uuid,
+            model=fly_token.model_dump(),
+            background_tasks=BackgroundTasks(),
+        )
+
+        # Call create_autogen
+        actual_fly_token = await FlyToken.create_autogen(
+            model_id=uuid.UUID(fly_token_model_uuid),
+            user_id=uuid.UUID(user_uuid),
+        )
+        assert isinstance(actual_fly_token, str)
+        assert actual_fly_token == fly_token.fly_token
diff --git a/tests/models/secrets/test_github_token.py b/tests/models/secrets/test_github_token.py
new file mode 100644
index 00000000..e46e6f73
--- /dev/null
+++ b/tests/models/secrets/test_github_token.py
@@ -0,0 +1,55 @@
+import uuid
+from typing import Any
+
+import pytest
+from
fastapi import BackgroundTasks + +from fastagency_studio.app import add_model +from fastagency_studio.models.base import Model +from fastagency_studio.models.secrets.github_token import GitHubToken + + +class TestGitHubToken: + def test_constructor_success(self) -> None: + gh_token = GitHubToken( + gh_token="*" * 64, # pragma: allowlist secret + name="Hello World!", + ) # pragma: allowlist secret + assert ( + gh_token.gh_token == "*" * 64 # pragma: allowlist secret + ) # pragma: allowlist secret + + @pytest.mark.asyncio + @pytest.mark.db + @pytest.mark.parametrize("gh_token_model", [(GitHubToken)]) + async def test_github_token_model_create_autogen( + self, + gh_token_model: Model, + azure_gpt35_turbo_16k_llm_config: dict[str, Any], + user_uuid: str, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + dummy_github_token = "*" * 64 # pragma: allowlist secret + + # Add secret to database + gh_token = gh_token_model( # type: ignore [operator] + gh_token=dummy_github_token, + name="gh_token_model_name", + ) + gh_token_model_uuid = str(uuid.uuid4()) + await add_model( + user_uuid=user_uuid, + type_name="secret", + model_name=gh_token_model.__name__, # type: ignore [attr-defined] + model_uuid=gh_token_model_uuid, + model=gh_token.model_dump(), + background_tasks=BackgroundTasks(), + ) + + # Call create_autogen + actual_gh_token = await GitHubToken.create_autogen( + model_id=uuid.UUID(gh_token_model_uuid), + user_id=uuid.UUID(user_uuid), + ) + assert isinstance(actual_gh_token, str) + assert actual_gh_token == gh_token.gh_token diff --git a/tests/models/teams/__init__.py b/tests/models/teams/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/models/teams/test_base.py b/tests/models/teams/test_base.py new file mode 100644 index 00000000..579295ca --- /dev/null +++ b/tests/models/teams/test_base.py @@ -0,0 +1,6 @@ +import pytest + + +@pytest.mark.skip(reason="Not implemented yet") +class TestRegisterToolboxFunctions: + pass diff --git a/tests/models/teams/test_multi_agents_team.py b/tests/models/teams/test_multi_agents_team.py new file mode 100644 index 00000000..1d24afda --- /dev/null +++ b/tests/models/teams/test_multi_agents_team.py @@ -0,0 +1,443 @@ +import uuid +from typing import Any + +import pytest +from pydantic import ValidationError + +from fastagency_studio.models.agents.assistant import AssistantAgent +from fastagency_studio.models.agents.web_surfer import WebSurferAgent +from fastagency_studio.models.base import Model +from fastagency_studio.models.llms.azure import AzureOAI, AzureOAIAPIKey +from fastagency_studio.models.llms.openai import OpenAI +from fastagency_studio.models.teams.multi_agent_team import MultiAgentTeam + + +@pytest.mark.skip(reason="Temporarily disabling multi agent team") +class TestMultiAgentTeam: + @pytest.mark.parametrize("llm_model", [OpenAI, AzureOAI]) + def test_multi_agent_constructor(self, llm_model: Model) -> None: + llm_uuid = uuid.uuid4() + llm = llm_model.get_reference_model()(uuid=llm_uuid) + + summarizer_llm_uuid = uuid.uuid4() + summarizer_llm = llm_model.get_reference_model()(uuid=summarizer_llm_uuid) + + assistant_1 = AssistantAgent( + llm=llm, name="Assistant", system_message="test system message" + ) + assistant_2 = AssistantAgent( + llm=llm, name="Assistant", system_message="test system message" + ) + web_surfer = WebSurferAgent( + name="WebSurfer", llm=llm, summarizer_llm=summarizer_llm + ) + + assistant_1_uuid = uuid.uuid4() + assistant_1_ref = assistant_1.get_reference_model()(uuid=assistant_1_uuid) + assistant_2_uuid 
= uuid.uuid4() + assistant_2_ref = assistant_2.get_reference_model()(uuid=assistant_2_uuid) + web_surfer_uuid = uuid.uuid4() + web_surfer_ref = web_surfer.get_reference_model()(uuid=web_surfer_uuid) + + try: + team = MultiAgentTeam( + name="MultiAgentTeam", + agent_1=assistant_1_ref, + agent_2=assistant_2_ref, + web_surfer_ref=web_surfer_ref, + ) + except ValidationError: + # print(f"{e.errors()=}") + raise + + assert team + + def test_multi_agent_model_schema(self) -> None: + schema = MultiAgentTeam.model_json_schema() + expected = { + "$defs": { + "AssistantAgentRef": { + "properties": { + "type": { + "const": "agent", + "default": "agent", + "description": "The name of the type of the data", + "enum": ["agent"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "AssistantAgent", + "default": "AssistantAgent", + "description": "The name of the data", + "enum": ["AssistantAgent"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "AssistantAgentRef", + "type": "object", + }, + "UserProxyAgentRef": { + "properties": { + "type": { + "const": "agent", + "default": "agent", + "description": "The name of the type of the data", + "enum": ["agent"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "UserProxyAgent", + "default": "UserProxyAgent", + "description": "The name of the data", + "enum": ["UserProxyAgent"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "UserProxyAgentRef", + "type": "object", + }, + "WebSurferAgentRef": { + "properties": { + "type": { + "const": "agent", + "default": "agent", + "description": "The name of the type of the data", + "enum": ["agent"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "WebSurferAgent", + "default": "WebSurferAgent", + "description": "The name of the data", + "enum": ["WebSurferAgent"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "WebSurferAgentRef", + "type": "object", + }, + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "termination_message_regex": { + "default": "^TERMINATE$", + "description": "Whether the message is a termination message or not. 
If it is a termination message, the agent will not respond to it.", + "title": "Termination Message Regex", + "type": "string", + }, + "human_input_mode": { + "default": "ALWAYS", + "description": "Mode for human input", + "enum": ["ALWAYS", "TERMINATE", "NEVER"], + "title": "Human input mode", + "type": "string", + }, + "agent_1": { + "anyOf": [ + {"$ref": "#/$defs/AssistantAgentRef"}, + {"$ref": "#/$defs/UserProxyAgentRef"}, + {"$ref": "#/$defs/WebSurferAgentRef"}, + ], + "description": "An agent in the team", + "title": "Agents", + }, + "agent_2": { + "anyOf": [ + {"$ref": "#/$defs/AssistantAgentRef"}, + {"$ref": "#/$defs/UserProxyAgentRef"}, + {"$ref": "#/$defs/WebSurferAgentRef"}, + ], + "description": "An agent in the team", + "title": "Agents", + }, + "agent_3": { + "anyOf": [ + {"$ref": "#/$defs/AssistantAgentRef"}, + {"$ref": "#/$defs/UserProxyAgentRef"}, + {"$ref": "#/$defs/WebSurferAgentRef"}, + {"type": "null"}, + ], + "default": None, + "description": "An agent in the team", + "title": "Agents", + }, + "agent_4": { + "anyOf": [ + {"$ref": "#/$defs/AssistantAgentRef"}, + {"$ref": "#/$defs/UserProxyAgentRef"}, + {"$ref": "#/$defs/WebSurferAgentRef"}, + {"type": "null"}, + ], + "default": None, + "description": "An agent in the team", + "title": "Agents", + }, + "agent_5": { + "anyOf": [ + {"$ref": "#/$defs/AssistantAgentRef"}, + {"$ref": "#/$defs/UserProxyAgentRef"}, + {"$ref": "#/$defs/WebSurferAgentRef"}, + {"type": "null"}, + ], + "default": None, + "description": "An agent in the team", + "title": "Agents", + }, + }, + "required": ["name", "agent_1", "agent_2"], + "title": "MultiAgentTeam", + "type": "object", + } + # print(f"{schema=}") + assert schema == expected + + @pytest.mark.parametrize("llm_model", [OpenAI, AzureOAI]) + def test_multi_agent_model_validation(self, llm_model: Model) -> None: + llm_uuid = uuid.uuid4() + llm = llm_model.get_reference_model()(uuid=llm_uuid) + + summarizer_llm_uuid = uuid.uuid4() + summarizer_llm = llm_model.get_reference_model()(uuid=summarizer_llm_uuid) + + assistant_1 = AssistantAgent( + llm=llm, name="Assistant", system_message="test system message" + ) + assistant_2 = AssistantAgent( + llm=llm, name="Assistant", system_message="test system message" + ) + web_surfer = WebSurferAgent( + name="WebSurfer", llm=llm, summarizer_llm=summarizer_llm + ) + + assistant_1_uuid = uuid.uuid4() + assistant_1_ref = assistant_1.get_reference_model()(uuid=assistant_1_uuid) + assistant_2_uuid = uuid.uuid4() + assistant_2_ref = assistant_2.get_reference_model()(uuid=assistant_2_uuid) + web_surfer_uuid = uuid.uuid4() + web_surfer_ref = web_surfer.get_reference_model()(uuid=web_surfer_uuid) + + team = MultiAgentTeam( + name="MultiAgentTeam", + agent_1=assistant_1_ref, + agent_2=assistant_2_ref, + web_surfer_ref=web_surfer_ref, + ) + + team_json = team.model_dump_json() + assert team_json is not None + + validated_team = MultiAgentTeam.model_validate_json(team_json) + assert validated_team is not None + assert validated_team == team + + @pytest.mark.skip(reason="Temporarily disabling multi agent team") + @pytest.mark.asyncio + @pytest.mark.db + @pytest.mark.parametrize("enable_monkeypatch", [True, False]) + @pytest.mark.parametrize( + "llm_model,api_key_model", # noqa: PT006 + [ + (AzureOAI, AzureOAIAPIKey), + ], + ) + async def test_multi_agent_team_autogen( + self, + enable_monkeypatch: bool, + llm_model: Model, + api_key_model: Model, + azure_gpt35_turbo_16k_llm_config: dict[str, Any], + user_uuid: str, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + 
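+        # The original body is kept below in commented-out form so it can be
+        # restored when MultiAgentTeam support is re-enabled (see the skip
+        # markers above).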
pass + # Add secret, llm, agent, team to database + # api_key = api_key_model( # type: ignore [operator] + # api_key = os.getenv("AZURE_OPENAI_API_KEY", default="*" * 64), + # name="api_key_model_name", + # ) + # api_key_model_uuid = str(uuid.uuid4()) + # await add_model( + # user_uuid=user_uuid, + # type_name="secret", + # model_name=api_key_model.__name__, # type: ignore [attr-defined] + # model_uuid=api_key_model_uuid, + # model=api_key.model_dump(), + # ) + + # llm = llm_model( # type: ignore [operator] + # name="llm_model_name", + # model=os.getenv(AZURE_GPT35_MODEL, default="gpt-35-turbo-16k"), + # api_key=api_key.get_reference_model()(uuid=api_key_model_uuid), + # base_url=os.getenv( + # "AZURE_API_ENDPOINT", default="https://my-deployment.openai.azure.com" + + # ), + # api_version=os.getenv("AZURE_API_VERSION", default="2024-02-01"), + # ) + # llm_model_uuid = str(uuid.uuid4()) + # await add_model( + # user_uuid=user_uuid, + # type_name="llm", + # model_name=llm_model.__name__, # type: ignore [attr-defined] + # model_uuid=llm_model_uuid, + # model=llm.model_dump(), + # ) + + # user_proxy_model = UserProxyAgent( + # name="UserProxyAgent", + # llm=llm.get_reference_model()(uuid=llm_model_uuid), + # ) + # user_proxy_model_uuid = str(uuid.uuid4()) + # await add_model( + # user_uuid=user_uuid, + # type_name="agent", + # model_name=UserProxyAgent.__name__, + # model_uuid=user_proxy_model_uuid, + # model=user_proxy_model.model_dump(), + # ) + + # weatherman_assistant_model_1 = AssistantAgent( + # llm=llm.get_reference_model()(uuid=llm_model_uuid), + # name="Assistant", + # system_message="test system message", + # ) + # weatherman_assistant_model_1_uuid = str(uuid.uuid4()) + # await add_model( + # user_uuid=user_uuid, + # type_name="agent", + # model_name=AssistantAgent.__name__, + # model_uuid=weatherman_assistant_model_1_uuid, + # model=weatherman_assistant_model_1.model_dump(), + # ) + + # team_model_uuid = str(uuid.uuid4()) + # agent_1 = user_proxy_model.get_reference_model()(uuid=user_proxy_model_uuid) + # agent_2 = weatherman_assistant_model_1.get_reference_model()( + # uuid=weatherman_assistant_model_1_uuid + # ) + + # team = MultiAgentTeam( + # name="MultiAgentTeam", + # agent_1=agent_1, + # agent_2=agent_2, + # ) + # await add_model( + # user_uuid=user_uuid, + # type_name="team", + # model_name=MultiAgentTeam.__name__, + # model_uuid=team_model_uuid, + # model=team.model_dump(), + # ) + + # # Then create autogen agents by monkeypatching create_autogen method + # user_proxy_agent = autogen.agentchat.UserProxyAgent( + # "user_proxy", + # code_execution_config=False, + # ) + + # weatherman_agent_1 = autogen.agentchat.AssistantAgent( + # name="weather_man_1", + # system_message="You are the weather man. Ask the user to give you the name of a city and then provide the weather forecast for that city.", + # llm_config=llm_config, + # code_execution_config=False, + # ) + + # get_forecast_for_city_mock = MagicMock() + + # # @user_proxy_agent.register_for_execution() # type: ignore [misc] + # # @weatherman_agent_1.register_for_llm( + # # description="Get weather forecast for a city" + # # ) # type: ignore [misc] + # def get_forecast_for_city(city: str) -> str: + # get_forecast_for_city_mock(city) + # return f"The weather in {city} is sunny today." 
+ + # async def weatherman_create_autogen( # type: ignore [no-untyped-def] + # cls, model_id, user_id + # ) -> autogen.agentchat.AssistantAgent: + # f_info = FunctionInfo( + # function=get_forecast_for_city, + # description="Get weather forecast for a city", + # name="get_forecast_for_city", + # ) + # return weatherman_agent_1, [f_info] + + # async def user_proxy_create_autogen( # type: ignore [no-untyped-def] + # cls, model_id, user_id + # ) -> autogen.agentchat.UserProxyAgent: + # return user_proxy_agent, [] + + # if enable_monkeypatch: + # monkeypatch.setattr( + # AssistantAgent, "create_autogen", weatherman_create_autogen + # ) + + # monkeypatch.setattr( + # UserProxyAgent, "create_autogen", user_proxy_create_autogen + # ) + + # team = await MultiAgentTeam.create_autogen( + # model_id=uuid.UUID(team_model_uuid), user_id=uuid.UUID(user_uuid) + # ) + + # assert hasattr(team, "initiate_chat") + + # d = {"count": 0} + + # def input(prompt: str, d: Dict[str, int] = d) -> str: + # d["count"] += 1 + # if d["count"] == 1: + # return f"[{datetime.now()}] What's the weather in New York today?" + # elif d["count"] == 2: + # return "" + # else: + # return "exit" + + # monkeypatch.setattr(IOConsole, "input", lambda self, prompt: input(prompt)) + + # chat_result = team.initiate_chat( + # message="Hi! Tell me the city for which you want the weather forecast.", + # ) + + # last_message = chat_result.chat_history[-1] + + # if enable_monkeypatch: + # get_forecast_for_city_mock.assert_called_once_with("New York") + # # get_forecast_for_city_mock.assert_not_called() + # assert "sunny" in last_message["content"] + # else: + # # assert "sunny" not in last_message["content"] + # # assert "weather" in last_message["content"] + # assert isinstance(last_message["content"], str) diff --git a/tests/models/teams/test_two_agents_team.py b/tests/models/teams/test_two_agents_team.py new file mode 100644 index 00000000..54ea99e6 --- /dev/null +++ b/tests/models/teams/test_two_agents_team.py @@ -0,0 +1,251 @@ +import uuid + +import pytest +from pydantic import ValidationError + +from fastagency_studio.helpers import create_autogen +from fastagency_studio.models.agents.assistant import AssistantAgent +from fastagency_studio.models.agents.web_surfer import WebSurferAgent +from fastagency_studio.models.base import Model, ObjectReference +from fastagency_studio.models.llms.azure import AzureOAI +from fastagency_studio.models.llms.openai import OpenAI +from fastagency_studio.models.teams.two_agent_teams import TwoAgentTeam +from tests.helpers import get_by_tag, parametrize_fixtures + + +class TestTwoAgentTeam: + @pytest.mark.parametrize("llm_model", [OpenAI, AzureOAI]) + def test_two_agent_constructor(self, llm_model: Model) -> None: + llm_uuid = uuid.uuid4() + llm = llm_model.get_reference_model()(uuid=llm_uuid) + + summarizer_llm_uuid = uuid.uuid4() + summarizer_llm = llm_model.get_reference_model()(uuid=summarizer_llm_uuid) + + assistant = AssistantAgent( + llm=llm, name="Assistant", system_message="test system message" + ) + web_surfer = WebSurferAgent( + name="WebSurfer", llm=llm, summarizer_llm=summarizer_llm + ) + + assistant_uuid = uuid.uuid4() + assistant_ref = assistant.get_reference_model()(uuid=assistant_uuid) + web_surfer_uuid = uuid.uuid4() + web_surfer_ref = web_surfer.get_reference_model()(uuid=web_surfer_uuid) + + try: + team = TwoAgentTeam( + name="TwoAgentTeam", + initial_agent=assistant_ref, + secondary_agent=web_surfer_ref, + ) + except ValidationError: + # print(f"{e.errors()=}") + raise + + assert 
team + + def test_two_agents_team_schema(self) -> None: + schema = TwoAgentTeam.model_json_schema() + expected = { + "$defs": { + "AssistantAgentRef": { + "properties": { + "type": { + "const": "agent", + "default": "agent", + "description": "The name of the type of the data", + "enum": ["agent"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "AssistantAgent", + "default": "AssistantAgent", + "description": "The name of the data", + "enum": ["AssistantAgent"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "AssistantAgentRef", + "type": "object", + }, + "UserProxyAgentRef": { + "properties": { + "type": { + "const": "agent", + "default": "agent", + "description": "The name of the type of the data", + "enum": ["agent"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "UserProxyAgent", + "default": "UserProxyAgent", + "description": "The name of the data", + "enum": ["UserProxyAgent"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "UserProxyAgentRef", + "type": "object", + }, + "WebSurferAgentRef": { + "properties": { + "type": { + "const": "agent", + "default": "agent", + "description": "The name of the type of the data", + "enum": ["agent"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "WebSurferAgent", + "default": "WebSurferAgent", + "description": "The name of the data", + "enum": ["WebSurferAgent"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "WebSurferAgentRef", + "type": "object", + }, + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "is_termination_msg_regex": { + "default": "TERMINATE", + "description": "Whether the message is a termination message or not. If it is a termination message, the chat will terminate.", + "metadata": { + "tooltip_message": "The termination message regular expression format. The LLM uses this pattern to decide when to end the chat if the message matches." + }, + "title": "Is Termination Msg Regex", + "type": "string", + }, + "human_input_mode": { + "default": "ALWAYS", + "description": "Mode for human input", + "enum": ["ALWAYS", "TERMINATE", "NEVER"], + "metadata": { + "tooltip_message": "Select the human input mode to control the level of human involvement. Modes include NEVER (full autonomy), TERMINATE (human input requested upon termination), and ALWAYS (input required after every message)." + }, + "title": "Human Input Mode", + "type": "string", + }, + "initial_agent": { + "anyOf": [ + {"$ref": "#/$defs/AssistantAgentRef"}, + {"$ref": "#/$defs/UserProxyAgentRef"}, + {"$ref": "#/$defs/WebSurferAgentRef"}, + ], + "description": "Agent that starts the conversation", + "metadata": { + "tooltip_message": "Select the Initial Agent, the agent responsible for task orchestration. It interacts with users and assigns tasks to Secondary Agent, enhancing the efficiency of complex operations." 
+ }, + "title": "Initial Agent", + }, + "secondary_agent": { + "anyOf": [ + {"$ref": "#/$defs/AssistantAgentRef"}, + {"$ref": "#/$defs/UserProxyAgentRef"}, + {"$ref": "#/$defs/WebSurferAgentRef"}, + ], + "description": "Agent that continues the conversation", + "metadata": { + "tooltip_message": "Select the Secondary Agent, the agent responsible for collaborating with the Initial Agent in performing specialized tasks. Secondary Agents enhance efficiency by focusing on specific roles, such as data analysis or code execution." + }, + "title": "Secondary Agent", + }, + }, + "required": ["name", "initial_agent", "secondary_agent"], + "title": "TwoAgentTeam", + "type": "object", + } + # print(schema) + assert schema == expected + + +@pytest.mark.db +@pytest.mark.llm +class TestTwoAgentTeamSimpleChat: + @pytest.mark.asyncio + @parametrize_fixtures("team_ref", get_by_tag("team", "noapi")) + async def test_simple_chat( + self, + user_uuid: str, + team_ref: ObjectReference, + ) -> None: + # print(f"test_simple_chat: {team_ref=}") + + ag_team = await create_autogen( + model_ref=team_ref, + user_uuid=user_uuid, + ) + + assert ag_team + history = ag_team.initiate_chat("What is 2 + 2?") + messages = (msg["content"] for msg in history.chat_history) + assert sum("TERMINATE" in msg for msg in messages) == 1 + + @pytest.mark.asyncio + @parametrize_fixtures("team_ref", get_by_tag("team", "weather")) + async def test_chat_with_weatherapi( + self, + user_uuid: str, + team_ref: ObjectReference, + ) -> None: + # print(f"test_simple_chat: {team_ref=}") + + ag_team = await create_autogen( + model_ref=team_ref, + user_uuid=user_uuid, + ) + + assert ag_team + history = ag_team.initiate_chat("What is the weather in New York?") + # print(f"history: {history=}") + assert any( + "sunny" in msg["content"] + for msg in history.chat_history + if "content" in msg and msg["content"] is not None + ) + assert ( + sum( + "TERMINATE" in msg["content"] + for msg in history.chat_history + if "content" in msg and msg["content"] is not None + ) + == 1 + ) diff --git a/tests/models/test_base.py b/tests/models/test_base.py new file mode 100644 index 00000000..c4c3c7b8 --- /dev/null +++ b/tests/models/test_base.py @@ -0,0 +1,77 @@ +import uuid + +import pytest +from pydantic import BaseModel + +from fastagency_studio.models.base import ( + Model, + create_reference_model, + get_reference_model, +) + + +def test_create_reference_model() -> None: + class MyModel(Model): + i: int + s: str + + MyModelRef = create_reference_model(MyModel, type_name="my_type") # noqa: N806 + + assert hasattr(MyModelRef, "get_data_model") + data_model = MyModelRef.get_data_model() + assert data_model == MyModel + + schema = MyModelRef.model_json_schema() + expected = { + "properties": { + "type": { + "const": "my_type", + "default": "my_type", + "description": "The name of the type of the data", + "enum": ["my_type"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "MyModel", + "default": "MyModel", + "description": "The name of the data", + "enum": ["MyModel"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "MyModelRef", + "type": "object", + } + assert schema == expected + + my_uuid = uuid.uuid4() + o = MyModelRef.create(uuid=my_uuid) + dump = o.model_dump() + assert dump == {"type": "my_type", "name": "MyModel", "uuid": my_uuid} + + loaded = MyModelRef(**dump) + assert loaded == o + + 
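+# A rough sketch of the reference round-trip exercised above (illustrative
+# comment only, not an additional test; names are those from
+# test_create_reference_model):
+#
+#     MyModelRef = create_reference_model(MyModel, type_name="my_type")
+#     ref = MyModelRef.create(uuid=uuid.uuid4())  # stores only type/name/uuid
+#     MyModelRef.get_data_model()                 # resolves back to MyModel
+
+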
+def test_get_reference_model() -> None: + class MyModel(Model): + i: int + s: str + + MyModelRef = create_reference_model(MyModel, type_name="my_type") # noqa: N806 + + assert get_reference_model(MyModel) == MyModelRef + assert get_reference_model(MyModelRef) == MyModelRef + with pytest.raises( + ValueError, match="Class 'BaseModel' is not and does not have a reference" + ): + get_reference_model(BaseModel) diff --git a/tests/models/test_registry.py b/tests/models/test_registry.py new file mode 100644 index 00000000..df25552a --- /dev/null +++ b/tests/models/test_registry.py @@ -0,0 +1,230 @@ +import pytest + +from fastagency_studio.models.base import Model +from fastagency_studio.models.registry import ModelSchema, Registry + + +class TestRegistry: + def test_create_reference_success(self) -> None: + registry = Registry() + + MySecretRef = registry.create_reference( # noqa: N806 + type_name="my_secret", model_name="MySecret" + ) + + assert hasattr(MySecretRef, "get_data_model") + with pytest.raises(RuntimeError, match="data class not set"): + MySecretRef.get_data_model() + assert registry._store["my_secret"]["MySecret"] == (None, MySecretRef) + + def test_create_reference_fail(self) -> None: + registry = Registry() + + @registry.register("my_secret") + class MySecret(Model): + key: str + + with pytest.raises(ValueError, match="Reference already created for the model"): + registry.create_reference(type_name="my_secret", model_name="MySecret") + + def test_register_simple_success(self) -> None: + registry = Registry() + + @registry.register("my_type") + class MyModel(Model): + i: int + s: str + + MyModelRef = MyModel.get_reference_model() # noqa: N806 + assert registry._store["my_type"]["MyModel"] == (MyModel, MyModelRef) + + def test_register_complex_with_ref_success(self) -> None: + registry = Registry() + + MySecretRef = registry.create_reference( # noqa: N806 + type_name="my_secret", model_name="MySecret" + ) + + @registry.register("my_type") + class MyModel(Model): + i: int + s: str + secret: MySecretRef # type: ignore[valid-type] + + MyModelRef = MyModel.get_reference_model() # noqa: N806 + assert registry._store["my_type"]["MyModel"] == (MyModel, MyModelRef) + + def test_register_complex_with_nested_model_success(self) -> None: + registry = Registry() + + @registry.register("my_secret") + class MySecret(Model): + key: str + + MySecretRef = MySecret.get_reference_model() # noqa: N806 + + @registry.register("my_type") + class MyModel(Model): + i: int + s: str + secret: MySecretRef # type: ignore[valid-type] + + MyModelRef = MyModel.get_reference_model() # noqa: N806 + assert registry._store["my_type"]["MyModel"] == (MyModel, MyModelRef) + + def test_get_default(self) -> None: + registry = Registry.get_default() + assert isinstance(registry, Registry) + assert Registry.get_default() == registry + + def test_get_dongling_references(self) -> None: + registry = Registry() + + assert registry.get_dongling_references() == [] + + MySecretRef = registry.create_reference( # noqa: N806 + type_name="my_secret", model_name="MySecret" + ) + assert registry.get_dongling_references() == [MySecretRef] + + @registry.register("my_secret") + class MySecret(Model): + key: str + + assert registry.get_dongling_references() == [] + + def test_get_model_schema_simple(self) -> None: + registry = Registry() + + @registry.register("my_type") + class MyModel(Model): + i: int + s: str + + schema = registry.get_model_schema(MyModel) # type: ignore[type-abstract] + expected = ModelSchema( + name="MyModel", + 
json_schema={ + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "i": {"title": "I", "type": "integer"}, + "s": {"title": "S", "type": "string"}, + }, + "required": ["name", "i", "s"], + "title": "MyModel", + "type": "object", + }, + ) + assert schema == expected + + def test_get_model_schema_nested(self) -> None: + registry = Registry() + + @registry.register("my_secret") + class MySecret(Model): + key: str + + MySecretRef = MySecret.get_reference_model() # noqa: N806 + + @registry.register("my_type") + class MyModel(Model): + i: int + s: str + secret: MySecretRef # type: ignore[valid-type] + + schema = registry.get_model_schema(MyModel) # type: ignore[type-abstract] + expected = ModelSchema( + name="MyModel", + json_schema={ + "$defs": { + "MySecretRef": { + "properties": { + "type": { + "const": "my_secret", + "default": "my_secret", + "description": "The name of the type of the data", + "enum": ["my_secret"], + "title": "Type", + "type": "string", + }, + "name": { + "const": "MySecret", + "default": "MySecret", + "description": "The name of the data", + "enum": ["MySecret"], + "title": "Name", + "type": "string", + }, + "uuid": { + "description": "The unique identifier", + "format": "uuid", + "title": "UUID", + "type": "string", + }, + }, + "required": ["uuid"], + "title": "MySecretRef", + "type": "object", + } + }, + "properties": { + "name": { + "description": "The name of the item", + "minLength": 1, + "title": "Name", + "type": "string", + }, + "i": {"title": "I", "type": "integer"}, + "s": {"title": "S", "type": "string"}, + "secret": {"$ref": "#/$defs/MySecretRef"}, + }, + "required": ["name", "i", "s", "secret"], + "title": "MyModel", + "type": "object", + }, + ) + assert schema == expected + + def test_get_model_schemas_simple(self) -> None: + registry = Registry() + + @registry.register("my_type") + class MyModel(Model): + i: int + s: str + + schemas = registry.get_model_schemas("my_type") + assert len(schemas.schemas) == 1 + assert schemas.schemas[0].name == "MyModel" + + def test_get_schemas_simple(self) -> None: + registry = Registry() + + @registry.register("my_type") + class MyModel(Model): + i: int + s: str + + schemas = registry.get_schemas() + assert len(schemas.list_of_schemas) == 1 + assert len(schemas.list_of_schemas[0].schemas) == 1 + assert schemas.list_of_schemas[0].schemas[0].name == "MyModel" + + def test_get_models_refs_by_type(self) -> None: + registry = Registry() + + @registry.register("my_secret") + class MySecretOne(Model): + key: str + + MySecretTwoRef = registry.create_reference( # noqa: N806 + type_name="my_secret", model_name="MySecretTwo" + ) + + refs = registry.get_models_refs_by_type("my_secret") + assert set(refs) == {MySecretOne.get_reference_model(), MySecretTwoRef} diff --git a/tests/models/toolboxes/__init__.py b/tests/models/toolboxes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/models/toolboxes/test_toolbox.py b/tests/models/toolboxes/test_toolbox.py new file mode 100644 index 00000000..5ef00966 --- /dev/null +++ b/tests/models/toolboxes/test_toolbox.py @@ -0,0 +1,81 @@ +from typing import Optional + +import pytest +from pydantic import BaseModel + +from fastagency_studio.helpers import create_autogen, get_model_by_ref +from fastagency_studio.models.base import ObjectReference +from fastagency_studio.models.toolboxes.toolbox import ( + OpenAPI, + Toolbox, +) + + +@pytest.mark.skip("Functionality is not implemented yet") +class 
TestOpenAPIAuth: + pass + + +class TestToolbox: + @pytest.mark.db + @pytest.mark.asyncio + async def test_toolbox_constructor( + self, toolbox_ref: ObjectReference, fastapi_openapi_url: str, user_uuid: str + ) -> None: + toolbox: Toolbox = await get_model_by_ref( # type: ignore[assignment] + model_ref=toolbox_ref + ) + assert toolbox + assert isinstance(toolbox, Toolbox) + assert toolbox.name == "test_toolbox" + assert str(toolbox.openapi_url) == fastapi_openapi_url + + @pytest.mark.db + @pytest.mark.asyncio + async def test_toolbox_create_autogen( + self, + toolbox_ref: ObjectReference, + user_uuid: str, + ) -> None: + api: OpenAPI = await create_autogen( + model_ref=toolbox_ref, + user_uuid=user_uuid, + ) + assert api + assert isinstance(api, OpenAPI) + + assert len(api.registered_funcs) == 3 + + expected = { + "create_item_items_post": "Create Item", + "read_item_items__item_id__get": "Read Item", + "read_root__get": "Read Root", + } + + actual = { + x.__name__: x._description # type: ignore[attr-defined] + for x in api.registered_funcs + } + + assert actual == expected, actual + + # actual = function_infos[0].function() + actual = api.registered_funcs[0]() + expected = {"Hello": "World"} + assert actual == expected, actual + + # actual = function_infos[2].function(item_id=1, q="test") + actual = api.registered_funcs[2](item_id=1, q="test") + expected = {"item_id": 1, "q": "test"} # type: ignore[dict-item] + assert actual == expected, actual + + class Item(BaseModel): + name: str + description: Optional[str] = None + price: float + tax: Optional[float] = None + + # actual = function_infos[1].function(body=Item(name="item", price=1.0)) + actual = api.registered_funcs[1](body=Item(name="item", price=1.0)) + expected = {"name": "item", "description": None, "price": 1.0, "tax": None} # type: ignore[dict-item] + assert actual == expected, actual diff --git a/tests/test_conftest.py b/tests/test_conftest.py new file mode 100644 index 00000000..44421a7d --- /dev/null +++ b/tests/test_conftest.py @@ -0,0 +1,112 @@ +from typing import Any + +import httpx +import pytest + +from fastagency_studio.helpers import get_model_by_ref +from fastagency_studio.models.base import ObjectReference + +from .conftest import find_free_port + + +def test_azure_gpt35_turbo_16k_llm_config( + azure_gpt35_turbo_16k_llm_config: dict[str, Any], +) -> None: + assert set(azure_gpt35_turbo_16k_llm_config.keys()) == { + "config_list", + "temperature", + } + assert isinstance(azure_gpt35_turbo_16k_llm_config["config_list"], list) + assert azure_gpt35_turbo_16k_llm_config["temperature"] == 0.0 + + assert ( + azure_gpt35_turbo_16k_llm_config["config_list"][0]["model"] + == "gpt-35-turbo-16k" + ) + + for k in ["model", "api_key", "base_url", "api_type", "api_version"]: + assert len(azure_gpt35_turbo_16k_llm_config["config_list"][0][k]) > 3 + + +def test_openai_gpt35_turbo_16k_llm_config( + openai_gpt35_turbo_16k_llm_config: dict[str, Any], +) -> None: + api_key = openai_gpt35_turbo_16k_llm_config["config_list"][0]["api_key"] + expected = { + "config_list": [ + { + "model": "gpt-3.5-turbo", + "api_key": api_key, # pragma: allowlist secret + } + ], + "temperature": 0.0, + } + assert openai_gpt35_turbo_16k_llm_config == expected + + +@pytest.mark.db +@pytest.mark.asyncio +async def test_azure_oai_key_ref(azure_oai_key_ref: ObjectReference) -> None: + assert isinstance(azure_oai_key_ref, ObjectReference) + assert azure_oai_key_ref.type == "secret" + assert azure_oai_key_ref.name == "AzureOAIAPIKey" + + azure_oai_key = await 
get_model_by_ref(azure_oai_key_ref) + assert azure_oai_key.name.startswith("azure_oai_key_") + + +@pytest.mark.db +@pytest.mark.asyncio +async def test_azure_oai_gpt35_ref(azure_oai_gpt35_ref: ObjectReference) -> None: + assert isinstance(azure_oai_gpt35_ref, ObjectReference) + assert azure_oai_gpt35_ref.type == "llm" + assert azure_oai_gpt35_ref.name == "AzureOAI" + + azure_oai_key = await get_model_by_ref(azure_oai_gpt35_ref) + assert azure_oai_key.name.startswith("azure_oai_") + + +def test_find_free_port() -> None: + port = find_free_port() + assert isinstance(port, int) + assert 1024 <= port <= 65535 + + +def test_fastapi_openapi(fastapi_openapi_url: str) -> None: + assert isinstance(fastapi_openapi_url, str) + + resp = httpx.get(fastapi_openapi_url) + assert resp.status_code == 200 + resp_json = resp.json() + assert "openapi" in resp_json + assert "servers" in resp_json + assert len(resp_json["servers"]) == 1 + assert resp_json["info"]["title"] == "FastAPI" + + +def test_weather_fastapi_openapi(weather_fastapi_openapi_url: str) -> None: + assert isinstance(weather_fastapi_openapi_url, str) + + resp = httpx.get(weather_fastapi_openapi_url) + assert resp.status_code == 200 + resp_json = resp.json() + assert "openapi" in resp_json + assert "servers" in resp_json + assert len(resp_json["servers"]) == 1 + assert resp_json["info"]["title"] == "Weather" + + +@pytest.mark.db +@pytest.mark.asyncio +async def test_weather_toolbox_ref(weather_toolbox_ref: ObjectReference) -> None: + assert isinstance(weather_toolbox_ref, ObjectReference) + + +@pytest.mark.anthropic +def test_empty_anthropic() -> None: + pass + + +@pytest.mark.openai +def test_empty_openai() -> None: + pass diff --git a/tests/test_nats.py b/tests/test_nats.py new file mode 100644 index 00000000..653d40b3 --- /dev/null +++ b/tests/test_nats.py @@ -0,0 +1,481 @@ +import asyncio +import json +import os +import uuid +from datetime import datetime +from typing import Any, Callable +from unittest.mock import MagicMock + +# from autogen.agentchat import AssistantAgent, UserProxyAgent +import autogen +import pytest +from autogen.io.console import IOConsole +from fastapi import BackgroundTasks +from faststream.nats import TestNatsBroker +from pydantic import BaseModel + +import fastagency_studio.io.ionats +from fastagency_studio.app import add_model +from fastagency_studio.io.ionats import ( # type: ignore [attr-defined] + InputResponseModel, + ServerResponseModel, + broker, + stream, +) +from fastagency_studio.models.agents.assistant import AssistantAgent +from fastagency_studio.models.agents.user_proxy import UserProxyAgent +from fastagency_studio.models.base import Model +from fastagency_studio.models.llms.azure import AzureOAI, AzureOAIAPIKey +from fastagency_studio.models.teams.two_agent_teams import TwoAgentTeam + + +def as_dict(model: BaseModel) -> dict[str, Any]: + return json.loads(model.model_dump_json()) # type: ignore [no-any-return] + + +class TestAutogen: + @pytest.mark.azure_oai + def test_ioconsole( + self, + azure_gpt35_turbo_16k_llm_config: dict[str, Any], + monkeypatch: pytest.MonkeyPatch, + ) -> None: + d = {"count": 0} + + def input(prompt: str, d: dict[str, int] = d) -> str: + d["count"] += 1 + if d["count"] == 1: + return f"[{datetime.now()}] What's the weather in New York today?" 
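+            # Returning "" on the second prompt makes autogen fall back to its
+            # auto-reply; any later prompt gets "exit" to end the conversation.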
+ elif d["count"] == 2: + return "" + else: + return "exit" + + monkeypatch.setattr(IOConsole, "input", lambda self, prompt: input(prompt)) + + # print(f"{llm_config=}") + + weather_man = autogen.agentchat.AssistantAgent( + name="weather_man", + system_message="You are the weather man. Ask the user to give you the name of a city and then provide the weather forecast for that city.", + llm_config=azure_gpt35_turbo_16k_llm_config, + code_execution_config=False, + ) + + user_proxy = autogen.agentchat.UserProxyAgent( + "user_proxy", + code_execution_config=False, + ) + + get_forecast_for_city_mock = MagicMock() + + @user_proxy.register_for_execution() # type: ignore [misc] + @weather_man.register_for_llm(description="Get weather forecast for a city") # type: ignore [misc] + def get_forecast_for_city(city: str) -> str: + get_forecast_for_city_mock(city) + return f"The weather in {city} is sunny today." + + chat_result = weather_man.initiate_chat( + recipient=user_proxy, + message="Hi! Tell me the city for which you want the weather forecast.", + ) + + # print(f"{chat_result=}") + + last_message = chat_result.chat_history[-1] + # print(f"{last_message=}") + + get_forecast_for_city_mock.assert_called_once_with("New York") + assert "sunny" in last_message["content"] + + @pytest.mark.azure_oai + @pytest.mark.nats + @pytest.mark.asyncio + async def test_ionats_success( # noqa: C901 + self, + azure_gpt35_turbo_16k_llm_config: dict[str, Any], + monkeypatch: pytest.MonkeyPatch, + ) -> None: + user_id = uuid.uuid4() + thread_id = uuid.uuid4() + team_id = uuid.uuid4() + + azure_gpt35_turbo_16k_llm_config["temperature"] = 0.0 + + ### begin sending inputs to server + + d = {"count": 0} + + def input(prompt: str, d: dict[str, int] = d) -> str: + d["count"] += 1 + if d["count"] == 1: + return f"[{datetime.now()}] What's the weather in New York today?" + elif d["count"] == 2: + return "" + else: + return "exit" + + actual = [] + terminate_chat_queue: asyncio.Queue = asyncio.Queue(maxsize=1) # type: ignore [type-arg] + + @broker.subscriber( + f"chat.client.messages.{user_id}.playground.{thread_id}", stream=stream + ) + async def client_input_handler(msg: ServerResponseModel) -> None: + if msg.type == "input": + response = InputResponseModel(msg=input(msg.data.prompt)) # type: ignore [union-attr] + + await broker.publish( + response, + subject=f"chat.server.messages.{user_id}.playground.{thread_id}", + ) + elif msg.type == "print": + actual.append(msg.data.model_dump()) + elif msg.type == "terminate": + await terminate_chat_queue.put(msg.data.model_dump()) + else: + raise ValueError(f"Unknown message type {msg.type}") + + ### end sending inputs to server + + get_forecast_for_city_mock = MagicMock() + + async def create_team( + team_id: uuid.UUID, user_id: uuid.UUID + ) -> Callable[[str], list[dict[str, Any]]]: + weather_man = autogen.agentchat.AssistantAgent( + name="weather_man", + system_message="You are the weather man. Ask the user to give you the name of a city and then provide the weather forecast for that city.", + llm_config=azure_gpt35_turbo_16k_llm_config, + code_execution_config=False, + ) + + user_proxy = autogen.agentchat.UserProxyAgent( + "user_proxy", + code_execution_config=False, + ) + + @user_proxy.register_for_execution() # type: ignore [misc] + @weather_man.register_for_llm(description="Get weather forecast for a city") # type: ignore [misc] + def get_forecast_for_city(city: str) -> str: + get_forecast_for_city_mock(city) + return f"The weather in {city} is sunny today." 
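+            # NOTE: create_team is monkeypatched into fastagency_studio.io.ionats
+            # below, so the broker handler builds this in-memory team instead of
+            # loading one from the database.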
+ + def initiate_chat(msg: str) -> list[dict[str, Any]]: + chat_result: list[dict[str, Any]] = weather_man.initiate_chat( + recipient=user_proxy, + message="Hi! Tell me the city for which you want the weather forecast.", + ) + return chat_result + + return initiate_chat + + monkeypatch.setattr(fastagency_studio.io.ionats, "create_team", create_team) + + async with TestNatsBroker(broker) as br: + await br.publish( + fastagency_studio.io.ionats.InitiateModel( + msg="exit", + thread_id=thread_id, + team_id=team_id, + user_id=user_id, + ), + subject="chat.server.initiate_chat", + ) + + expected = [ + {"msg": "(to user_proxy):\n\n"}, + { + "msg": "Hi! Tell me the city for which you want the weather forecast.\n" + }, + { + "msg": "\n--------------------------------------------------------------------------------\n" + }, + {"msg": "(to weather_man):\n\n"}, + {"msg": "What's the weather in New York today?\n"}, + { + "msg": "\n--------------------------------------------------------------------------------\n" + }, + {"msg": "(to user_proxy):\n\n"}, + {"msg": " Suggested tool call (call_"}, + {"msg": 'Arguments: \n{\n "city": "New York"\n}\n'}, + { + "msg": "*********************************************************************************" + }, + { + "msg": "\n--------------------------------------------------------------------------------\n" + }, + {"msg": ">>>>>>>> NO HUMAN INPUT RECEIVED."}, + {"msg": ">>>>>>>> USING AUTO REPLY..."}, + {"msg": ">>>>>>>> EXECUTING FUNCTION get_forecast_for_city..."}, + {"msg": "(to weather_man):\n\n"}, + {"msg": "(to weather_man):\n\n"}, + {"msg": " Response from calling tool (call_"}, + {"msg": "The weather in New York is sunny today.\n"}, + { + "msg": "*****************************************************************" + }, + { + "msg": "\n--------------------------------------------------------------------------------\n" + }, + {"msg": "(to user_proxy):\n\n"}, + {"msg": "The weather in New York today is sunny.\n"}, + { + "msg": "\n--------------------------------------------------------------------------------\n" + }, + ] + + await asyncio.sleep(10) + + assert len(actual) == len(expected) + for i in range(len(expected)): + assert ( + expected[i]["msg"] in actual[i]["msg"] + ), f"{actual[i]} != {expected[i]}" + + result_set, _ = await asyncio.wait( + (asyncio.create_task(terminate_chat_queue.get()),), + timeout=30, + ) + result = (result_set.pop()).result() + assert result == {"msg": "Chat completed."} + + @pytest.mark.azure_oai + @pytest.mark.nats + @pytest.mark.asyncio + async def test_ionats_error_msg( + self, + azure_gpt35_turbo_16k_llm_config: dict[str, Any], + monkeypatch: pytest.MonkeyPatch, + ) -> None: + user_id = uuid.uuid4() + thread_id = uuid.uuid4() + team_id = uuid.uuid4() + + ### begin sending inputs to server + + d = {"count": 0} + + def input(prompt: str, d: dict[str, int] = d) -> str: + d["count"] += 1 + if d["count"] == 1: + return f"[{datetime.now()}] What's the weather in New York today?" 
+ elif d["count"] == 2: + return "" + else: + return "exit" + + actual = [] + terminate_chat_queue: asyncio.Queue = asyncio.Queue(maxsize=1) # type: ignore [type-arg] + error_queue: asyncio.Queue = asyncio.Queue(maxsize=1) # type: ignore [type-arg] + + @broker.subscriber( + f"chat.client.messages.{user_id}.playground.{thread_id}", stream=stream + ) + async def client_input_handler(msg: ServerResponseModel) -> None: + if msg.type == "input": + response = InputResponseModel(msg=input(msg.data.prompt)) # type: ignore [union-attr] + + await broker.publish( + response, + subject=f"chat.server.messages.{user_id}.playground.{thread_id}", + ) + elif msg.type == "print": + actual.append(msg.data.model_dump()) + elif msg.type == "terminate": + await terminate_chat_queue.put(msg.data.model_dump()) + elif msg.type == "error": + await error_queue.put(msg.data.model_dump()) + else: + raise ValueError(f"Unknown message type {msg.type}") + + ### end sending inputs to server + + async def create_team( + team_id: uuid.UUID, user_id: uuid.UUID + ) -> Callable[[str], list[dict[str, Any]]]: + raise ValueError("Triggering error in test") + + monkeypatch.setattr(fastagency_studio.io.ionats, "create_team", create_team) + + async with TestNatsBroker(broker) as br: + await br.publish( + fastagency_studio.io.ionats.InitiateModel( + msg="exit", + thread_id=thread_id, + team_id=team_id, + user_id=user_id, + ), + subject="chat.server.initiate_chat", + ) + + # await asyncio.sleep(10) + + result_set, _ = await asyncio.wait( + (asyncio.create_task(error_queue.get()),), + timeout=10, + ) + result = (result_set.pop()).result() + assert result == {"msg": "Triggering error in test"} + + @pytest.mark.azure_oai + @pytest.mark.nats + @pytest.mark.db + @pytest.mark.asyncio + @pytest.mark.parametrize( + "llm_model,api_key_model", # noqa: PT006 + [ + (AzureOAI, AzureOAIAPIKey), + ], + ) + async def test_ionats_e2e( + self, + user_uuid: str, + llm_model: Model, + api_key_model: Model, + # llm_config: Dict[str, Any], + # monkeypatch: pytest.MonkeyPatch, + ) -> None: + thread_id = uuid.uuid4() + + # Add secret, llm, agent, team to database + api_key_model_uuid = str(uuid.uuid4()) + api_key = api_key_model( # type: ignore [operator] + api_key=os.getenv("AZURE_OPENAI_API_KEY", default="*" * 64), + name=f"api_key_model_name_{api_key_model_uuid}", + ) + await add_model( + user_uuid=user_uuid, + type_name="secret", + model_name=api_key_model.__name__, # type: ignore [attr-defined] + model_uuid=api_key_model_uuid, + model=api_key.model_dump(), + background_tasks=BackgroundTasks(), + ) + + llm = llm_model( # type: ignore [operator] + name="llm_model_name", + model=os.getenv("AZURE_GPT35_MODEL", default="gpt-35-turbo-16k"), + api_key=api_key.get_reference_model()(uuid=api_key_model_uuid), + base_url=os.getenv( + "AZURE_API_ENDPOINT", default="https://my-deployment.openai.azure.com" + ), + api_version=os.getenv("AZURE_API_VERSION", default="2024-02-01"), + ) + llm_model_uuid = str(uuid.uuid4()) + await add_model( + user_uuid=user_uuid, + type_name="llm", + model_name=llm_model.__name__, # type: ignore [attr-defined] + model_uuid=llm_model_uuid, + model=llm.model_dump(), + background_tasks=BackgroundTasks(), + ) + + weatherman_assistant_model = AssistantAgent( + llm=llm.get_reference_model()(uuid=llm_model_uuid), + name="Assistant", + system_message="test system message", + ) + weatherman_assistant_model_uuid = str(uuid.uuid4()) + await add_model( + user_uuid=user_uuid, + type_name="agent", + model_name=AssistantAgent.__name__, + 
model_uuid=weatherman_assistant_model_uuid, + model=weatherman_assistant_model.model_dump(), + background_tasks=BackgroundTasks(), + ) + + user_proxy_model = UserProxyAgent( + name="UserProxyAgent", + llm=llm.get_reference_model()(uuid=llm_model_uuid), + ) + user_proxy_model_uuid = str(uuid.uuid4()) + await add_model( + user_uuid=user_uuid, + type_name="agent", + model_name=UserProxyAgent.__name__, + model_uuid=user_proxy_model_uuid, + model=user_proxy_model.model_dump(), + background_tasks=BackgroundTasks(), + ) + + team_model_uuid = str(uuid.uuid4()) + initial_agent = weatherman_assistant_model.get_reference_model()( + uuid=weatherman_assistant_model_uuid + ) + secondary_agent = user_proxy_model.get_reference_model()( + uuid=user_proxy_model_uuid + ) + team = TwoAgentTeam( + name="TwoAgentTeam", + initial_agent=initial_agent, + secondary_agent=secondary_agent, + ) + await add_model( + user_uuid=user_uuid, + type_name="team", + model_name=TwoAgentTeam.__name__, + model_uuid=team_model_uuid, + model=team.model_dump(), + background_tasks=BackgroundTasks(), + ) + + ### begin sending inputs to server + + d = {"count": 0} + + def input(prompt: str, d: dict[str, int] = d) -> str: + d["count"] += 1 + if d["count"] == 1: + return f"[{datetime.now()}] What's the weather in New York today?" + elif d["count"] == 2: + return "" + else: + return "exit" + + actual = [] + terminate_chat_queue: asyncio.Queue = asyncio.Queue(maxsize=1) # type: ignore [type-arg] + + @broker.subscriber( + f"chat.client.messages.{user_uuid}.playground.{thread_id}", stream=stream + ) + async def client_input_handler(msg: ServerResponseModel) -> None: + if msg.type == "input": + response = InputResponseModel(msg=input(msg.data.prompt)) # type: ignore [union-attr] + + await broker.publish( + response, + subject=f"chat.server.messages.{user_uuid}.playground.{thread_id}", + ) + elif msg.type == "print": + actual.append(msg.data.model_dump()) + elif msg.type == "terminate": + await terminate_chat_queue.put(msg.data.model_dump()) + else: + raise ValueError(f"Unknown message type {msg.type}") + + ### end sending inputs to server + + async with TestNatsBroker(broker) as br: + await br.publish( + fastagency_studio.io.ionats.InitiateModel( + msg="exit", + thread_id=thread_id, + team_id=team_model_uuid, + user_id=user_uuid, + ), + subject="chat.server.initiate_chat", + ) + + print(f"{actual=}") # noqa + + assert isinstance(actual, list) + + result_set, _ = await asyncio.wait( + (asyncio.create_task(terminate_chat_queue.get()),), + timeout=30, + ) + result = (result_set.pop()).result() + assert result == {"msg": "Chat completed."} diff --git a/tests/test_saas_app_generator.py b/tests/test_saas_app_generator.py new file mode 100644 index 00000000..7f34744e --- /dev/null +++ b/tests/test_saas_app_generator.py @@ -0,0 +1,381 @@ +import subprocess +import tempfile +import zipfile +from pathlib import Path +from unittest.mock import ANY, MagicMock, call, mock_open, patch + +import pytest + +from fastagency_studio.saas_app_generator import InvalidGHTokenError, SaasAppGenerator + + +@pytest.fixture +def saas_app_generator() -> SaasAppGenerator: + fly_api_token = "some-token" + fastagency_deployment_uuid = "some-uuid" + github_token = "some-github-token" + app_name = "test fastagency template" + repo_name = "test-fastagency-template" + fly_app_name = "test-fastagency-template" + deployment_auth_token = "test-deployment_auth_token" + developer_uuid = "test-developer-uuid" + + return SaasAppGenerator( + fly_api_token, + fastagency_deployment_uuid, 
+ github_token, + app_name, + repo_name, + fly_app_name, + deployment_auth_token, + developer_uuid, + ) + + +def test_get_account_name_and_repo_name() -> None: + fixture = "https://github.com/account-name/repo-name" + expected = "account-name/repo-name" + actual = SaasAppGenerator._get_account_name_and_repo_name(fixture) + assert actual == expected + + +@patch("requests.get") +@patch("shutil.unpack_archive") +def test_download_template_repo( + mock_unpack_archive: MagicMock, + mock_get: MagicMock, + saas_app_generator: SaasAppGenerator, +) -> None: + with tempfile.TemporaryDirectory() as temp_dir: + # mock requests.get + temp_dir_path = Path(temp_dir) + repo_name = "fastagency-wasp-app-template" + repo_main_dir = temp_dir_path / f"{repo_name}-main" + mock_response = MagicMock() + zip_content = b"fake-zip-content" + mock_response.status_code = 200 + mock_response.content = zip_content + mock_get.return_value = mock_response + + # Create a fake directory structure to mimic the unzipped content + repo_main_dir.mkdir() + (repo_main_dir / "dummy_file.txt").touch() + + zip_path = temp_dir_path / f"{repo_name}.zip" + with zipfile.ZipFile(str(zip_path), "w") as zip_file: + zip_file.writestr(f"{repo_name}-main", "dummy content") + + with patch.object(Path, "open", mock_open()) as mocked_file: + saas_app_generator._download_template_repo(Path(temp_dir)) + + # Ensure the zip file is written + # mocked_file.assert_called_once_with(zip_path, 'wb') + mocked_file().write.assert_called_once_with(zip_content) + + # Ensure the archive is unpacked and moved correctly + mock_unpack_archive.assert_called_once_with(str(zip_path), str(temp_dir_path)) + + # Ensure the directory structure is correct after moving files + assert (temp_dir_path / repo_main_dir / "dummy_file.txt").exists() + assert len(list(temp_dir_path.iterdir())) == 1 + + +@patch("subprocess.run") +def test_run_cli_command( + mock_run: MagicMock, saas_app_generator: SaasAppGenerator +) -> None: + command = "ls" + saas_app_generator._run_cli_command(command) + mock_run.assert_called_once_with( + command, + check=True, + capture_output=True, + shell=True, + text=True, + cwd=None, + env=None, + ) + + +@patch("subprocess.run") +@patch.dict("os.environ", {}, clear=True) +def test_create_new_repository( + mock_run: MagicMock, saas_app_generator: SaasAppGenerator +) -> None: + with patch.object(Path, "open", mock_open()): + saas_app_generator.create_new_repository(max_retries=1) + + expected_command = "gh repo create test-fastagency-template --public > " + + # Get the actual command that was called + actual_command = mock_run.call_args[0][0] + + # Assert that the actual command starts with the expected command + assert actual_command.startswith( + expected_command + ), f"Command {actual_command} does not start with {expected_command}" + + # Assert the other call parameters + mock_run.assert_called_once_with( + ANY, + check=True, + capture_output=True, + shell=True, + text=True, + cwd=ANY, + env={"GH_TOKEN": saas_app_generator.github_token}, + ) + + +@patch("subprocess.run") +@patch.dict("os.environ", {}, clear=True) +def test_create_new_repository_retry( + mock_run: MagicMock, saas_app_generator: SaasAppGenerator +) -> None: + with patch.object(Path, "open", mock_open()): + # Simulate "Name already exists on this account" error for the first two attempts + mock_run.side_effect = [ + subprocess.CalledProcessError( + 1, + "gh", + output="Name already exists on this account", + stderr="Name already exists on this account", + ) + ] * 2 + [None] + + # Call the 
+
+
+@patch.dict("os.environ", {}, clear=True)
+@patch("subprocess.run")
+def test_create_new_repository_retry_fail(
+    mock_run: MagicMock, saas_app_generator: SaasAppGenerator
+) -> None:
+    # Simulate a "Name already exists on this account" error for all attempts
+    mock_run.side_effect = subprocess.CalledProcessError(
+        1,
+        "gh",
+        output="Name already exists on this account",
+        stderr="Name already exists on this account",
+    )
+
+    # Call the method and expect an exception
+    expected_error_msg = (
+        "Unable to create a new GitHub repository. Please try again later."
+    )
+    with pytest.raises(InvalidGHTokenError, match=expected_error_msg) as e:
+        saas_app_generator.create_new_repository(max_retries=3)
+
+    assert expected_error_msg in str(e.value)
+
+    # Check that all three attempts were made
+    assert mock_run.call_count == 3
+
+
+@patch("subprocess.run")
+def test_create_new_repository_with_non_retry_exception(
+    mock_run: MagicMock, saas_app_generator: SaasAppGenerator
+) -> None:
+    # Simulate a "Bad credentials" error for all attempts
+    mock_run.side_effect = subprocess.CalledProcessError(
+        1, "gh", output="Bad credentials", stderr="Bad credentials"
+    )
+
+    # Call the method and expect an exception
+    expected_error_msg = (
+        "Unable to create a new GitHub repository. Please try again later."
+    )
+    with pytest.raises(InvalidGHTokenError, match=expected_error_msg) as e:
+        saas_app_generator.create_new_repository(max_retries=3)
+
+    assert expected_error_msg in str(e.value)
+
+    # Check that all three attempts were made
+    assert mock_run.call_count == 3
+
+
+@patch("subprocess.run")
+def test_set_github_actions_secrets(
+    mock_run: MagicMock, saas_app_generator: SaasAppGenerator
+) -> None:
+    with tempfile.TemporaryDirectory() as temp_dir:
+        saas_app_generator._set_github_actions_secrets(cwd=temp_dir, env={})
+        expected_commands = [
+            'gh secret set FLY_API_TOKEN --body "$FLY_API_TOKEN" --app actions',
+            'gh secret set FASTAGENCY_DEPLOYMENT_UUID --body "$FASTAGENCY_DEPLOYMENT_UUID" --app actions',
+            'gh secret set AUTH_TOKEN --body "$AUTH_TOKEN" --app actions',
+            'gh secret set DEVELOPER_UUID --body "$DEVELOPER_UUID" --app actions',
+            'gh secret set USER_GH_PAT --body "$GH_TOKEN" --app actions',
+            f'gh variable set REACT_APP_NAME --body "{saas_app_generator.app_name}"',
+            f'gh variable set FLY_IO_APP_NAME --body "{saas_app_generator.fly_app_name}"',
+        ]
+
+        for command in expected_commands:
+            mock_run.assert_any_call(
+                command,
+                check=True,
+                capture_output=True,
+                shell=True,
+                text=True,
+                cwd=temp_dir,
+                env={
+                    "FLY_API_TOKEN": saas_app_generator.fly_api_token,
+                    "FASTAGENCY_DEPLOYMENT_UUID": saas_app_generator.fastagency_deployment_uuid,
+                    "AUTH_TOKEN": saas_app_generator.deployment_auth_token,
+                    "DEVELOPER_UUID": saas_app_generator.developer_uuid,
+                },
+            )
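+
+# NOTE: The test above assumes _set_github_actions_secrets formats one
+# `gh secret set` / `gh variable set` command per value, populates the passed-in
+# env mapping, and shells out through _run_cli_command. A rough sketch of that
+# assumed shape (illustrative only, not the real implementation):
+#
+#     secrets = {
+#         "FLY_API_TOKEN": self.fly_api_token,
+#         "FASTAGENCY_DEPLOYMENT_UUID": self.fastagency_deployment_uuid,
+#         "AUTH_TOKEN": self.deployment_auth_token,
+#         "DEVELOPER_UUID": self.developer_uuid,
+#     }
+#     env.update(secrets)
+#     for name in secrets:
+#         command = f'gh secret set {name} --body "${name}" --app actions'
+#         self._run_cli_command(command, cwd=cwd, env=env)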
"primary": True, + "verified": True, + "visibility": "public", + }, + ], # Second response + ] + + actual = saas_app_generator._get_github_username_and_email() + expected = ("test_username", "test_username_primary@gmail.com") + assert actual == expected + mock_make_request.assert_has_calls( + [ + call("https://api.github.com/user", ANY), # Replace ANY with actual headers + call( + "https://api.github.com/user/emails", ANY + ), # Replace ANY with actual headers + ] + ) + + +@patch("fastagency_studio.saas_app_generator._make_request") +def test_get_github_username_and_non_primary_email( + mock_make_request: MagicMock, saas_app_generator: SaasAppGenerator +) -> None: + # Arrange + mock_make_request.side_effect = [ + {"name": "test_username", "login": "test_user", "id": 12345}, # First response + [ + { + "email": "test_username@gmail.com", + "primary": False, + "verified": True, + "visibility": None, + } + ], # Second response + ] + + actual = saas_app_generator._get_github_username_and_email() + expected = ("test_username", "test_username@gmail.com") + assert actual == expected + mock_make_request.assert_has_calls( + [ + call("https://api.github.com/user", ANY), # Replace ANY with actual headers + call( + "https://api.github.com/user/emails", ANY + ), # Replace ANY with actual headers + ] + ) + + +@patch.dict("os.environ", {}, clear=True) +@patch("subprocess.run") +def test_initialize_git_and_push( + mock_run: MagicMock, + saas_app_generator: SaasAppGenerator, +) -> None: + with ( + tempfile.TemporaryDirectory() as temp_dir, + patch.object( + SaasAppGenerator, + "_get_account_name_and_repo_name", + return_value="account/repo", + ), + patch.object( + SaasAppGenerator, + "_get_github_username_and_email", + return_value=("John Doe", "john@doe.org"), + ), + patch.object(Path, "open", mock_open()), + ): + temp_dir_path = Path(temp_dir) + extracted_template_dir = ( + temp_dir_path / SaasAppGenerator.EXTRACTED_TEMPLATE_DIR_NAME + ) + extracted_template_dir.mkdir(parents=True, exist_ok=True) + + saas_app_generator.create_new_repository(max_retries=1) + saas_app_generator._initialize_git_and_push(temp_dir_path, env={}) + + expected_commands = [ + "git init", + "git add .", + 'git config user.name "John Doe"', + 'git config user.email "john@doe.org"', + 'git commit -m "Create a new FastAgency SaaS application"', + "git branch -M main", + "git remote add origin https://account:$GH_TOKEN@github.com/account/repo.git", + 'gh secret set FLY_API_TOKEN --body "$FLY_API_TOKEN" --app actions', + 'gh secret set FASTAGENCY_DEPLOYMENT_UUID --body "$FASTAGENCY_DEPLOYMENT_UUID" --app actions', + 'gh secret set AUTH_TOKEN --body "$AUTH_TOKEN" --app actions', + "git push -u origin main", + ] + + # Print actual commands + # for call in mock_run.call_args_list: + # print("Called with args:", call) + + for command in expected_commands: + mock_run.assert_any_call( + command, + check=True, + capture_output=True, + shell=True, + text=True, + cwd=str(extracted_template_dir), + env=ANY, + ) + + +@patch("fastagency_studio.saas_app_generator.SaasAppGenerator._initialize_git_and_push") +@patch("fastagency_studio.saas_app_generator.SaasAppGenerator._download_template_repo") +@patch.dict("os.environ", {}, clear=True) +@patch("tempfile.TemporaryDirectory", new_callable=MagicMock) +def test_execute( + mock_tempdir: MagicMock, + mock_download: MagicMock, + mock_init_git: MagicMock, + saas_app_generator: SaasAppGenerator, +) -> None: + temp_dir_path = Path("/mock/temp/dir") + mock_tempdir.return_value.__enter__.return_value = 
+
+
+@patch.dict("os.environ", {}, clear=True)
+@patch("subprocess.run")
+def test_initialize_git_and_push(
+    mock_run: MagicMock,
+    saas_app_generator: SaasAppGenerator,
+) -> None:
+    with (
+        tempfile.TemporaryDirectory() as temp_dir,
+        patch.object(
+            SaasAppGenerator,
+            "_get_account_name_and_repo_name",
+            return_value="account/repo",
+        ),
+        patch.object(
+            SaasAppGenerator,
+            "_get_github_username_and_email",
+            return_value=("John Doe", "john@doe.org"),
+        ),
+        patch.object(Path, "open", mock_open()),
+    ):
+        temp_dir_path = Path(temp_dir)
+        extracted_template_dir = (
+            temp_dir_path / SaasAppGenerator.EXTRACTED_TEMPLATE_DIR_NAME
+        )
+        extracted_template_dir.mkdir(parents=True, exist_ok=True)
+
+        saas_app_generator.create_new_repository(max_retries=1)
+        saas_app_generator._initialize_git_and_push(temp_dir_path, env={})
+
+        expected_commands = [
+            "git init",
+            "git add .",
+            'git config user.name "John Doe"',
+            'git config user.email "john@doe.org"',
+            'git commit -m "Create a new FastAgency SaaS application"',
+            "git branch -M main",
+            "git remote add origin https://account:$GH_TOKEN@github.com/account/repo.git",
+            'gh secret set FLY_API_TOKEN --body "$FLY_API_TOKEN" --app actions',
+            'gh secret set FASTAGENCY_DEPLOYMENT_UUID --body "$FASTAGENCY_DEPLOYMENT_UUID" --app actions',
+            'gh secret set AUTH_TOKEN --body "$AUTH_TOKEN" --app actions',
+            "git push -u origin main",
+        ]
+
+        for command in expected_commands:
+            mock_run.assert_any_call(
+                command,
+                check=True,
+                capture_output=True,
+                shell=True,
+                text=True,
+                cwd=str(extracted_template_dir),
+                env=ANY,
+            )
+
+
+@patch("fastagency_studio.saas_app_generator.SaasAppGenerator._initialize_git_and_push")
+@patch("fastagency_studio.saas_app_generator.SaasAppGenerator._download_template_repo")
+@patch.dict("os.environ", {}, clear=True)
+@patch("tempfile.TemporaryDirectory", new_callable=MagicMock)
+def test_execute(
+    mock_tempdir: MagicMock,
+    mock_download: MagicMock,
+    mock_init_git: MagicMock,
+    saas_app_generator: SaasAppGenerator,
+) -> None:
+    temp_dir_path = Path("/mock/temp/dir")
+    mock_tempdir.return_value.__enter__.return_value = temp_dir_path
+
+    saas_app_generator.gh_repo_url = ""
+    saas_app_generator.execute()
+
+    mock_download.assert_called_once_with(temp_dir_path)
+    mock_init_git.assert_called_once_with(
+        temp_dir_path, env={"GH_TOKEN": "some-github-token"}
+    )
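+
+# NOTE: test_execute implies execute() orchestrates the steps roughly as
+# sketched below (illustrative only; the actual implementation may do more,
+# e.g. create the repository first when gh_repo_url is empty):
+#
+#     def execute(self) -> None:
+#         with tempfile.TemporaryDirectory() as temp_dir:
+#             temp_dir_path = Path(temp_dir)
+#             self._download_template_repo(temp_dir_path)
+#             env = {"GH_TOKEN": self.github_token}
+#             self._initialize_git_and_push(temp_dir_path, env=env)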