From e18db160b54e3ec0d95a817235ed804c908f4eb9 Mon Sep 17 00:00:00 2001 From: Matt Vallillo Date: Mon, 26 Aug 2024 11:46:25 -0500 Subject: [PATCH 01/39] Add `BaseEvent.meta` (#1103) --- CHANGELOG.md | 1 + griptape/events/base_event.py | 2 ++ tests/unit/events/test_base_event.py | 19 ++++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 59947dc5f..f984776ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased ### Added - `BaseConversationMemory.prompt_driver` for use with autopruning. +- Parameter `meta: dict` on `BaseEvent`. ### Fixed - Parsing streaming response with some OpenAi compatible services. diff --git a/griptape/events/base_event.py b/griptape/events/base_event.py index 9ab8e6c47..61443107e 100644 --- a/griptape/events/base_event.py +++ b/griptape/events/base_event.py @@ -3,6 +3,7 @@ import time import uuid from abc import ABC +from typing import Any from attrs import Factory, define, field @@ -13,3 +14,4 @@ class BaseEvent(SerializableMixin, ABC): id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={"serializable": True}) timestamp: float = field(default=Factory(lambda: time.time()), kw_only=True, metadata={"serializable": True}) + meta: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True}) diff --git a/tests/unit/events/test_base_event.py b/tests/unit/events/test_base_event.py index 778f7c096..6ce010ee9 100644 --- a/tests/unit/events/test_base_event.py +++ b/tests/unit/events/test_base_event.py @@ -34,6 +34,7 @@ def test_start_prompt_event_from_dict(self): "id": "917298d4bf894b0a824a8fdb26717a0c", "timestamp": 123, "model": "foo bar", + "meta": {"foo": "bar"}, "prompt_stack": { "type": "PromptStack", "messages": [ @@ -66,10 +67,12 @@ def test_start_prompt_event_from_dict(self): assert event.prompt_stack.messages[1].content[0].artifact.value == "bar" assert event.prompt_stack.messages[1].role == "system" assert event.model == "foo bar" + assert event.meta == {"foo": "bar"} def test_finish_prompt_event_from_dict(self): dict_value = { "type": "FinishPromptEvent", + "meta": {"foo": "bar"}, "timestamp": 123.0, "input_token_count": 10, "output_token_count": 12, @@ -85,10 +88,12 @@ def test_finish_prompt_event_from_dict(self): assert event.output_token_count == 12 assert event.result == "foo bar" assert event.model == "foo bar" + assert event.meta == {"foo": "bar"} def test_start_task_event_from_dict(self): dict_value = { "type": "StartTaskEvent", + "meta": {"foo": "bar"}, "timestamp": 123.0, "task_id": "foo", "task_parent_ids": ["bar"], @@ -107,10 +112,12 @@ def test_start_task_event_from_dict(self): assert isinstance(event.task_input, BaseArtifact) assert event.task_input.value == "foo" assert event.task_output.value == "bar" + assert event.meta == {"foo": "bar"} def test_start_subtask_event_from_dict(self): dict_value = { "type": "StartActionsSubtaskEvent", + "meta": {"foo": "bar"}, "timestamp": 123.0, "task_id": "foo", "task_parent_ids": ["bar"], @@ -139,10 +146,12 @@ def test_start_subtask_event_from_dict(self): assert event.subtask_actions[0]["path"] == "foopath" assert event.subtask_actions[0]["input"] is not None assert event.subtask_actions[0]["input"]["value"] == "quux" + assert event.meta == {"foo": "bar"} def test_finish_task_event_from_dict(self): dict_value = { "type": "FinishTaskEvent", + "meta": {"foo": "bar"}, "timestamp": 123.0, "task_id": "foo", 
"task_parent_ids": ["bar"], @@ -161,10 +170,12 @@ def test_finish_task_event_from_dict(self): assert isinstance(event.task_input, BaseArtifact) assert event.task_input.value == "foo" assert event.task_output.value == "bar" + assert event.meta == {"foo": "bar"} def test_finish_subtask_event_from_dict(self): dict_value = { "type": "FinishActionsSubtaskEvent", + "meta": {"foo": "bar"}, "timestamp": 123.0, "task_id": "foo", "task_parent_ids": ["bar"], @@ -193,10 +204,12 @@ def test_finish_subtask_event_from_dict(self): assert event.subtask_actions[0]["path"] == "foopath" assert event.subtask_actions[0]["input"] is not None assert event.subtask_actions[0]["input"]["value"] == "quux" + assert event.meta == {"foo": "bar"} def test_start_structure_run_event_from_dict(self): dict_value = { "type": "StartStructureRunEvent", + "meta": {"foo": "bar"}, "timestamp": 123.0, "structure_id": "foo", "input_task_input": {"type": "TextArtifact", "value": "foo"}, @@ -210,10 +223,12 @@ def test_start_structure_run_event_from_dict(self): assert isinstance(event.input_task_input, BaseArtifact) assert event.input_task_input.value == "foo" assert event.input_task_output.value == "bar" + assert event.meta == {"foo": "bar"} def test_finish_structure_run_event_from_dict(self): dict_value = { "type": "FinishStructureRunEvent", + "meta": {"foo": "bar"}, "timestamp": 123.0, "structure_id": "foo", "output_task_input": {"type": "TextArtifact", "value": "foo"}, @@ -227,14 +242,16 @@ def test_finish_structure_run_event_from_dict(self): assert isinstance(event.output_task_input, BaseArtifact) assert event.output_task_input.value == "foo" assert event.output_task_output.value == "bar" + assert event.meta == {"foo": "bar"} def test_completion_chunk_event_from_dict(self): - dict_value = {"type": "CompletionChunkEvent", "timestamp": 123.0, "token": "foo"} + dict_value = {"type": "CompletionChunkEvent", "timestamp": 123.0, "token": "foo", "meta": {}} event = BaseEvent.from_dict(dict_value) assert isinstance(event, CompletionChunkEvent) assert event.token == "foo" + assert event.meta == {} def test_unsupported_from_dict(self): dict_value = {"type": "foo", "value": "foobar"} From f2b529ab361c33b2a26a4510f78f85181b96af4f Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Mon, 26 Aug 2024 14:42:29 -0700 Subject: [PATCH 02/39] Add migration guide (#1108) --- CHANGELOG.md | 2 ++ MIGRATION.md | 5 +++++ 2 files changed, 7 insertions(+) create mode 100644 MIGRATION.md diff --git a/CHANGELOG.md b/CHANGELOG.md index f984776ce..b066e2e44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Parsing streaming response with some OpenAi compatible services. +**Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. + ## [0.30.1] - 2024-08-21 ### Fixed diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 000000000..ac1cbcd70 --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,5 @@ +# Migration Guide + +This document provides instructions for migrating your codebase to accommodate breaking changes introduced in new versions of Griptape. 
+ +## 0.30.X to 0.31.X From 3c604af6ee0f4c4b5a2a068fa3254f7a0f6130ec Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Mon, 26 Aug 2024 15:22:22 -0700 Subject: [PATCH 03/39] Remove many instances of catching exceptions (#1101) --- MIGRATION.md | 19 ++++ .../src/load_query_and_chat_marqo_1.py | 4 - docs/examples/src/query_webpage_1.py | 3 - docs/examples/src/query_webpage_astra_db_1.py | 4 +- docs/examples/src/talk_to_a_pdf_1.py | 3 - docs/examples/src/talk_to_a_webpage_1.py | 4 - .../drivers/src/vector_store_drivers_1.py | 3 - .../drivers/src/vector_store_drivers_10.py | 4 - .../drivers/src/vector_store_drivers_11.py | 4 - .../drivers/src/vector_store_drivers_3.py | 4 - .../drivers/src/vector_store_drivers_4.py | 4 - .../drivers/src/vector_store_drivers_5.py | 4 - .../drivers/src/vector_store_drivers_6.py | 4 - .../drivers/src/vector_store_drivers_7.py | 4 - .../drivers/src/vector_store_drivers_8.py | 4 - .../drivers/src/vector_store_drivers_9.py | 4 - .../engines/src/rag_engines_1.py | 3 - .../engines/src/summary_engines_1.py | 4 - .../official-tools/src/vector_store_tool_1.py | 3 - .../file_manager/base_file_manager_driver.py | 63 +++++--------- .../griptape_cloud_structure_run_driver.py | 43 ++++----- .../extraction/base_extraction_engine.py | 4 +- .../extraction/csv_extraction_engine.py | 21 ++--- .../extraction/json_extraction_engine.py | 23 +++-- .../text_loader_retrieval_rag_module.py | 4 +- griptape/loaders/base_text_loader.py | 9 +- griptape/loaders/blob_loader.py | 10 +-- griptape/loaders/csv_loader.py | 17 ++-- griptape/loaders/email_loader.py | 63 +++++++------- griptape/loaders/pdf_loader.py | 9 +- griptape/loaders/text_loader.py | 16 ++-- griptape/loaders/web_loader.py | 10 +-- griptape/tasks/code_execution_task.py | 11 ++- griptape/tools/file_manager/tool.py | 13 ++- .../tools/variation_image_generation/tool.py | 3 - griptape/tools/web_scraper/tool.py | 5 +- .../test_amazon_s3_file_manager_driver.py | 85 ++++++++---------- .../test_local_file_manager_driver.py | 87 +++++++++---------- .../extraction/test_json_extraction_engine.py | 5 +- tests/unit/loaders/test_email_loader.py | 14 ++- tests/unit/loaders/test_web_loader.py | 17 ++-- tests/unit/tasks/test_code_execution_task.py | 10 +-- tests/unit/tools/test_file_manager.py | 6 +- 43 files changed, 253 insertions(+), 381 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index ac1cbcd70..75b7218fb 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -3,3 +3,22 @@ This document provides instructions for migrating your codebase to accommodate breaking changes introduced in new versions of Griptape. ## 0.30.X to 0.31.X + +### Exceptions Over `ErrorArtifact`s + +Drivers, Loaders, and Engines will now raise exceptions rather than returning `ErrorArtifact`s. +Update any logic that expects `ErrorArtifact` to handle exceptions instead.
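Because the propagated errors are ordinary Python exceptions, calling code can also catch narrow types where they are known; for example, the file manager drivers updated later in this patch surface built-ins such as `FileNotFoundError` and `IsADirectoryError` (see the driver and test changes below). A minimal sketch, with an illustrative workdir and path:

```python
from griptape.drivers import LocalFileManagerDriver

driver = LocalFileManagerDriver(workdir="/tmp/docs")  # illustrative workdir

try:
    artifact = driver.load_file("notes/todo.txt")  # hypothetical path
except FileNotFoundError:
    print("no such file")
except IsADirectoryError:
    print("path points at a directory, not a file")
```

The generic before/after pattern shown next covers the common case of wrapping a loader call.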
+ +```python +# Before +artifacts = WebLoader().load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# After +try: + artifacts = WebLoader().load("https://www.griptape.ai") +except Exception as e: + raise e +``` diff --git a/docs/examples/src/load_query_and_chat_marqo_1.py b/docs/examples/src/load_query_and_chat_marqo_1.py index 013a0264f..cdcb376bb 100644 --- a/docs/examples/src/load_query_and_chat_marqo_1.py +++ b/docs/examples/src/load_query_and_chat_marqo_1.py @@ -1,7 +1,6 @@ import os from griptape import utils -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import MarqoVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader from griptape.structures import Agent @@ -27,9 +26,6 @@ # Load artifacts from the web artifacts = WebLoader().load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert the artifacts into the vector store vector_store.upsert_text_artifacts( { diff --git a/docs/examples/src/query_webpage_1.py b/docs/examples/src/query_webpage_1.py index 2ea32b718..b9e3286d6 100644 --- a/docs/examples/src/query_webpage_1.py +++ b/docs/examples/src/query_webpage_1.py @@ -1,14 +1,11 @@ import os -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"])) artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) for a in artifacts: vector_store.upsert_text_artifact(a, namespace="griptape") diff --git a/docs/examples/src/query_webpage_astra_db_1.py b/docs/examples/src/query_webpage_astra_db_1.py index 3309e1dcd..4590a6b59 100644 --- a/docs/examples/src/query_webpage_astra_db_1.py +++ b/docs/examples/src/query_webpage_astra_db_1.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts import ErrorArtifact from griptape.drivers import ( AstraDbVectorStoreDriver, OpenAiChatPromptDriver, @@ -45,8 +44,7 @@ ) artifacts = WebLoader(max_tokens=256).load(input_blogpost) -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) + vector_store_driver.upsert_text_artifacts({namespace: artifacts}) rag_tool = RagTool( diff --git a/docs/examples/src/talk_to_a_pdf_1.py b/docs/examples/src/talk_to_a_pdf_1.py index b4ab72029..3c29f4c74 100644 --- a/docs/examples/src/talk_to_a_pdf_1.py +++ b/docs/examples/src/talk_to_a_pdf_1.py @@ -1,6 +1,5 @@ import requests -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.engines.rag import RagEngine from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule @@ -32,8 +31,6 @@ ) artifacts = PdfLoader().load(response.content) -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) vector_store.upsert_text_artifacts({namespace: artifacts}) diff --git a/docs/examples/src/talk_to_a_webpage_1.py b/docs/examples/src/talk_to_a_webpage_1.py index 0412ed977..3e973da2d 100644 --- a/docs/examples/src/talk_to_a_webpage_1.py +++ b/docs/examples/src/talk_to_a_webpage_1.py @@ -1,4 +1,3 @@ -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import 
LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.engines.rag import RagEngine from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule @@ -28,9 +27,6 @@ artifacts = WebLoader().load("https://en.wikipedia.org/wiki/Physics") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - vector_store_driver.upsert_text_artifacts({namespace: artifacts}) rag_tool = RagTool( diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_1.py b/docs/griptape-framework/drivers/src/vector_store_drivers_1.py index a4e54da3a..7f7e98e13 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_1.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_1.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts import ErrorArtifact from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -12,8 +11,6 @@ # Load Artifacts from the web artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) # Upsert Artifacts into the Vector Store Driver [vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_10.py b/docs/griptape-framework/drivers/src/vector_store_drivers_10.py index b7645bd82..39a21121d 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_10.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_10.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import OpenAiEmbeddingDriver, QdrantVectorStoreDriver from griptape.loaders import WebLoader @@ -22,9 +21,6 @@ # Load Artifacts from the web artifacts = WebLoader().load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Recreate Qdrant collection vector_store_driver.client.recreate_collection( collection_name=vector_store_driver.collection_name, diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_11.py b/docs/griptape-framework/drivers/src/vector_store_drivers_11.py index 965f97715..a8d9ceed1 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_11.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_11.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import AstraDbVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -23,9 +22,6 @@ # Load Artifacts from the web artifacts = WebLoader().load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver [vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_3.py b/docs/griptape-framework/drivers/src/vector_store_drivers_3.py index d2cfc8142..559eaec5a 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_3.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_3.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts import ErrorArtifact from griptape.drivers import OpenAiEmbeddingDriver, PineconeVectorStoreDriver from griptape.loaders import WebLoader @@ -17,9 +16,6 @@ # Load Artifacts from the web artifacts = 
WebLoader(max_tokens=100).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver [vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_4.py b/docs/griptape-framework/drivers/src/vector_store_drivers_4.py index fe35f1ff5..f2f0091a0 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_4.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_4.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts import ErrorArtifact from griptape.drivers import MarqoVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -22,9 +21,6 @@ # Load Artifacts from the web artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_5.py b/docs/griptape-framework/drivers/src/vector_store_drivers_5.py index 867195a48..7649579c7 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_5.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_5.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import MongoDbAtlasVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -28,9 +27,6 @@ # Load Artifacts from the web artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_6.py b/docs/griptape-framework/drivers/src/vector_store_drivers_6.py index 9c5c9cab6..78a7cc3e6 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_6.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_6.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import AzureMongoDbVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -28,9 +27,6 @@ # Load Artifacts from the web artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_7.py b/docs/griptape-framework/drivers/src/vector_store_drivers_7.py index c08d9ff3b..d34ff8649 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_7.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_7.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import OpenAiEmbeddingDriver, RedisVectorStoreDriver from griptape.loaders import WebLoader @@ -18,9 +17,6 @@ # Load Artifacts from the web artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { diff --git 
a/docs/griptape-framework/drivers/src/vector_store_drivers_8.py b/docs/griptape-framework/drivers/src/vector_store_drivers_8.py index a57363eb3..18e50a397 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_8.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_8.py @@ -2,7 +2,6 @@ import boto3 -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import AmazonOpenSearchVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader @@ -19,9 +18,6 @@ # Load Artifacts from the web artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_9.py b/docs/griptape-framework/drivers/src/vector_store_drivers_9.py index c5aface63..ad5abf932 100644 --- a/docs/griptape-framework/drivers/src/vector_store_drivers_9.py +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_9.py @@ -1,6 +1,5 @@ import os -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import OpenAiEmbeddingDriver, PgVectorVectorStoreDriver from griptape.loaders import WebLoader @@ -25,9 +24,6 @@ # Load Artifacts from the web artifacts = WebLoader().load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - # Upsert Artifacts into the Vector Store Driver vector_store_driver.upsert_text_artifacts( { diff --git a/docs/griptape-framework/engines/src/rag_engines_1.py b/docs/griptape-framework/engines/src/rag_engines_1.py index c257cd4df..a8a9cc06b 100644 --- a/docs/griptape-framework/engines/src/rag_engines_1.py +++ b/docs/griptape-framework/engines/src/rag_engines_1.py @@ -1,4 +1,3 @@ -from griptape.artifacts import ErrorArtifact from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver from griptape.engines.rag import RagContext, RagEngine from griptape.engines.rag.modules import PromptResponseRagModule, TranslateQueryRagModule, VectorStoreRetrievalRagModule @@ -11,8 +10,6 @@ vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) artifacts = WebLoader(max_tokens=500).load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) vector_store.upsert_text_artifacts( { diff --git a/docs/griptape-framework/engines/src/summary_engines_1.py b/docs/griptape-framework/engines/src/summary_engines_1.py index 092665b37..b5adf2a5a 100644 --- a/docs/griptape-framework/engines/src/summary_engines_1.py +++ b/docs/griptape-framework/engines/src/summary_engines_1.py @@ -1,6 +1,5 @@ import requests -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import OpenAiChatPromptDriver from griptape.engines import PromptSummaryEngine from griptape.loaders import PdfLoader @@ -12,9 +11,6 @@ artifacts = PdfLoader().load(response.content) -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) - text = "\n\n".join([a.value for a in artifacts]) engine.summarize_text(text) diff --git a/docs/griptape-tools/official-tools/src/vector_store_tool_1.py b/docs/griptape-tools/official-tools/src/vector_store_tool_1.py index 266398d5e..26c87e255 100644 --- a/docs/griptape-tools/official-tools/src/vector_store_tool_1.py +++ b/docs/griptape-tools/official-tools/src/vector_store_tool_1.py @@ -1,4 +1,3 @@ 
-from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver from griptape.loaders import WebLoader from griptape.structures import Agent @@ -9,8 +8,6 @@ ) artifacts = WebLoader().load("https://www.griptape.ai") -if isinstance(artifacts, ErrorArtifact): - raise Exception(artifacts.value) vector_store_driver.upsert_text_artifacts({"griptape": artifacts}) vector_db = VectorStoreTool( diff --git a/griptape/drivers/file_manager/base_file_manager_driver.py b/griptape/drivers/file_manager/base_file_manager_driver.py index 1c4f1dd6a..dce538812 100644 --- a/griptape/drivers/file_manager/base_file_manager_driver.py +++ b/griptape/drivers/file_manager/base_file_manager_driver.py @@ -41,60 +41,39 @@ class BaseFileManagerDriver(ABC): ) def list_files(self, path: str) -> TextArtifact | ErrorArtifact: - try: - entries = self.try_list_files(path) - return TextArtifact("\n".join(list(entries))) - except FileNotFoundError: - return ErrorArtifact("Path not found") - except NotADirectoryError: - return ErrorArtifact("Path is not a directory") - except Exception as e: - return ErrorArtifact(f"Failed to list files: {str(e)}") + entries = self.try_list_files(path) + return TextArtifact("\n".join(list(entries))) @abstractmethod def try_list_files(self, path: str) -> list[str]: ... def load_file(self, path: str) -> BaseArtifact: - try: - extension = path.split(".")[-1] - loader = self.loaders.get(extension) or self.default_loader - source = self.try_load_file(path) - result = loader.load(source) - - if isinstance(result, BaseArtifact): - return result - else: - return ListArtifact(result) - except FileNotFoundError: - return ErrorArtifact("Path not found") - except IsADirectoryError: - return ErrorArtifact("Path is a directory") - except NotADirectoryError: - return ErrorArtifact("Not a directory") - except Exception as e: - return ErrorArtifact(f"Failed to load file: {str(e)}") + extension = path.split(".")[-1] + loader = self.loaders.get(extension) or self.default_loader + source = self.try_load_file(path) + result = loader.load(source) + + if isinstance(result, BaseArtifact): + return result + else: + return ListArtifact(result) @abstractmethod def try_load_file(self, path: str) -> bytes: ... 
- def save_file(self, path: str, value: bytes | str) -> InfoArtifact | ErrorArtifact: - try: - extension = path.split(".")[-1] - loader = self.loaders.get(extension) or self.default_loader - encoding = None if loader is None else loader.encoding + def save_file(self, path: str, value: bytes | str) -> InfoArtifact: + extension = path.split(".")[-1] + loader = self.loaders.get(extension) or self.default_loader + encoding = None if loader is None else loader.encoding - if isinstance(value, str): - value = value.encode() if encoding is None else value.encode(encoding=encoding) - elif isinstance(value, (bytearray, memoryview)): - raise ValueError(f"Unsupported type: {type(value)}") + if isinstance(value, str): + value = value.encode() if encoding is None else value.encode(encoding=encoding) + elif isinstance(value, (bytearray, memoryview)): + raise ValueError(f"Unsupported type: {type(value)}") - self.try_save_file(path, value) + self.try_save_file(path, value) - return InfoArtifact("Successfully saved file") - except IsADirectoryError: - return ErrorArtifact("Path is a directory") - except Exception as e: - return ErrorArtifact(f"Failed to save file: {str(e)}") + return InfoArtifact("Successfully saved file") @abstractmethod def try_save_file(self, path: str, value: bytes) -> None: ... diff --git a/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py b/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py index 305d14995..a6e2064b6 100644 --- a/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py +++ b/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py @@ -6,7 +6,7 @@ from attrs import Factory, define, field -from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact +from griptape.artifacts import BaseArtifact, InfoArtifact from griptape.drivers.structure_run.base_structure_run_driver import BaseStructureRunDriver @@ -23,28 +23,25 @@ class GriptapeCloudStructureRunDriver(BaseStructureRunDriver): structure_run_max_wait_time_attempts: int = field(default=20, kw_only=True) async_run: bool = field(default=False, kw_only=True) - def try_run(self, *args: BaseArtifact) -> BaseArtifact: - from requests import HTTPError, Response, exceptions, post + def try_run(self, *args: BaseArtifact) -> BaseArtifact | InfoArtifact: + from requests import Response, post url = urljoin(self.base_url.strip("/"), f"/api/structures/{self.structure_id}/runs") - try: - response: Response = post( - url, - json={"args": [arg.value for arg in args], "env": self.env}, - headers=self.headers, - ) - response.raise_for_status() - response_json = response.json() - - if self.async_run: - return InfoArtifact("Run started successfully") - else: - return self._get_structure_run_result(response_json["structure_run_id"]) - except (exceptions.RequestException, HTTPError) as err: - return ErrorArtifact(str(err)) - - def _get_structure_run_result(self, structure_run_id: str) -> InfoArtifact | BaseArtifact | ErrorArtifact: + response: Response = post( + url, + json={"args": [arg.value for arg in args], "env": self.env}, + headers=self.headers, + ) + response.raise_for_status() + response_json = response.json() + + if self.async_run: + return InfoArtifact("Run started successfully") + else: + return self._get_structure_run_result(response_json["structure_run_id"]) + + def _get_structure_run_result(self, structure_run_id: str) -> BaseArtifact | InfoArtifact: url = urljoin(self.base_url.strip("/"), f"/api/structure-runs/{structure_run_id}") result = 
self._get_structure_run_result_attempt(url) @@ -59,12 +56,10 @@ def _get_structure_run_result(self, structure_run_id: str) -> InfoArtifact | Bas status = result["status"] if wait_attempts >= self.structure_run_max_wait_time_attempts: - return ErrorArtifact( - f"Failed to get Run result after {self.structure_run_max_wait_time_attempts} attempts.", - ) + raise Exception(f"Failed to get Run result after {self.structure_run_max_wait_time_attempts} attempts.") if status != "SUCCEEDED": - return ErrorArtifact(result) + raise Exception(f"Run failed with status: {status}") if "output" in result: return BaseArtifact.from_dict(result["output"]) diff --git a/griptape/engines/extraction/base_extraction_engine.py b/griptape/engines/extraction/base_extraction_engine.py index fb1fab6c4..d3a50585d 100644 --- a/griptape/engines/extraction/base_extraction_engine.py +++ b/griptape/engines/extraction/base_extraction_engine.py @@ -9,7 +9,7 @@ from griptape.configs import Defaults if TYPE_CHECKING: - from griptape.artifacts import ErrorArtifact, ListArtifact + from griptape.artifacts import ListArtifact from griptape.drivers import BasePromptDriver from griptape.rules import Ruleset @@ -54,4 +54,4 @@ def extract( *, rulesets: Optional[list[Ruleset]] = None, **kwargs, - ) -> ListArtifact | ErrorArtifact: ... + ) -> ListArtifact: ... diff --git a/griptape/engines/extraction/csv_extraction_engine.py b/griptape/engines/extraction/csv_extraction_engine.py index c9c040f65..b45bdf7f5 100644 --- a/griptape/engines/extraction/csv_extraction_engine.py +++ b/griptape/engines/extraction/csv_extraction_engine.py @@ -6,7 +6,7 @@ from attrs import Factory, define, field -from griptape.artifacts import CsvRowArtifact, ErrorArtifact, ListArtifact, TextArtifact +from griptape.artifacts import CsvRowArtifact, ListArtifact, TextArtifact from griptape.common import Message, PromptStack from griptape.engines import BaseExtractionEngine from griptape.utils import J2 @@ -27,17 +27,14 @@ def extract( *, rulesets: Optional[list[Ruleset]] = None, **kwargs, - ) -> ListArtifact | ErrorArtifact: - try: - return ListArtifact( - self._extract_rec( - cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], - [], - ), - item_separator="\n", - ) - except Exception as e: - return ErrorArtifact(f"error extracting CSV rows: {e}") + ) -> ListArtifact: + return ListArtifact( + self._extract_rec( + cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], + [], + ), + item_separator="\n", + ) def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[CsvRowArtifact]: rows = [] diff --git a/griptape/engines/extraction/json_extraction_engine.py b/griptape/engines/extraction/json_extraction_engine.py index 8f2f4a3fe..a4cd3a438 100644 --- a/griptape/engines/extraction/json_extraction_engine.py +++ b/griptape/engines/extraction/json_extraction_engine.py @@ -6,7 +6,7 @@ from attrs import Factory, define, field -from griptape.artifacts import ErrorArtifact, ListArtifact, TextArtifact +from griptape.artifacts import ListArtifact, TextArtifact from griptape.common import PromptStack from griptape.common.prompt_stack.messages.message import Message from griptape.engines import BaseExtractionEngine @@ -32,18 +32,15 @@ def extract( *, rulesets: Optional[list[Ruleset]] = None, **kwargs, - ) -> ListArtifact | ErrorArtifact: - try: - return ListArtifact( - self._extract_rec( - cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], - 
[], - rulesets=rulesets, - ), - item_separator="\n", - ) - except Exception as e: - return ErrorArtifact(f"error extracting JSON: {e}") + ) -> ListArtifact: + return ListArtifact( + self._extract_rec( + cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], + [], + rulesets=rulesets, + ), + item_separator="\n", + ) def json_to_text_artifacts(self, json_input: str) -> list[TextArtifact]: json_matches = re.findall(self.JSON_PATTERN, json_input, re.DOTALL) diff --git a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py index 4f53cc5f9..7e4854d00 100644 --- a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py +++ b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py @@ -6,12 +6,12 @@ from attrs import Factory, define, field from griptape import utils -from griptape.artifacts import ErrorArtifact, TextArtifact from griptape.engines.rag.modules import BaseRetrievalRagModule if TYPE_CHECKING: from collections.abc import Sequence + from griptape.artifacts import TextArtifact from griptape.drivers import BaseVectorStoreDriver from griptape.engines.rag import RagContext from griptape.loaders import BaseTextLoader @@ -38,8 +38,6 @@ def run(self, context: RagContext) -> Sequence[TextArtifact]: loader_output = self.loader.load(source) - if isinstance(loader_output, ErrorArtifact): - raise Exception(loader_output.to_text() if loader_output.exception is None else loader_output.exception) self.vector_store_driver.upsert_text_artifacts({namespace: loader_output}) return self.process_query_output_fn(self.vector_store_driver.query(context.query, **query_params)) diff --git a/griptape/loaders/base_text_loader.py b/griptape/loaders/base_text_loader.py index 369f3f1fc..196cb0087 100644 --- a/griptape/loaders/base_text_loader.py +++ b/griptape/loaders/base_text_loader.py @@ -1,12 +1,11 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, Optional, Union, cast +from typing import TYPE_CHECKING, Any, Optional, cast from attrs import Factory, define, field from griptape.artifacts import TextArtifact -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.chunkers import BaseChunker, TextChunker from griptape.loaders import BaseLoader from griptape.tokenizers import OpenAiTokenizer @@ -40,11 +39,11 @@ class BaseTextLoader(BaseLoader, ABC): reference: Optional[Reference] = field(default=None, kw_only=True) @abstractmethod - def load(self, source: Any, *args, **kwargs) -> ErrorArtifact | list[TextArtifact]: ... + def load(self, source: Any, *args, **kwargs) -> list[TextArtifact]: ... 
- def load_collection(self, sources: list[Any], *args, **kwargs) -> dict[str, ErrorArtifact | list[TextArtifact]]: + def load_collection(self, sources: list[Any], *args, **kwargs) -> dict[str, list[TextArtifact]]: return cast( - dict[str, Union[ErrorArtifact, list[TextArtifact]]], + dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs), ) diff --git a/griptape/loaders/blob_loader.py b/griptape/loaders/blob_loader.py index fffabb849..d0099b47b 100644 --- a/griptape/loaders/blob_loader.py +++ b/griptape/loaders/blob_loader.py @@ -1,20 +1,20 @@ from __future__ import annotations -from typing import Any, Union, cast +from typing import Any, cast from attrs import define -from griptape.artifacts import BlobArtifact, ErrorArtifact +from griptape.artifacts import BlobArtifact from griptape.loaders import BaseLoader @define class BlobLoader(BaseLoader): - def load(self, source: Any, *args, **kwargs) -> BlobArtifact | ErrorArtifact: + def load(self, source: Any, *args, **kwargs) -> BlobArtifact: if self.encoding is None: return BlobArtifact(source) else: return BlobArtifact(source, encoding=self.encoding) - def load_collection(self, sources: list[bytes | str], *args, **kwargs) -> dict[str, BlobArtifact | ErrorArtifact]: - return cast(dict[str, Union[BlobArtifact, ErrorArtifact]], super().load_collection(sources, *args, **kwargs)) + def load_collection(self, sources: list[bytes | str], *args, **kwargs) -> dict[str, BlobArtifact]: + return cast(dict[str, BlobArtifact], super().load_collection(sources, *args, **kwargs)) diff --git a/griptape/loaders/csv_loader.py b/griptape/loaders/csv_loader.py index dc73ca52c..14dfe3e4a 100644 --- a/griptape/loaders/csv_loader.py +++ b/griptape/loaders/csv_loader.py @@ -2,11 +2,11 @@ import csv from io import StringIO -from typing import TYPE_CHECKING, Optional, Union, cast +from typing import TYPE_CHECKING, Optional, cast from attrs import define, field -from griptape.artifacts import CsvRowArtifact, ErrorArtifact +from griptape.artifacts import CsvRowArtifact from griptape.loaders import BaseLoader if TYPE_CHECKING: @@ -19,16 +19,13 @@ class CsvLoader(BaseLoader): delimiter: str = field(default=",", kw_only=True) encoding: str = field(default="utf-8", kw_only=True) - def load(self, source: bytes | str, *args, **kwargs) -> ErrorArtifact | list[CsvRowArtifact]: + def load(self, source: bytes | str, *args, **kwargs) -> list[CsvRowArtifact]: artifacts = [] if isinstance(source, bytes): - try: - source = source.decode(encoding=self.encoding) - except UnicodeDecodeError: - return ErrorArtifact(f"Failed to decode bytes to string using encoding: {self.encoding}") + source = source.decode(encoding=self.encoding) elif isinstance(source, (bytearray, memoryview)): - return ErrorArtifact(f"Unsupported source type: {type(source)}") + raise ValueError(f"Unsupported source type: {type(source)}") reader = csv.DictReader(StringIO(source), delimiter=self.delimiter) chunks = [CsvRowArtifact(row) for row in reader] @@ -47,8 +44,8 @@ def load_collection( sources: list[bytes | str], *args, **kwargs, - ) -> dict[str, ErrorArtifact | list[CsvRowArtifact]]: + ) -> dict[str, list[CsvRowArtifact]]: return cast( - dict[str, Union[ErrorArtifact, list[CsvRowArtifact]]], + dict[str, list[CsvRowArtifact]], super().load_collection(sources, *args, **kwargs), ) diff --git a/griptape/loaders/email_loader.py b/griptape/loaders/email_loader.py index 82f34bd8a..f6c9ca406 100644 --- a/griptape/loaders/email_loader.py +++ b/griptape/loaders/email_loader.py @@ -1,12 +1,11 @@ from 
__future__ import annotations import imaplib -import logging -from typing import Optional, Union, cast +from typing import Optional, cast from attrs import astuple, define, field -from griptape.artifacts import ErrorArtifact, ListArtifact, TextArtifact +from griptape.artifacts import ListArtifact, TextArtifact from griptape.loaders import BaseLoader from griptape.utils import import_optional_dependency @@ -33,50 +32,46 @@ class EmailQuery: username: str = field(kw_only=True) password: str = field(kw_only=True) - def load(self, source: EmailQuery, *args, **kwargs) -> ListArtifact | ErrorArtifact: + def load(self, source: EmailQuery, *args, **kwargs) -> ListArtifact: mailparser = import_optional_dependency("mailparser") label, key, search_criteria, max_count = astuple(source) artifacts = [] - try: - with imaplib.IMAP4_SSL(self.imap_url) as client: - client.login(self.username, self.password) + with imaplib.IMAP4_SSL(self.imap_url) as client: + client.login(self.username, self.password) - mailbox = client.select(f'"{label}"', readonly=True) - if mailbox[0] != "OK": - raise Exception(mailbox[1][0].decode()) + mailbox = client.select(f'"{label}"', readonly=True) + if mailbox[0] != "OK": + raise Exception(mailbox[1][0].decode()) - if key and search_criteria: - _typ, [message_numbers] = client.search(None, key, f'"{search_criteria}"') - messages_count = self._count_messages(message_numbers) - elif len(mailbox) > 1 and mailbox[1] and mailbox[1][0] is not None: - messages_count = int(mailbox[1][0]) - else: - raise Exception("unable to parse number of messages") + if key and search_criteria: + _typ, [message_numbers] = client.search(None, key, f'"{search_criteria}"') + messages_count = self._count_messages(message_numbers) + elif len(mailbox) > 1 and mailbox[1] and mailbox[1][0] is not None: + messages_count = int(mailbox[1][0]) + else: + raise Exception("unable to parse number of messages") - top_n = max(0, messages_count - max_count) if max_count else 0 - for i in range(messages_count, top_n, -1): - _result, data = client.fetch(str(i), "(RFC822)") + top_n = max(0, messages_count - max_count) if max_count else 0 + for i in range(messages_count, top_n, -1): + _result, data = client.fetch(str(i), "(RFC822)") - if data is None or not data or data[0] is None: - continue + if data is None or not data or data[0] is None: + continue - message = mailparser.parse_from_bytes(data[0][1]) + message = mailparser.parse_from_bytes(data[0][1]) - # Note: mailparser only populates the text_plain field - # if the message content type is explicitly set to 'text/plain'. - if message.text_plain: - artifacts.append(TextArtifact("\n".join(message.text_plain))) + # Note: mailparser only populates the text_plain field + # if the message content type is explicitly set to 'text/plain'. 
+ if message.text_plain: + artifacts.append(TextArtifact("\n".join(message.text_plain))) - client.close() + client.close() - return ListArtifact(artifacts) - except Exception as e: - logging.error(e) - return ErrorArtifact(f"error retrieving email: {e}") + return ListArtifact(artifacts) def _count_messages(self, message_numbers: bytes) -> int: return len(list(filter(None, message_numbers.decode().split(" ")))) - def load_collection(self, sources: list[EmailQuery], *args, **kwargs) -> dict[str, ListArtifact | ErrorArtifact]: - return cast(dict[str, Union[ListArtifact, ErrorArtifact]], super().load_collection(sources, *args, **kwargs)) + def load_collection(self, sources: list[EmailQuery], *args, **kwargs) -> dict[str, ListArtifact]: + return cast(dict[str, ListArtifact], super().load_collection(sources, *args, **kwargs)) diff --git a/griptape/loaders/pdf_loader.py b/griptape/loaders/pdf_loader.py index b38e2cd77..419bfabf4 100644 --- a/griptape/loaders/pdf_loader.py +++ b/griptape/loaders/pdf_loader.py @@ -1,12 +1,11 @@ from __future__ import annotations from io import BytesIO -from typing import Optional, Union, cast +from typing import Optional, cast from attrs import Factory, define, field from griptape.artifacts import TextArtifact -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.chunkers import PdfChunker from griptape.loaders import BaseTextLoader from griptape.utils import import_optional_dependency @@ -26,13 +25,13 @@ def load( password: Optional[str] = None, *args, **kwargs, - ) -> ErrorArtifact | list[TextArtifact]: + ) -> list[TextArtifact]: pypdf = import_optional_dependency("pypdf") reader = pypdf.PdfReader(BytesIO(source), strict=True, password=password) return self._text_to_artifacts("\n".join([p.extract_text() for p in reader.pages])) - def load_collection(self, sources: list[bytes], *args, **kwargs) -> dict[str, ErrorArtifact | list[TextArtifact]]: + def load_collection(self, sources: list[bytes], *args, **kwargs) -> dict[str, list[TextArtifact]]: return cast( - dict[str, Union[ErrorArtifact, list[TextArtifact]]], + dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs), ) diff --git a/griptape/loaders/text_loader.py b/griptape/loaders/text_loader.py index e356a2cdb..79e551a8e 100644 --- a/griptape/loaders/text_loader.py +++ b/griptape/loaders/text_loader.py @@ -1,11 +1,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional, Union, cast +from typing import TYPE_CHECKING, Optional, cast from attrs import Factory, define, field from griptape.artifacts import TextArtifact -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.chunkers import TextChunker from griptape.loaders import BaseTextLoader from griptape.tokenizers import OpenAiTokenizer @@ -36,14 +35,11 @@ class TextLoader(BaseTextLoader): embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) encoding: str = field(default="utf-8", kw_only=True) - def load(self, source: bytes | str, *args, **kwargs) -> ErrorArtifact | list[TextArtifact]: + def load(self, source: bytes | str, *args, **kwargs) -> list[TextArtifact]: if isinstance(source, bytes): - try: - source = source.decode(encoding=self.encoding) - except UnicodeDecodeError: - return ErrorArtifact(f"Failed to decode bytes to string using encoding: {self.encoding}") + source = source.decode(encoding=self.encoding) elif isinstance(source, (bytearray, memoryview)): - return ErrorArtifact(f"Unsupported source type: {type(source)}") + raise 
ValueError(f"Unsupported source type: {type(source)}") return self._text_to_artifacts(source) @@ -52,8 +48,8 @@ def load_collection( sources: list[bytes | str], *args, **kwargs, - ) -> dict[str, ErrorArtifact | list[TextArtifact]]: + ) -> dict[str, list[TextArtifact]]: return cast( - dict[str, Union[ErrorArtifact, list[TextArtifact]]], + dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs), ) diff --git a/griptape/loaders/web_loader.py b/griptape/loaders/web_loader.py index 3798f9488..720ab34a1 100644 --- a/griptape/loaders/web_loader.py +++ b/griptape/loaders/web_loader.py @@ -4,7 +4,6 @@ from attrs import Factory, define, field -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers import BaseWebScraperDriver, TrafilaturaWebScraperDriver from griptape.loaders import BaseTextLoader @@ -19,9 +18,6 @@ class WebLoader(BaseTextLoader): kw_only=True, ) - def load(self, source: str, *args, **kwargs) -> ErrorArtifact | list[TextArtifact]: - try: - single_chunk_text_artifact = self.web_scraper_driver.scrape_url(source) - return self._text_to_artifacts(single_chunk_text_artifact.value) - except Exception as e: - return ErrorArtifact(f"Error loading from source: {source}", exception=e) + def load(self, source: str, *args, **kwargs) -> list[TextArtifact]: + single_chunk_text_artifact = self.web_scraper_driver.scrape_url(source) + return self._text_to_artifacts(single_chunk_text_artifact.value) diff --git a/griptape/tasks/code_execution_task.py b/griptape/tasks/code_execution_task.py index 68e0d66ad..d627382fd 100644 --- a/griptape/tasks/code_execution_task.py +++ b/griptape/tasks/code_execution_task.py @@ -1,19 +1,18 @@ from __future__ import annotations -from typing import Callable +from typing import TYPE_CHECKING, Callable from attrs import define, field -from griptape.artifacts import BaseArtifact, ErrorArtifact from griptape.tasks import BaseTextInputTask +if TYPE_CHECKING: + from griptape.artifacts import BaseArtifact + @define class CodeExecutionTask(BaseTextInputTask): run_fn: Callable[[CodeExecutionTask], BaseArtifact] = field(kw_only=True) def run(self) -> BaseArtifact: - try: - return self.run_fn(self) - except Exception as e: - return ErrorArtifact(f"error during Code Execution Task: {e}") + return self.run_fn(self) diff --git a/griptape/tools/file_manager/tool.py b/griptape/tools/file_manager/tool.py index ece6a0e92..2ca14d565 100644 --- a/griptape/tools/file_manager/tool.py +++ b/griptape/tools/file_manager/tool.py @@ -93,9 +93,16 @@ def save_memory_artifacts_to_disk(self, params: dict) -> ErrorArtifact | InfoArt for artifact in list_artifact.value: formatted_file_name = f"{artifact.name}-{file_name}" if len(list_artifact) > 1 else file_name - result = self.file_manager_driver.save_file(os.path.join(dir_name, formatted_file_name), artifact.value) - if isinstance(result, ErrorArtifact): - return result + try: + self.file_manager_driver.save_file(os.path.join(dir_name, formatted_file_name), artifact.value) + except FileNotFoundError: + return ErrorArtifact("Path not found") + except IsADirectoryError: + return ErrorArtifact("Path is a directory") + except NotADirectoryError: + return ErrorArtifact("Not a directory") + except Exception as e: + return ErrorArtifact(f"Failed to load file: {str(e)}") return InfoArtifact("Successfully saved memory artifacts to disk") diff --git a/griptape/tools/variation_image_generation/tool.py b/griptape/tools/variation_image_generation/tool.py index 9691f6206..0d4456c2f 100644 --- 
a/griptape/tools/variation_image_generation/tool.py +++ b/griptape/tools/variation_image_generation/tool.py @@ -51,9 +51,6 @@ def image_variation_from_file(self, params: dict[str, dict[str, str]]) -> ImageA image_artifact = self.image_loader.load(Path(image_file).read_bytes()) - if isinstance(image_artifact, ErrorArtifact): - return image_artifact - return self._generate_variation(prompt, negative_prompt, image_artifact) @activity( diff --git a/griptape/tools/web_scraper/tool.py b/griptape/tools/web_scraper/tool.py index c27aaa066..2895d5e0d 100644 --- a/griptape/tools/web_scraper/tool.py +++ b/griptape/tools/web_scraper/tool.py @@ -24,9 +24,6 @@ def get_content(self, params: dict) -> ListArtifact | ErrorArtifact: try: result = self.web_loader.load(url) - if isinstance(result, ErrorArtifact): - return result - else: - return ListArtifact(result) + return ListArtifact(result) except Exception as e: return ErrorArtifact("Error getting page content: " + str(e)) diff --git a/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py b/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py index e3ec78eeb..84ce61768 100644 --- a/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py +++ b/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py @@ -5,7 +5,7 @@ import pytest from moto import mock_s3 -from griptape.artifacts import ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact +from griptape.artifacts import InfoArtifact, ListArtifact, TextArtifact from griptape.drivers import AmazonS3FileManagerDriver from griptape.loaders import TextLoader from tests.utils.aws import mock_aws_credentials @@ -135,23 +135,21 @@ def test_list_files(self, workdir, path, expected, driver): ("workdir", "path", "expected"), [ # non-existent paths - ("/", "bar", "Path not found"), - ("/", "bar/", "Path not found"), - ("/", "bitcoin.pdf", "Path not found"), + ("/", "bar", FileNotFoundError), + ("/", "bar/", FileNotFoundError), + ("/", "bitcoin.pdf", FileNotFoundError), # # paths to files (not directories) - ("/", "foo.txt", "Path is not a directory"), - ("/", "/foo.txt", "Path is not a directory"), - ("/resources", "bitcoin.pdf", "Path is not a directory"), - ("/resources", "/bitcoin.pdf", "Path is not a directory"), + ("/", "foo.txt", NotADirectoryError), + ("/", "/foo.txt", NotADirectoryError), + ("/resources", "bitcoin.pdf", NotADirectoryError), + ("/resources", "/bitcoin.pdf", NotADirectoryError), ], ) def test_list_files_failure(self, workdir, path, expected, driver): driver.workdir = workdir - artifact = driver.list_files(path) - - assert isinstance(artifact, ErrorArtifact) - assert artifact.value == expected + with pytest.raises(expected): + driver.list_files(path) def test_load_file(self, driver): artifact = driver.load_file("resources/bitcoin.pdf") @@ -163,28 +161,26 @@ def test_load_file(self, driver): ("workdir", "path", "expected"), [ # non-existent files or directories - ("/", "bitcoin.pdf", "Path not found"), - ("/resources", "foo.txt", "Path not found"), - ("/", "bar/", "Path is a directory"), + ("/", "bitcoin.pdf", FileNotFoundError), + ("/resources", "foo.txt", FileNotFoundError), + ("/", "bar/", IsADirectoryError), # existing files with trailing slash - ("/", "resources/bitcoin.pdf/", "Path is a directory"), - ("/resources", "bitcoin.pdf/", "Path is a directory"), + ("/", "resources/bitcoin.pdf/", IsADirectoryError), + ("/resources", "bitcoin.pdf/", IsADirectoryError), # directories -- not files - ("/", "", "Path is a directory"), - ("/", 
"/", "Path is a directory"), - ("/", "resources", "Path is a directory"), - ("/", "resources/", "Path is a directory"), - ("/resources", "", "Path is a directory"), - ("/resources", "/", "Path is a directory"), + ("/", "", IsADirectoryError), + ("/", "/", IsADirectoryError), + ("/", "resources", IsADirectoryError), + ("/", "resources/", IsADirectoryError), + ("/resources", "", IsADirectoryError), + ("/resources", "/", IsADirectoryError), ], ) def test_load_file_failure(self, workdir, path, expected, driver): driver.workdir = workdir - artifact = driver.load_file(path) - - assert isinstance(artifact, ErrorArtifact) - assert artifact.value == expected + with pytest.raises(expected): + driver.load_file(path) def test_load_file_with_encoding(self, driver): artifact = driver.load_file("resources/test.txt") @@ -193,15 +189,6 @@ def test_load_file_with_encoding(self, driver): assert len(artifact.value) == 1 assert isinstance(artifact.value[0], TextArtifact) - def test_load_file_with_encoding_failure(self, session, bucket): - driver = AmazonS3FileManagerDriver( - session=session, bucket=bucket, default_loader=TextLoader(encoding="utf-8"), loaders={} - ) - - artifact = driver.load_file("resources/bitcoin.pdf") - - assert isinstance(artifact, ErrorArtifact) - @pytest.mark.parametrize( ("workdir", "path", "content"), [ @@ -231,27 +218,25 @@ def test_save_file(self, workdir, path, content, driver, get_s3_value): ("workdir", "path", "expected"), [ # non-existent directories - ("/", "bar/", "Path is a directory"), - ("/", "/bar/", "Path is a directory"), + ("/", "bar/", IsADirectoryError), + ("/", "/bar/", IsADirectoryError), # # existing directories - ("/", "", "Path is a directory"), - ("/", "/", "Path is a directory"), - ("/", "resources", "Path is a directory"), - ("/", "resources/", "Path is a directory"), - ("/resources", "", "Path is a directory"), - ("/resources", "/", "Path is a directory"), + ("/", "", IsADirectoryError), + ("/", "/", IsADirectoryError), + ("/", "resources", IsADirectoryError), + ("/", "resources/", IsADirectoryError), + ("/resources", "", IsADirectoryError), + ("/resources", "/", IsADirectoryError), # existing files with trailing slash - ("/", "resources/bitcoin.pdf/", "Path is a directory"), - ("/resources", "bitcoin.pdf/", "Path is a directory"), + ("/", "resources/bitcoin.pdf/", IsADirectoryError), + ("/resources", "bitcoin.pdf/", IsADirectoryError), ], ) def test_save_file_failure(self, workdir, path, expected, temp_dir, driver, s3_client, bucket): driver.workdir = workdir - artifact = driver.save_file(path, "foobar") - - assert isinstance(artifact, ErrorArtifact) - assert artifact.value == expected + with pytest.raises(expected): + driver.save_file(path, "foobar") def test_save_file_with_encoding(self, session, bucket, get_s3_value): workdir = "/sub-folder" diff --git a/tests/unit/drivers/file_manager/test_local_file_manager_driver.py b/tests/unit/drivers/file_manager/test_local_file_manager_driver.py index a7c244f09..394a838a3 100644 --- a/tests/unit/drivers/file_manager/test_local_file_manager_driver.py +++ b/tests/unit/drivers/file_manager/test_local_file_manager_driver.py @@ -4,7 +4,7 @@ import pytest -from griptape.artifacts import ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact +from griptape.artifacts import InfoArtifact, ListArtifact, TextArtifact from griptape.drivers import LocalFileManagerDriver from griptape.loaders.text_loader import TextLoader @@ -107,24 +107,22 @@ def test_list_files(self, workdir, path, expected, temp_dir, driver): ("workdir", 
"path", "expected"), [ # non-existent paths - ("/", "bar", "Path not found"), - ("/", "bar/", "Path not found"), - ("/", "bitcoin.pdf", "Path not found"), + ("/", "bar", FileNotFoundError), + ("/", "bar/", FileNotFoundError), + ("/", "bitcoin.pdf", FileNotFoundError), # # paths to files (not directories) - ("/", "foo.txt", "Path is not a directory"), - ("/", "/foo.txt", "Path is not a directory"), - ("/resources", "bitcoin.pdf", "Path is not a directory"), - ("/resources", "/bitcoin.pdf", "Path is not a directory"), + ("/", "foo.txt", NotADirectoryError), + ("/", "/foo.txt", NotADirectoryError), + ("/resources", "bitcoin.pdf", NotADirectoryError), + ("/resources", "/bitcoin.pdf", NotADirectoryError), ], ) def test_list_files_failure(self, workdir, path, expected, temp_dir, driver): # Treat the workdir as an absolute path, but modify it to be relative to the temp_dir. driver.workdir = self._to_driver_workdir(temp_dir, workdir) - artifact = driver.list_files(path) - - assert isinstance(artifact, ErrorArtifact) - assert artifact.value == expected + with pytest.raises(expected): + driver.list_files(path) def test_load_file(self, driver: LocalFileManagerDriver): artifact = driver.load_file("resources/bitcoin.pdf") @@ -136,29 +134,27 @@ def test_load_file(self, driver: LocalFileManagerDriver): ("workdir", "path", "expected"), [ # # non-existent files or directories - ("/", "bitcoin.pdf", "Path not found"), - ("/resources", "foo.txt", "Path not found"), - ("/", "bar/", "Path is a directory"), + ("/", "bitcoin.pdf", FileNotFoundError), + ("/resources", "foo.txt", FileNotFoundError), + ("/", "bar/", IsADirectoryError), # existing files with trailing slash - ("/", "resources/bitcoin.pdf/", "Path is a directory"), - ("/resources", "bitcoin.pdf/", "Path is a directory"), + ("/", "resources/bitcoin.pdf/", IsADirectoryError), + ("/resources", "bitcoin.pdf/", IsADirectoryError), # directories -- not files - ("/", "", "Path is a directory"), - ("/", "/", "Path is a directory"), - ("/", "resources", "Path is a directory"), - ("/", "resources/", "Path is a directory"), - ("/resources", "", "Path is a directory"), - ("/resources", "/", "Path is a directory"), + ("/", "", IsADirectoryError), + ("/", "/", IsADirectoryError), + ("/", "resources", IsADirectoryError), + ("/", "resources/", IsADirectoryError), + ("/resources", "", IsADirectoryError), + ("/resources", "/", IsADirectoryError), ], ) def test_load_file_failure(self, workdir, path, expected, temp_dir, driver): # Treat the workdir as an absolute path, but modify it to be relative to the temp_dir. 
driver.workdir = self._to_driver_workdir(temp_dir, workdir) - artifact = driver.load_file(path) - - assert isinstance(artifact, ErrorArtifact) - assert artifact.value == expected + with pytest.raises(expected): + driver.load_file(path) def test_load_file_with_encoding(self, driver: LocalFileManagerDriver): artifact = driver.load_file("resources/test.txt") @@ -167,14 +163,15 @@ def test_load_file_with_encoding(self, driver: LocalFileManagerDriver): assert len(artifact.value) == 1 assert isinstance(artifact.value[0], TextArtifact) - def test_load_file_with_encoding_failure(self): + def test_load_file_with_encoding_failure(self, driver): driver = LocalFileManagerDriver( - default_loader=TextLoader(encoding="utf-8"), loaders={}, workdir=os.path.abspath(os.path.dirname(__file__)) + default_loader=TextLoader(encoding="utf-8"), + loaders={}, + workdir=os.path.normpath(os.path.abspath(os.path.dirname(__file__) + "../../../../")), ) - artifact = driver.load_file("resources/bitcoin.pdf") - - assert isinstance(artifact, ErrorArtifact) + with pytest.raises(UnicodeDecodeError): + driver.load_file("resources/bitcoin.pdf") @pytest.mark.parametrize( ("workdir", "path", "content"), @@ -205,28 +202,26 @@ def test_save_file(self, workdir, path, content, temp_dir, driver): ("workdir", "path", "expected"), [ # non-existent directories - ("/", "bar/", "Path is a directory"), - ("/", "/bar/", "Path is a directory"), + ("/", "bar/", IsADirectoryError), + ("/", "/bar/", IsADirectoryError), # existing directories - ("/", "", "Path is a directory"), - ("/", "/", "Path is a directory"), - ("/", "resources", "Path is a directory"), - ("/", "resources/", "Path is a directory"), - ("/resources", "", "Path is a directory"), - ("/resources", "/", "Path is a directory"), + ("/", "", IsADirectoryError), + ("/", "/", IsADirectoryError), + ("/", "resources", IsADirectoryError), + ("/", "resources/", IsADirectoryError), + ("/resources", "", IsADirectoryError), + ("/resources", "/", IsADirectoryError), # existing files with trailing slash - ("/", "resources/bitcoin.pdf/", "Path is a directory"), - ("/resources", "bitcoin.pdf/", "Path is a directory"), + ("/", "resources/bitcoin.pdf/", IsADirectoryError), + ("/resources", "bitcoin.pdf/", IsADirectoryError), ], ) def test_save_file_failure(self, workdir, path, expected, temp_dir, driver): # Treat the workdir as an absolute path, but modify it to be relative to the temp_dir. 
driver.workdir = self._to_driver_workdir(temp_dir, workdir) - artifact = driver.save_file(path, "foobar") - - assert isinstance(artifact, ErrorArtifact) - assert artifact.value == expected + with pytest.raises(expected): + driver.save_file(path, "foobar") def test_save_file_with_encoding(self, temp_dir): driver = LocalFileManagerDriver(default_loader=TextLoader(encoding="utf-8"), loaders={}, workdir=temp_dir) diff --git a/tests/unit/engines/extraction/test_json_extraction_engine.py b/tests/unit/engines/extraction/test_json_extraction_engine.py index 48430f1e5..9d6442579 100644 --- a/tests/unit/engines/extraction/test_json_extraction_engine.py +++ b/tests/unit/engines/extraction/test_json_extraction_engine.py @@ -1,7 +1,6 @@ import pytest from schema import Schema -from griptape.artifacts import ErrorArtifact from griptape.engines import JsonExtractionEngine from tests.mocks.mock_prompt_driver import MockPromptDriver @@ -25,7 +24,9 @@ def test_extract(self, engine): def test_extract_error(self, engine): engine.template_schema = lambda: "non serializable" - assert isinstance(engine.extract("foo"), ErrorArtifact) + + with pytest.raises(TypeError): + engine.extract("foo") def test_json_to_text_artifacts(self, engine): assert [ diff --git a/tests/unit/loaders/test_email_loader.py b/tests/unit/loaders/test_email_loader.py index f1e057453..ade062743 100644 --- a/tests/unit/loaders/test_email_loader.py +++ b/tests/unit/loaders/test_email_loader.py @@ -6,7 +6,7 @@ import pytest -from griptape.artifacts import ErrorArtifact, ListArtifact +from griptape.artifacts import ListArtifact from griptape.loaders import EmailLoader @@ -79,20 +79,16 @@ def test_load_returns_error_artifact_when_select_returns_non_ok(self, loader, mo mock_select.return_value = (None, [b"NOT-OK"]) # When - artifact = loader.load(EmailLoader.EmailQuery(label="INBOX")) - - # Then - assert isinstance(artifact, ErrorArtifact) + with pytest.raises(Exception, match="NOT-OK"): + loader.load(EmailLoader.EmailQuery(label="INBOX")) def test_load_returns_error_artifact_when_login_throws(self, loader, mock_login): # Given mock_login.side_effect = Exception("login-failed") # When - artifact = loader.load(EmailLoader.EmailQuery(label="INBOX")) - - # Then - assert isinstance(artifact, ErrorArtifact) + with pytest.raises(Exception, match="login-failed"): + loader.load(EmailLoader.EmailQuery(label="INBOX")) def test_load_collection(self, loader, mock_fetch): # Given diff --git a/tests/unit/loaders/test_web_loader.py b/tests/unit/loaders/test_web_loader.py index f264ce667..f7cccb666 100644 --- a/tests/unit/loaders/test_web_loader.py +++ b/tests/unit/loaders/test_web_loader.py @@ -1,6 +1,5 @@ import pytest -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.loaders import WebLoader from tests.mocks.mock_embedding_driver import MockEmbeddingDriver @@ -27,10 +26,8 @@ def test_load(self, loader): def test_load_exception(self, mocker, loader): mocker.patch("trafilatura.fetch_url", side_effect=Exception("error")) source = "https://github.com/griptape-ai/griptape" - artifact = loader.load(source) - - assert isinstance(artifact, ErrorArtifact) - assert f"Error loading from source: {source}" == artifact.value + with pytest.raises(Exception, match="error"): + loader.load(source) def test_load_collection(self, loader): artifacts = loader.load_collection( @@ -48,13 +45,11 @@ def test_load_collection(self, loader): def test_empty_page_string_response(self, loader, mocker): mocker.patch("trafilatura.extract", return_value="") - artifact = 
loader.load("https://example.com/") - assert isinstance(artifact, ErrorArtifact) - assert str(artifact.exception) == "can't extract page" + with pytest.raises(Exception, match="can't extract page"): + loader.load("https://example.com/") def test_empty_page_none_response(self, loader, mocker): mocker.patch("trafilatura.extract", return_value=None) - artifact = loader.load("https://example.com/") - assert isinstance(artifact, ErrorArtifact) - assert str(artifact.exception) == "can't extract page" + with pytest.raises(Exception, match="can't extract page"): + loader.load("https://example.com/") diff --git a/tests/unit/tasks/test_code_execution_task.py b/tests/unit/tasks/test_code_execution_task.py index e2c492fad..f0eb37ede 100644 --- a/tests/unit/tasks/test_code_execution_task.py +++ b/tests/unit/tasks/test_code_execution_task.py @@ -1,4 +1,6 @@ -from griptape.artifacts import BaseArtifact, ErrorArtifact, TextArtifact +import pytest + +from griptape.artifacts import BaseArtifact, TextArtifact from griptape.structures import Pipeline from griptape.tasks import CodeExecutionTask @@ -35,7 +37,5 @@ def test_noop_fn(self): def test_error_fn(self): task = CodeExecutionTask(run_fn=deliberate_exception) - result = task.run() - - assert isinstance(result, ErrorArtifact) - assert result.value == "error during Code Execution Task: Intentional Error" + with pytest.raises(ValueError): + task.run() diff --git a/tests/unit/tools/test_file_manager.py b/tests/unit/tools/test_file_manager.py index dccf2f1a2..469918a02 100644 --- a/tests/unit/tools/test_file_manager.py +++ b/tests/unit/tools/test_file_manager.py @@ -6,7 +6,6 @@ import pytest from griptape.artifacts import ListArtifact, TextArtifact -from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers.file_manager.local_file_manager_driver import LocalFileManagerDriver from griptape.loaders.text_loader import TextLoader from griptape.tools import FileManagerTool @@ -55,9 +54,8 @@ def test_load_files_from_disk_with_encoding_failure(self): ) ) - result = file_manager.load_files_from_disk({"values": {"paths": ["../../resources/bitcoin.pdf"]}}) - - assert isinstance(result.value[0], ErrorArtifact) + with pytest.raises(UnicodeDecodeError): + file_manager.load_files_from_disk({"values": {"paths": ["../../resources/bitcoin.pdf"]}}) def test_save_memory_artifacts_to_disk_for_one_artifact(self, temp_dir): memory = defaults.text_task_memory("Memory1") From ef61c53a0ece043ee3aae2bc384b366de680ca92 Mon Sep 17 00:00:00 2001 From: Matt Vallillo Date: Tue, 27 Aug 2024 10:49:10 -0500 Subject: [PATCH 04/39] Refactor Conversation Memory class and drivers (#1084) --- CHANGELOG.md | 10 ++- MIGRATION.md | 67 ++++++++++++++++++- .../src/amazon_dynamodb_sessions_1.py | 2 +- .../src/conversation_memory_drivers_1.py | 4 +- .../src/conversation_memory_drivers_2.py | 2 +- .../src/conversation_memory_drivers_3.py | 2 +- ...versation_memory_drivers_griptape_cloud.py | 2 +- .../configs/drivers/base_drivers_config.py | 6 +- griptape/configs/drivers/drivers_config.py | 7 +- ...zon_dynamodb_conversation_memory_driver.py | 27 +++----- .../base_conversation_memory_driver.py | 16 +++-- ...iptape_cloud_conversation_memory_driver.py | 62 ++++++++--------- .../local_conversation_memory_driver.py | 43 ++++++------ .../redis_conversation_memory_driver.py | 26 +++---- .../structure/base_conversation_memory.py | 29 ++++---- griptape/memory/structure/run.py | 16 +++-- griptape/tasks/prompt_task.py | 2 +- griptape/tasks/toolkit_task.py | 2 +- 
 .../test_amazon_bedrock_drivers_config.py | 10 ++-
 .../drivers/test_anthropic_drivers_config.py | 5 +-
 .../test_azure_openai_drivers_config.py | 5 +-
 .../drivers/test_cohere_drivers_config.py | 5 +-
 .../configs/drivers/test_drivers_config.py | 9 ++-
 .../drivers/test_google_drivers_config.py | 5 +-
 .../drivers/test_openai_driver_config.py | 5 +-
 ...est_dynamodb_conversation_memory_driver.py | 24 +++----
 ...iptape_cloud_conversation_memory_driver.py | 60 ++++++++++-------
 .../test_local_conversation_memory_driver.py | 50 ++++++++------
 .../test_redis_conversation_memory_driver.py | 22 +++---
 .../structure/test_conversation_memory.py | 8 +--
 30 files changed, 322 insertions(+), 211 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b066e2e44..555306f90 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,10 +5,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
 ## Unreleased
+
 ### Added
-- `BaseConversationMemory.prompt_driver` for use with autopruning.
 - Parameter `meta: dict` on `BaseEvent`.
 
+### Changed
+- **BREAKING**: Parameter `driver` on `BaseConversationMemory` renamed to `conversation_memory_driver`.
+- **BREAKING**: `BaseConversationMemory.add_to_prompt_stack` now takes a `prompt_driver` parameter.
+- **BREAKING**: `BaseConversationMemoryDriver.load` now returns `tuple[list[Run], Optional[dict]]`.
+- **BREAKING**: `BaseConversationMemoryDriver.store` now takes `runs: list[Run]` and `metadata: Optional[dict]` as input.
+- **BREAKING**: Parameter `file_path` on `LocalConversationMemoryDriver` renamed to `persist_file` and is now type `Optional[str]`.
+- `Defaults.drivers_config.conversation_memory_driver` now defaults to `LocalConversationMemoryDriver` instead of `None`.
+
 ### Fixed
 - Parsing streaming response with some OpenAi compatible services.
 
diff --git a/MIGRATION.md b/MIGRATION.md
index 75b7218fb..89ba95494 100644
--- a/MIGRATION.md
+++ b/MIGRATION.md
@@ -9,16 +9,79 @@ This document provides instructions for migrating your codebase to accommodate b
 Drivers, Loaders, and Engines will now raise exceptions rather than returning `ErrorArtifact`s. Update any logic that expects `ErrorArtifact` to handle exceptions instead.
 
+#### Before
 ```python
 artifacts = WebLoader().load("https://www.griptape.ai")
 if isinstance(artifacts, ErrorArtifact):
     raise Exception(artifacts.value)
+```
 
-# After
+#### After
+```python
 try:
     artifacts = WebLoader().load("https://www.griptape.ai")
 except Exception as e:
     raise e
+```
+
+### LocalConversationMemoryDriver `file_path` renamed to `persist_file`
+
+`LocalConversationMemoryDriver.file_path` has been renamed to `persist_file` and is now `Optional[str]`. If `persist_file` is not passed as a parameter, nothing will be persisted and no errors will be raised. `LocalConversationMemoryDriver` is now the default driver in the global `Defaults` object.
+
+#### Before
+```python
+local_driver_with_file = LocalConversationMemoryDriver(
+    file_path="my_file.json"
+)
+
+local_driver = LocalConversationMemoryDriver()
+
+assert local_driver_with_file.file_path == "my_file.json"
+assert local_driver.file_path == "griptape_memory.json"
+```
+
+#### After
+```python
+local_driver_with_file = LocalConversationMemoryDriver(
+    persist_file="my_file.json"
+)
+
+local_driver = LocalConversationMemoryDriver()
+
+assert local_driver_with_file.persist_file == "my_file.json"
+assert local_driver.persist_file is None
+```
+
+### Changes to BaseConversationMemoryDriver
+
+The `driver` parameter on `BaseConversationMemory` has been renamed to `conversation_memory_driver`, and the method signatures of `BaseConversationMemoryDriver.store` and `.load` have changed.
+
+#### Before
+```python
+memory_driver = LocalConversationMemoryDriver()
+
+conversation_memory = ConversationMemory(
+    driver=memory_driver
+)
+
+load_result: BaseConversationMemory = memory_driver.load()
+
+memory_driver.store(conversation_memory)
+```
+
+#### After
+```python
+memory_driver = LocalConversationMemoryDriver()
+
+conversation_memory = ConversationMemory(
+    conversation_memory_driver=memory_driver
+)
+
+load_result: tuple[list[Run], dict[str, Any]] = memory_driver.load()
+
+memory_driver.store(
+    conversation_memory.runs,
+    conversation_memory.meta
+)
+```
diff --git a/docs/examples/src/amazon_dynamodb_sessions_1.py b/docs/examples/src/amazon_dynamodb_sessions_1.py
index f7a6d0cd6..d44ec8f56 100644
--- a/docs/examples/src/amazon_dynamodb_sessions_1.py
+++ b/docs/examples/src/amazon_dynamodb_sessions_1.py
@@ -18,7 +18,7 @@
 structure = Agent(
     conversation_memory=ConversationMemory(
-        driver=AmazonDynamoDbConversationMemoryDriver(
+        conversation_memory_driver=AmazonDynamoDbConversationMemoryDriver(
             session=boto3.Session(
                 aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
                 aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py
index 27829d8d2..d87586d88 100644
--- a/docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py
+++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py
@@ -2,8 +2,8 @@
 from griptape.memory.structure import ConversationMemory
 from griptape.structures import Agent
 
-local_driver = LocalConversationMemoryDriver(file_path="memory.json")
-agent = Agent(conversation_memory=ConversationMemory(driver=local_driver))
+local_driver = LocalConversationMemoryDriver(persist_file="memory.json")
+agent = Agent(conversation_memory=ConversationMemory(conversation_memory_driver=local_driver))
 
 agent.run("Surfing is my favorite sport.")
 agent.run("What is my favorite sport?")
diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py
index 9db525b42..0c32c1cc5 100644
--- a/docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py
+++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py
@@ -13,7 +13,7 @@
     partition_key_value=conversation_id,
 )
 
-agent = Agent(conversation_memory=ConversationMemory(driver=dynamodb_driver))
+agent = Agent(conversation_memory=ConversationMemory(conversation_memory_driver=dynamodb_driver))
 
 agent.run("My name is Jeff.")
 agent.run("What is my name?")
diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py
b/docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py index 0f80d1393..5f0723940 100644 --- a/docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py +++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py @@ -14,7 +14,7 @@ conversation_id=conversation_id, ) -agent = Agent(conversation_memory=ConversationMemory(driver=redis_conversation_driver)) +agent = Agent(conversation_memory=ConversationMemory(conversation_memory_driver=redis_conversation_driver)) agent.run("My name is Jeff.") agent.run("What is my name?") diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py index 35492e06b..0723b5f75 100644 --- a/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py +++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py @@ -9,7 +9,7 @@ cloud_conversation_driver = GriptapeCloudConversationMemoryDriver( api_key=os.environ["GT_CLOUD_API_KEY"], ) -agent = Agent(conversation_memory=ConversationMemory(driver=cloud_conversation_driver)) +agent = Agent(conversation_memory=ConversationMemory(conversation_memory_driver=cloud_conversation_driver)) agent.run("My name is Jeff.") agent.run("What is my name?") diff --git a/griptape/configs/drivers/base_drivers_config.py b/griptape/configs/drivers/base_drivers_config.py index ec7503478..456249634 100644 --- a/griptape/configs/drivers/base_drivers_config.py +++ b/griptape/configs/drivers/base_drivers_config.py @@ -1,7 +1,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from attrs import define, field @@ -38,7 +38,7 @@ class BaseDriversConfig(ABC, SerializableMixin): _vector_store_driver: BaseVectorStoreDriver = field( default=None, kw_only=True, metadata={"serializable": True}, alias="vector_store_driver" ) - _conversation_memory_driver: Optional[BaseConversationMemoryDriver] = field( + _conversation_memory_driver: BaseConversationMemoryDriver = field( default=None, kw_only=True, metadata={"serializable": True}, alias="conversation_memory_driver" ) _text_to_speech_driver: BaseTextToSpeechDriver = field( @@ -70,7 +70,7 @@ def vector_store_driver(self) -> BaseVectorStoreDriver: ... @lazy_property() @abstractmethod - def conversation_memory_driver(self) -> Optional[BaseConversationMemoryDriver]: ... + def conversation_memory_driver(self) -> BaseConversationMemoryDriver: ... 
@lazy_property() @abstractmethod diff --git a/griptape/configs/drivers/drivers_config.py b/griptape/configs/drivers/drivers_config.py index ed68bcf8c..04edfd303 100644 --- a/griptape/configs/drivers/drivers_config.py +++ b/griptape/configs/drivers/drivers_config.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from attrs import define @@ -13,6 +13,7 @@ DummyPromptDriver, DummyTextToSpeechDriver, DummyVectorStoreDriver, + LocalConversationMemoryDriver, ) from griptape.utils.decorators import lazy_property @@ -52,8 +53,8 @@ def vector_store_driver(self) -> BaseVectorStoreDriver: return DummyVectorStoreDriver(embedding_driver=self.embedding_driver) @lazy_property() - def conversation_memory_driver(self) -> Optional[BaseConversationMemoryDriver]: - return None + def conversation_memory_driver(self) -> BaseConversationMemoryDriver: + return LocalConversationMemoryDriver() @lazy_property() def text_to_speech_driver(self) -> BaseTextToSpeechDriver: diff --git a/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py b/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py index b0c2485d6..0842870eb 100644 --- a/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py @@ -11,7 +11,7 @@ if TYPE_CHECKING: import boto3 - from griptape.memory.structure import BaseConversationMemory + from griptape.memory.structure import Run @define @@ -27,35 +27,26 @@ class AmazonDynamoDbConversationMemoryDriver(BaseConversationMemoryDriver): table: Any = field(init=False) def __attrs_post_init__(self) -> None: - dynamodb = self.session.resource("dynamodb") + self.table = self.session.resource("dynamodb").Table(self.table_name) - self.table = dynamodb.Table(self.table_name) - - def store(self, memory: BaseConversationMemory) -> None: + def store(self, runs: list[Run], metadata: dict) -> None: self.table.update_item( Key=self._get_key(), UpdateExpression="set #attr = :value", ExpressionAttributeNames={"#attr": self.value_attribute_key}, - ExpressionAttributeValues={":value": memory.to_json()}, + ExpressionAttributeValues={ + ":value": json.dumps(self._to_params_dict(runs, metadata)), + }, ) - def load(self) -> Optional[BaseConversationMemory]: - from griptape.memory.structure import BaseConversationMemory - + def load(self) -> tuple[list[Run], dict[str, Any]]: response = self.table.get_item(Key=self._get_key()) if "Item" in response and self.value_attribute_key in response["Item"]: memory_dict = json.loads(response["Item"][self.value_attribute_key]) - # needed to avoid recursive method calls - memory_dict["autoload"] = False - - memory = BaseConversationMemory.from_dict(memory_dict) - - memory.driver = self - - return memory + return self._from_params_dict(memory_dict) else: - return None + return [], {} def _get_key(self) -> dict[str, str | int]: key: dict[str, str | int] = {self.partition_key: self.partition_key_value} diff --git a/griptape/drivers/memory/conversation/base_conversation_memory_driver.py b/griptape/drivers/memory/conversation/base_conversation_memory_driver.py index 1caeb902f..ea0a171f2 100644 --- a/griptape/drivers/memory/conversation/base_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/base_conversation_memory_driver.py @@ -1,17 +1,25 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import 
TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Any from griptape.mixins import SerializableMixin if TYPE_CHECKING: - from griptape.memory.structure import BaseConversationMemory + from griptape.memory.structure import Run class BaseConversationMemoryDriver(SerializableMixin, ABC): @abstractmethod - def store(self, memory: BaseConversationMemory) -> None: ... + def store(self, runs: list[Run], metadata: dict[str, Any]) -> None: ... @abstractmethod - def load(self) -> Optional[BaseConversationMemory]: ... + def load(self) -> tuple[list[Run], dict[str, Any]]: ... + + def _to_params_dict(self, runs: list[Run], metadata: dict[str, Any]) -> dict: + return {"runs": [run.to_dict() for run in runs], "metadata": metadata} + + def _from_params_dict(self, params_dict: dict[str, Any]) -> tuple[list[Run], dict[str, Any]]: + from griptape.memory.structure import Run + + return [Run.from_dict(run) for run in params_dict.get("runs", [])], params_dict.get("metadata", {}) diff --git a/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py index 2ea1d0d1a..3aac74090 100644 --- a/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py @@ -2,7 +2,7 @@ import os import uuid -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Any, Optional from urllib.parse import urljoin import requests @@ -10,9 +10,10 @@ from griptape.artifacts import BaseArtifact from griptape.drivers import BaseConversationMemoryDriver +from griptape.utils import dict_merge if TYPE_CHECKING: - from griptape.memory.structure import BaseConversationMemory + from griptape.memory.structure import Run @define(kw_only=True) @@ -55,26 +56,38 @@ def validate_api_key(self, _: Attribute, value: Optional[str]) -> str: raise ValueError(f"{self.__class__.__name__} requires an API key") return value - def store(self, memory: BaseConversationMemory) -> None: - # serliaze the run artifacts to json strings - messages = [{"input": run.input.to_json(), "output": run.output.to_json()} for run in memory.runs] + def store(self, runs: list[Run], metadata: dict[str, Any]) -> None: + # serialize the run artifacts to json strings + messages = [ + dict_merge( + { + "input": run.input.to_json(), + "output": run.output.to_json(), + "metadata": {"run_id": run.id}, + }, + run.meta, + ) + for run in runs + ] - # serialize the metadata to a json string - # remove runs because they are already stored as Messages - metadata = memory.to_dict() - del metadata["runs"] + body = dict_merge( + { + "messages": messages, + }, + metadata, + ) # patch the Thread with the new messages and metadata # all old Messages are replaced with the new ones response = requests.patch( self._get_url(f"/threads/{self.thread_id}"), - json={"messages": messages, "metadata": metadata}, + json=body, headers=self.headers, ) response.raise_for_status() - def load(self) -> BaseConversationMemory: - from griptape.memory.structure import BaseConversationMemory, ConversationMemory, Run + def load(self) -> tuple[list[Run], dict[str, Any]]: + from griptape.memory.structure import Run # get the Messages from the Thread messages_response = requests.get(self._get_url(f"/threads/{self.thread_id}/messages"), headers=self.headers) @@ -86,33 +99,16 @@ def load(self) -> BaseConversationMemory: thread_response.raise_for_status() thread_response = 
thread_response.json() - messages = messages_response.get("messages", []) - runs = [ Run( - id=m["message_id"], + id=m["metadata"].pop("run_id"), + meta=m["metadata"], input=BaseArtifact.from_json(m["input"]), output=BaseArtifact.from_json(m["output"]), ) - for m in messages + for m in messages_response.get("messages", []) ] - metadata = thread_response.get("metadata") - - # the metadata will contain the serialized - # ConversationMemory object with the runs removed - # autoload=False to prevent recursively loading the memory - if metadata is not None and metadata != {}: - memory = BaseConversationMemory.from_dict( - { - **metadata, - "runs": [run.to_dict() for run in runs], - "autoload": False, - } - ) - memory.driver = self - return memory - # no metadata found, return a new ConversationMemory object - return ConversationMemory(runs=runs, autoload=False, driver=self) + return runs, thread_response.get("metadata", {}) def _get_thread_id(self) -> str: res = requests.post(self._get_url("/threads"), json={"name": uuid.uuid4().hex}, headers=self.headers) diff --git a/griptape/drivers/memory/conversation/local_conversation_memory_driver.py b/griptape/drivers/memory/conversation/local_conversation_memory_driver.py index 9a79accc3..c8ea540be 100644 --- a/griptape/drivers/memory/conversation/local_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/local_conversation_memory_driver.py @@ -3,34 +3,33 @@ import json import os from pathlib import Path -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Any, Optional from attrs import define, field from griptape.drivers import BaseConversationMemoryDriver if TYPE_CHECKING: - from griptape.memory.structure import BaseConversationMemory + from griptape.memory.structure import Run -@define +@define(kw_only=True) class LocalConversationMemoryDriver(BaseConversationMemoryDriver): - file_path: str = field(default="griptape_memory.json", kw_only=True, metadata={"serializable": True}) - - def store(self, memory: BaseConversationMemory) -> None: - Path(self.file_path).write_text(memory.to_json()) - - def load(self) -> Optional[BaseConversationMemory]: - from griptape.memory.structure import BaseConversationMemory - - if not os.path.exists(self.file_path): - return None - - memory_dict = json.loads(Path(self.file_path).read_text()) - # needed to avoid recursive method calls - memory_dict["autoload"] = False - memory = BaseConversationMemory.from_dict(memory_dict) - - memory.driver = self - - return memory + persist_file: Optional[str] = field(default=None, metadata={"serializable": True}) + + def store(self, runs: list[Run], metadata: dict[str, Any]) -> None: + if self.persist_file is not None: + Path(self.persist_file).write_text(json.dumps(self._to_params_dict(runs, metadata))) + + def load(self) -> tuple[list[Run], dict[str, Any]]: + if ( + self.persist_file is not None + and os.path.exists(self.persist_file) + and (loaded_str := Path(self.persist_file).read_text()) is not None + ): + try: + return self._from_params_dict(json.loads(loaded_str)) + except Exception as e: + raise ValueError(f"Unable to load data from {self.persist_file}") from e + + return [], {} diff --git a/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py b/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py index 8741cda50..f30189e37 100644 --- a/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py @@ 
-2,17 +2,17 @@ import json import uuid -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Any, Optional from attrs import Factory, define, field from griptape.drivers import BaseConversationMemoryDriver -from griptape.utils.import_utils import import_optional_dependency +from griptape.utils import import_optional_dependency if TYPE_CHECKING: from redis import Redis - from griptape.memory.structure import BaseConversationMemory + from griptape.memory.structure import Run @define @@ -52,19 +52,11 @@ class RedisConversationMemoryDriver(BaseConversationMemoryDriver): ), ) - def store(self, memory: BaseConversationMemory) -> None: - self.client.hset(self.index, self.conversation_id, memory.to_json()) + def store(self, runs: list[Run], metadata: dict[str, Any]) -> None: + self.client.hset(self.index, self.conversation_id, json.dumps(self._to_params_dict(runs, metadata))) - def load(self) -> Optional[BaseConversationMemory]: - from griptape.memory.structure import BaseConversationMemory - - key = self.index - memory_json = self.client.hget(key, self.conversation_id) + def load(self) -> tuple[list[Run], dict[str, Any]]: + memory_json = self.client.hget(self.index, self.conversation_id) if memory_json is not None: - memory_dict = json.loads(memory_json) - # needed to avoid recursive method calls - memory_dict["autoload"] = False - memory = BaseConversationMemory.from_dict(memory_dict) - memory.driver = self - return memory - return None + return self._from_params_dict(json.loads(memory_json)) + return [], {} diff --git a/griptape/memory/structure/base_conversation_memory.py b/griptape/memory/structure/base_conversation_memory.py index 15d0a9e99..92f5bd942 100644 --- a/griptape/memory/structure/base_conversation_memory.py +++ b/griptape/memory/structure/base_conversation_memory.py @@ -1,13 +1,14 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Any, Optional from attrs import Factory, define, field from griptape.common import PromptStack from griptape.configs import Defaults from griptape.mixins import SerializableMixin +from griptape.utils import dict_merge if TYPE_CHECKING: from griptape.drivers import BaseConversationMemoryDriver, BasePromptDriver @@ -16,22 +17,20 @@ @define class BaseConversationMemory(SerializableMixin, ABC): - driver: Optional[BaseConversationMemoryDriver] = field( + conversation_memory_driver: BaseConversationMemoryDriver = field( default=Factory(lambda: Defaults.drivers_config.conversation_memory_driver), kw_only=True ) - prompt_driver: BasePromptDriver = field( - default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True - ) runs: list[Run] = field(factory=list, kw_only=True, metadata={"serializable": True}) + meta: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True}) autoload: bool = field(default=True, kw_only=True) autoprune: bool = field(default=True, kw_only=True) max_runs: Optional[int] = field(default=None, kw_only=True, metadata={"serializable": True}) def __attrs_post_init__(self) -> None: - if self.driver and self.autoload: - memory = self.driver.load() - if memory is not None: - [self.add_run(r) for r in memory.runs] + if self.autoload: + runs, meta = self.conversation_memory_driver.load() + self.runs.extend(runs) + self.meta = dict_merge(self.meta, meta) def before_add_run(self) -> None: pass @@ -44,8 +43,7 @@ def add_run(self, run: Run) -> BaseConversationMemory: return self def 
after_add_run(self) -> None: - if self.driver: - self.driver.store(self) + self.conversation_memory_driver.store(self.runs, self.meta) @abstractmethod def try_add_run(self, run: Run) -> None: ... @@ -53,13 +51,16 @@ def try_add_run(self, run: Run) -> None: ... @abstractmethod def to_prompt_stack(self, last_n: Optional[int] = None) -> PromptStack: ... - def add_to_prompt_stack(self, prompt_stack: PromptStack, index: Optional[int] = None) -> PromptStack: + def add_to_prompt_stack( + self, prompt_driver: BasePromptDriver, prompt_stack: PromptStack, index: Optional[int] = None + ) -> PromptStack: """Add the Conversation Memory runs to the Prompt Stack by modifying the messages in place. If autoprune is enabled, this will fit as many Conversation Memory runs into the Prompt Stack as possible without exceeding the token limit. Args: + prompt_driver: The Prompt Driver to use for token counting. prompt_stack: The Prompt Stack to add the Conversation Memory to. index: Optional index to insert the Conversation Memory runs at. Defaults to appending to the end of the Prompt Stack. @@ -82,8 +83,8 @@ def add_to_prompt_stack(self, prompt_stack: PromptStack, index: Optional[int] = temp_stack.messages.extend(memory_inputs) # Convert the Prompt Stack into tokens left. - tokens_left = self.prompt_driver.tokenizer.count_input_tokens_left( - self.prompt_driver.prompt_stack_to_string(temp_stack), + tokens_left = prompt_driver.tokenizer.count_input_tokens_left( + prompt_driver.prompt_stack_to_string(temp_stack), ) if tokens_left > 0: # There are still tokens left, no need to prune. diff --git a/griptape/memory/structure/run.py b/griptape/memory/structure/run.py index 3d8ca3869..5d2a182ad 100644 --- a/griptape/memory/structure/run.py +++ b/griptape/memory/structure/run.py @@ -1,13 +1,19 @@ +from __future__ import annotations + import uuid +from typing import TYPE_CHECKING, Optional from attrs import Factory, define, field -from griptape.artifacts.base_artifact import BaseArtifact from griptape.mixins import SerializableMixin +if TYPE_CHECKING: + from griptape.artifacts import BaseArtifact + -@define +@define(kw_only=True) class Run(SerializableMixin): - id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={"serializable": True}) - input: BaseArtifact = field(kw_only=True, metadata={"serializable": True}) - output: BaseArtifact = field(kw_only=True, metadata={"serializable": True}) + id: str = field(default=Factory(lambda: uuid.uuid4().hex), metadata={"serializable": True}) + meta: Optional[dict] = field(default=None, metadata={"serializable": True}) + input: BaseArtifact = field(metadata={"serializable": True}) + output: BaseArtifact = field(metadata={"serializable": True}) diff --git a/griptape/tasks/prompt_task.py b/griptape/tasks/prompt_task.py index 17a73e4cd..9c0060039 100644 --- a/griptape/tasks/prompt_task.py +++ b/griptape/tasks/prompt_task.py @@ -58,7 +58,7 @@ def prompt_stack(self) -> PromptStack: if memory is not None: # insert memory into the stack right before the user messages - memory.add_to_prompt_stack(stack, 1 if system_template else 0) + memory.add_to_prompt_stack(self.prompt_driver, stack, 1 if system_template else 0) return stack diff --git a/griptape/tasks/toolkit_task.py b/griptape/tasks/toolkit_task.py index 24607a352..ff1194440 100644 --- a/griptape/tasks/toolkit_task.py +++ b/griptape/tasks/toolkit_task.py @@ -115,7 +115,7 @@ def prompt_stack(self) -> PromptStack: if memory: # inserting at index 1 to place memory right after system prompt - 
memory.add_to_prompt_stack(stack, 1) + memory.add_to_prompt_stack(self.prompt_driver, stack, 1) return stack diff --git a/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py index 129fe281f..d74c273f6 100644 --- a/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py +++ b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py @@ -25,7 +25,10 @@ def config_with_values(self): def test_to_dict(self, config): assert config.to_dict() == { - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "embedding_driver": {"model": "amazon.titan-embed-text-v1", "type": "AmazonBedrockTitanEmbeddingDriver"}, "image_generation_driver": { "image_generation_model_driver": { @@ -77,7 +80,10 @@ def test_from_dict_with_values(self, config_with_values): def test_to_dict_with_values(self, config_with_values): assert config_with_values.to_dict() == { - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "embedding_driver": {"model": "amazon.titan-embed-text-v1", "type": "AmazonBedrockTitanEmbeddingDriver"}, "image_generation_driver": { "image_generation_model_driver": { diff --git a/tests/unit/configs/drivers/test_anthropic_drivers_config.py b/tests/unit/configs/drivers/test_anthropic_drivers_config.py index b2335d92a..2bdb9497d 100644 --- a/tests/unit/configs/drivers/test_anthropic_drivers_config.py +++ b/tests/unit/configs/drivers/test_anthropic_drivers_config.py @@ -45,7 +45,10 @@ def test_to_dict(self, config): "input_type": "document", }, }, - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, } diff --git a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py index 5c514c947..01886962e 100644 --- a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py +++ b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py @@ -36,7 +36,10 @@ def test_to_dict(self, config): "user": "", "use_native_tools": True, }, - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "embedding_driver": { "base_url": None, "model": "text-embedding-3-small", diff --git a/tests/unit/configs/drivers/test_cohere_drivers_config.py b/tests/unit/configs/drivers/test_cohere_drivers_config.py index 3c267d73d..0d16d1ab2 100644 --- a/tests/unit/configs/drivers/test_cohere_drivers_config.py +++ b/tests/unit/configs/drivers/test_cohere_drivers_config.py @@ -13,7 +13,10 @@ def test_to_dict(self, config): "type": "CohereDriversConfig", "image_generation_driver": {"type": "DummyImageGenerationDriver"}, "image_query_driver": {"type": "DummyImageQueryDriver"}, - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, "prompt_driver": { diff --git a/tests/unit/configs/drivers/test_drivers_config.py b/tests/unit/configs/drivers/test_drivers_config.py index 
20cc0926c..e2476c437 100644 --- a/tests/unit/configs/drivers/test_drivers_config.py +++ b/tests/unit/configs/drivers/test_drivers_config.py @@ -18,7 +18,10 @@ def test_to_dict(self, config): "stream": False, "use_native_tools": False, }, - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "embedding_driver": {"type": "DummyEmbeddingDriver"}, "image_generation_driver": {"type": "DummyImageGenerationDriver"}, "image_query_driver": {"type": "DummyImageQueryDriver"}, @@ -56,7 +59,7 @@ def test_lazy_init(self): assert Defaults.drivers_config.image_query_driver is not None assert Defaults.drivers_config.embedding_driver is not None assert Defaults.drivers_config.vector_store_driver is not None - assert Defaults.drivers_config.conversation_memory_driver is None + assert Defaults.drivers_config.conversation_memory_driver is not None assert Defaults.drivers_config.text_to_speech_driver is not None assert Defaults.drivers_config.audio_transcription_driver is not None @@ -65,6 +68,6 @@ def test_lazy_init(self): assert Defaults.drivers_config._image_query_driver is not None assert Defaults.drivers_config._embedding_driver is not None assert Defaults.drivers_config._vector_store_driver is not None - assert Defaults.drivers_config._conversation_memory_driver is None + assert Defaults.drivers_config._conversation_memory_driver is not None assert Defaults.drivers_config._text_to_speech_driver is not None assert Defaults.drivers_config._audio_transcription_driver is not None diff --git a/tests/unit/configs/drivers/test_google_drivers_config.py b/tests/unit/configs/drivers/test_google_drivers_config.py index f6df1afef..7a752c6de 100644 --- a/tests/unit/configs/drivers/test_google_drivers_config.py +++ b/tests/unit/configs/drivers/test_google_drivers_config.py @@ -43,7 +43,10 @@ def test_to_dict(self, config): "title": None, }, }, - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, } diff --git a/tests/unit/configs/drivers/test_openai_driver_config.py b/tests/unit/configs/drivers/test_openai_driver_config.py index 2425b178f..40a755e50 100644 --- a/tests/unit/configs/drivers/test_openai_driver_config.py +++ b/tests/unit/configs/drivers/test_openai_driver_config.py @@ -28,7 +28,10 @@ def test_to_dict(self, config): "user": "", "use_native_tools": True, }, - "conversation_memory_driver": None, + "conversation_memory_driver": { + "type": "LocalConversationMemoryDriver", + "persist_file": None, + }, "embedding_driver": { "base_url": None, "model": "text-embedding-3-small", diff --git a/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py index f1a5df1be..96e2ca969 100644 --- a/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py @@ -46,7 +46,7 @@ def test_store(self): value_attribute_key=self.VALUE_ATTRIBUTE_KEY, partition_key_value=self.PARTITION_KEY_VALUE, ) - memory = ConversationMemory(driver=memory_driver) + memory = ConversationMemory(conversation_memory_driver=memory_driver) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ 
-72,7 +72,7 @@ def test_store_with_sort_key(self): sort_key="sortKey", sort_key_value="foo", ) - memory = ConversationMemory(driver=memory_driver) + memory = ConversationMemory(conversation_memory_driver=memory_driver) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -93,7 +93,7 @@ def test_load(self): value_attribute_key=self.VALUE_ATTRIBUTE_KEY, partition_key_value=self.PARTITION_KEY_VALUE, ) - memory = ConversationMemory(driver=memory_driver) + memory = ConversationMemory(conversation_memory_driver=memory_driver, meta={"foo": "bar"}) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -101,12 +101,10 @@ def test_load(self): pipeline.run() pipeline.run() - new_memory = memory_driver.load() + runs, metadata = memory_driver.load() - assert new_memory.type == "ConversationMemory" - assert len(new_memory.runs) == 2 - assert new_memory.runs[0].input.value == "test" - assert new_memory.runs[0].output.value == "mock output" + assert len(runs) == 2 + assert metadata == {"foo": "bar"} def test_load_with_sort_key(self): memory_driver = AmazonDynamoDbConversationMemoryDriver( @@ -118,7 +116,7 @@ def test_load_with_sort_key(self): sort_key="sortKey", sort_key_value="foo", ) - memory = ConversationMemory(driver=memory_driver) + memory = ConversationMemory(conversation_memory_driver=memory_driver, meta={"foo": "bar"}) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -126,9 +124,7 @@ def test_load_with_sort_key(self): pipeline.run() pipeline.run() - new_memory = memory_driver.load() + runs, metadata = memory_driver.load() - assert new_memory.type == "ConversationMemory" - assert len(new_memory.runs) == 2 - assert new_memory.runs[0].input.value == "test" - assert new_memory.runs[0].output.value == "mock output" + assert len(runs) == 2 + assert metadata == {"foo": "bar"} diff --git a/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py index 707132ef5..dccdc9fd0 100644 --- a/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py @@ -1,10 +1,11 @@ import json +import os import pytest from griptape.artifacts import BaseArtifact from griptape.drivers import GriptapeCloudConversationMemoryDriver -from griptape.memory.structure import BaseConversationMemory, ConversationMemory, Run, SummaryConversationMemory +from griptape.memory.structure import Run TEST_CONVERSATION = '{"type": "SummaryConversationMemory", "runs": [{"type": "Run", "id": "729ca6be5d79433d9762eb06dfd677e2", "input": {"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}, "output": {"type": "TextArtifact", "id": "123", "value": "Hello! How can I assist you today?"}}], "max_runs": 2}' @@ -23,6 +24,7 @@ def get(*args, **kwargs): "input": '{"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}', "output": '{"type": "TextArtifact", "id": "123", "value": "Hello! 
How can I assist you today?"}', "index": 0, + "metadata": {"run_id": "1234"}, } ] }, @@ -32,7 +34,7 @@ def get(*args, **kwargs): return mocker.Mock( raise_for_status=lambda: None, json=lambda: { - "metadata": json.loads(TEST_CONVERSATION), + "metadata": {"foo": "bar"}, "name": "test", "thread_id": "test_metadata", } @@ -44,12 +46,22 @@ def get(*args, **kwargs): "requests.get", side_effect=get, ) + + def post(*args, **kwargs): + if str(args[0]).endswith("/threads"): + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: {"thread_id": "test", "name": "test"}, + ) + else: + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: {"message_id": "test"}, + ) + mocker.patch( "requests.post", - return_value=mocker.Mock( - raise_for_status=lambda: None, - json=lambda: {"thread_id": "test", "name": "test"}, - ), + side_effect=post, ) mocker.patch( "requests.patch", @@ -66,26 +78,28 @@ def test_no_api_key(self): with pytest.raises(ValueError): GriptapeCloudConversationMemoryDriver(api_key=None, thread_id="test") - def test_no_thread_id(self): + def test_thread_id(self): driver = GriptapeCloudConversationMemoryDriver(api_key="test") assert driver.thread_id == "test" + os.environ["GT_CLOUD_THREAD_ID"] = "test_env" + driver = GriptapeCloudConversationMemoryDriver(api_key="test") + assert driver.thread_id == "test_env" + driver = GriptapeCloudConversationMemoryDriver(api_key="test", thread_id="test_init") + assert driver.thread_id == "test_init" - def test_store(self, driver): - memory = ConversationMemory( - runs=[ - Run(input=BaseArtifact.from_dict(run["input"]), output=BaseArtifact.from_dict(run["output"])) - for run in json.loads(TEST_CONVERSATION)["runs"] - ], - ) - assert driver.store(memory) is None + def test_store(self, driver: GriptapeCloudConversationMemoryDriver): + runs = [ + Run(input=BaseArtifact.from_dict(run["input"]), output=BaseArtifact.from_dict(run["output"])) + for run in json.loads(TEST_CONVERSATION)["runs"] + ] + assert driver.store(runs, {}) is None def test_load(self, driver): - memory = driver.load() - assert isinstance(memory, BaseConversationMemory) - assert len(memory.runs) == 1 - - def test_load_metadata(self, driver): + runs, metadata = driver.load() + assert len(runs) == 1 + assert runs[0].id == "1234" + assert metadata == {} driver.thread_id = "test_metadata" - memory = driver.load() - assert isinstance(memory, SummaryConversationMemory) - assert len(memory.runs) == 1 + runs, metadata = driver.load() + assert len(runs) == 1 + assert metadata == {"foo": "bar"} diff --git a/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py index dff66d0fc..52e8d31e2 100644 --- a/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py @@ -1,5 +1,6 @@ import contextlib import os +from pathlib import Path import pytest @@ -21,26 +22,23 @@ def _run_before_and_after_tests(self): self.__delete_file(self.MEMORY_FILE_PATH) def test_store(self): - memory_driver = LocalConversationMemoryDriver(file_path=self.MEMORY_FILE_PATH) - memory = ConversationMemory(driver=memory_driver, autoload=False) + memory_driver = LocalConversationMemoryDriver(persist_file=self.MEMORY_FILE_PATH) + memory = ConversationMemory(conversation_memory_driver=memory_driver, autoload=False) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) - try: - 
with open(self.MEMORY_FILE_PATH): - raise AssertionError() - except FileNotFoundError: - assert True + assert not os.path.exists(self.MEMORY_FILE_PATH) pipeline.run() - with open(self.MEMORY_FILE_PATH): - assert True + assert os.path.exists(self.MEMORY_FILE_PATH) def test_load(self): - memory_driver = LocalConversationMemoryDriver(file_path=self.MEMORY_FILE_PATH) - memory = ConversationMemory(driver=memory_driver, autoload=False, max_runs=5) + memory_driver = LocalConversationMemoryDriver(persist_file=self.MEMORY_FILE_PATH) + memory = ConversationMemory( + conversation_memory_driver=memory_driver, autoload=False, max_runs=5, meta={"foo": "bar"} + ) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -48,17 +46,25 @@ def test_load(self): pipeline.run() pipeline.run() - new_memory = memory_driver.load() + runs, metadata = memory_driver.load() - assert new_memory.type == "ConversationMemory" - assert len(new_memory.runs) == 2 - assert new_memory.runs[0].input.value == "test" - assert new_memory.runs[0].output.value == "mock output" - assert new_memory.max_runs == 5 + assert len(runs) == 2 + assert runs[0].input.value == "test" + assert runs[0].output.value == "mock output" + assert metadata == {"foo": "bar"} + + runs[0].input.value = "new test" + + def test_load_bad_data(self): + Path(self.MEMORY_FILE_PATH).write_text("bad data") + memory_driver = LocalConversationMemoryDriver(persist_file=self.MEMORY_FILE_PATH) + + with pytest.raises(ValueError, match="Unable to load data from test_memory.json"): + ConversationMemory(conversation_memory_driver=memory_driver) def test_autoload(self): - memory_driver = LocalConversationMemoryDriver(file_path=self.MEMORY_FILE_PATH) - memory = ConversationMemory(driver=memory_driver) + memory_driver = LocalConversationMemoryDriver(persist_file=self.MEMORY_FILE_PATH) + memory = ConversationMemory(conversation_memory_driver=memory_driver, autoload=False) pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -66,13 +72,13 @@ def test_autoload(self): pipeline.run() pipeline.run() - autoloaded_memory = ConversationMemory(driver=memory_driver) + autoloaded_memory = ConversationMemory(conversation_memory_driver=memory_driver) assert autoloaded_memory.type == "ConversationMemory" assert len(autoloaded_memory.runs) == 2 assert autoloaded_memory.runs[0].input.value == "test" assert autoloaded_memory.runs[0].output.value == "mock output" - def __delete_file(self, file_path) -> None: + def __delete_file(self, persist_file) -> None: with contextlib.suppress(FileNotFoundError): - os.remove(file_path) + os.remove(persist_file) diff --git a/tests/unit/drivers/memory/conversation/test_redis_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_redis_conversation_memory_driver.py index 4a92a28a8..e7ef45c42 100644 --- a/tests/unit/drivers/memory/conversation/test_redis_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_redis_conversation_memory_driver.py @@ -4,7 +4,8 @@ from griptape.drivers.memory.conversation.redis_conversation_memory_driver import RedisConversationMemoryDriver from griptape.memory.structure.base_conversation_memory import BaseConversationMemory -TEST_CONVERSATION = '{"type": "ConversationMemory", "runs": [{"type": "Run", "id": "729ca6be5d79433d9762eb06dfd677e2", "input": {"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}, "output": {"type": "TextArtifact", "id": "123", "value": "Hello! 
How can I assist you today?"}}], "max_runs": 2}' +TEST_DATA = '{"runs": [{"input": {"type": "TextArtifact", "value": "Hi There, Hello"}, "output": {"type": "TextArtifact", "value": "Hello! How can I assist you today?"}}], "metadata": {"foo": "bar"}}' +TEST_MEMORY = '{"type": "ConversationMemory", "runs": [{"type": "Run", "id": "729ca6be5d79433d9762eb06dfd677e2", "input": {"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}, "output": {"type": "TextArtifact", "id": "123", "value": "Hello! How can I assist you today?"}}], "max_runs": 2}' CONVERSATION_ID = "117151897f344ff684b553d0655d8f39" INDEX = "griptape_conversation" HOST = "127.0.0.1" @@ -17,7 +18,7 @@ class TestRedisConversationMemoryDriver: def _mock_redis(self, mocker): mocker.patch.object(redis.StrictRedis, "hset", return_value=None) mocker.patch.object(redis.StrictRedis, "keys", return_value=[b"test"]) - mocker.patch.object(redis.StrictRedis, "hget", return_value=TEST_CONVERSATION) + mocker.patch.object(redis.StrictRedis, "hget", return_value=TEST_DATA) fake_redisearch = mocker.MagicMock() fake_redisearch.search = mocker.MagicMock(return_value=mocker.MagicMock(docs=[])) @@ -31,11 +32,16 @@ def driver(self): return RedisConversationMemoryDriver(host=HOST, port=PORT, db=0, index=INDEX, conversation_id=CONVERSATION_ID) def test_store(self, driver): - memory = BaseConversationMemory.from_json(TEST_CONVERSATION) - assert driver.store(memory) is None + memory = BaseConversationMemory.from_json(TEST_MEMORY) + assert driver.store(memory.runs, memory.meta) is None def test_load(self, driver): - memory = driver.load() - assert memory.type == "ConversationMemory" - assert memory.max_runs == 2 - assert memory.runs == BaseConversationMemory.from_json(TEST_CONVERSATION).runs + runs, metadata = driver.load() + assert len(runs) == 1 + assert metadata == {"foo": "bar"} + + def test_load_empty(self, mocker, driver): + mocker.patch.object(redis.StrictRedis, "hget", return_value=None) + runs, metadata = driver.load() + assert len(runs) == 0 + assert metadata == {} diff --git a/tests/unit/memory/structure/test_conversation_memory.py b/tests/unit/memory/structure/test_conversation_memory.py index 3f9ac2344..84c8591d8 100644 --- a/tests/unit/memory/structure/test_conversation_memory.py +++ b/tests/unit/memory/structure/test_conversation_memory.py @@ -90,7 +90,7 @@ def test_add_to_prompt_stack_autopruing_disabled(self): prompt_stack = PromptStack() prompt_stack.add_user_message(TextArtifact("foo")) prompt_stack.add_assistant_message("bar") - memory.add_to_prompt_stack(prompt_stack) + memory.add_to_prompt_stack(agent.prompt_driver, prompt_stack) assert len(prompt_stack.messages) == 12 @@ -116,7 +116,7 @@ def test_add_to_prompt_stack_autopruning_enabled(self, mock_config): prompt_stack.add_system_message("fizz") prompt_stack.add_user_message("foo") prompt_stack.add_assistant_message("bar") - memory.add_to_prompt_stack(prompt_stack) + memory.add_to_prompt_stack(agent.prompt_driver, prompt_stack) assert len(prompt_stack.messages) == 3 @@ -140,7 +140,7 @@ def test_add_to_prompt_stack_autopruning_enabled(self, mock_config): prompt_stack.add_system_message("fizz") prompt_stack.add_user_message("foo") prompt_stack.add_assistant_message("bar") - memory.add_to_prompt_stack(prompt_stack) + memory.add_to_prompt_stack(agent.prompt_driver, prompt_stack) assert len(prompt_stack.messages) == 13 @@ -168,7 +168,7 @@ def test_add_to_prompt_stack_autopruning_enabled(self, mock_config): prompt_stack.add_system_message("fizz") prompt_stack.add_user_message("foo") 
prompt_stack.add_assistant_message("bar") - memory.add_to_prompt_stack(prompt_stack, 1) + memory.add_to_prompt_stack(agent.prompt_driver, prompt_stack, 1) # We expect one run (2 Prompt Stack inputs) to be pruned. assert len(prompt_stack.messages) == 11 From 4ae9711993a5ac8b9e79885efa57654c664d6478 Mon Sep 17 00:00:00 2001 From: Matt Vallillo Date: Tue, 27 Aug 2024 14:11:04 -0500 Subject: [PATCH 05/39] Fix issue in `PromptSummaryEngine` (#1111) --- CHANGELOG.md | 1 + griptape/engines/summary/prompt_summary_engine.py | 5 +++++ tests/unit/engines/summary/test_prompt_summary_engine.py | 7 +++++++ 3 files changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 555306f90..9d9acb8c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Parsing streaming response with some OpenAi compatible services. +- Issue in `PromptSummaryEngine` if there are no artifacts during recursive summarization. **Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. diff --git a/griptape/engines/summary/prompt_summary_engine.py b/griptape/engines/summary/prompt_summary_engine.py index 99e133844..3cc3dd470 100644 --- a/griptape/engines/summary/prompt_summary_engine.py +++ b/griptape/engines/summary/prompt_summary_engine.py @@ -60,6 +60,11 @@ def summarize_artifacts_rec( summary: Optional[str] = None, rulesets: Optional[list[Ruleset]] = None, ) -> TextArtifact: + if not artifacts: + if summary is None: + raise ValueError("No artifacts to summarize") + return TextArtifact(summary) + artifacts_text = self.chunk_joiner.join([a.to_text() for a in artifacts]) system_prompt = self.system_template_generator.render( diff --git a/tests/unit/engines/summary/test_prompt_summary_engine.py b/tests/unit/engines/summary/test_prompt_summary_engine.py index 138444ae3..c750a26ee 100644 --- a/tests/unit/engines/summary/test_prompt_summary_engine.py +++ b/tests/unit/engines/summary/test_prompt_summary_engine.py @@ -42,3 +42,10 @@ def copy_test_resource(resource_path: str): return Path(full_path).read_text() assert engine.summarize_text(copy_test_resource("test.txt") * 50) + + def test_summarize_artifacts_rec_no_artifacts(self, engine): + with pytest.raises(ValueError): + engine.summarize_artifacts_rec([]) + + output = engine.summarize_artifacts_rec([], "summary") + assert output.value == "summary" From c1ee9f63892b4c0b1fd5a3bcf7a1e8c6cdcc40e9 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Tue, 27 Aug 2024 13:10:23 -0700 Subject: [PATCH 06/39] Don't send empty properties (#1112) --- CHANGELOG.md | 1 + griptape/drivers/prompt/google_prompt_driver.py | 16 +++++++++++----- .../drivers/prompt/test_google_prompt_driver.py | 4 ++-- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d9acb8c2..c351588c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Parsing streaming response with some OpenAi compatible services. - Issue in `PromptSummaryEngine` if there are no artifacts during recursive summarization. +- Issue in `GooglePromptDriver` using Tools with no schema. **Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. 
diff --git a/griptape/drivers/prompt/google_prompt_driver.py b/griptape/drivers/prompt/google_prompt_driver.py index bbba4e0f9..6b18f6041 100644 --- a/griptape/drivers/prompt/google_prompt_driver.py +++ b/griptape/drivers/prompt/google_prompt_driver.py @@ -187,11 +187,17 @@ def __to_google_tools(self, tools: list[BaseTool]) -> list[dict]: tool_declaration = types.FunctionDeclaration( name=tool.to_native_tool_name(activity), description=tool.activity_description(activity), - parameters={ - "type": schema["type"], - "properties": schema["properties"], - "required": schema.get("required", []), - }, + **( + { + "parameters": { + "type": schema["type"], + "properties": schema["properties"], + "required": schema.get("required", []), + } + } + if schema.get("properties") + else {} + ), ) tool_declarations.append(tool_declaration) diff --git a/tests/unit/drivers/prompt/test_google_prompt_driver.py b/tests/unit/drivers/prompt/test_google_prompt_driver.py index ce3db921f..5d01217d9 100644 --- a/tests/unit/drivers/prompt/test_google_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_google_prompt_driver.py @@ -29,8 +29,8 @@ class TestGooglePromptDriver: "description": "test description: foo", "parameters": {"type": "OBJECT", "properties": {"test": {"type": "STRING"}}, "required": ["test"]}, }, - {"name": "MockTool_test_list_output", "description": "test description", "parameters": {"type": "OBJECT"}}, - {"name": "MockTool_test_no_schema", "description": "test description", "parameters": {"type": "OBJECT"}}, + {"name": "MockTool_test_list_output", "description": "test description"}, + {"name": "MockTool_test_no_schema", "description": "test description"}, { "name": "MockTool_test_str_output", "description": "test description: foo", From 10c0170af606654ccee116ecd44eb159ca510bdb Mon Sep 17 00:00:00 2001 From: Matt Vallillo Date: Thu, 29 Aug 2024 14:34:14 -0400 Subject: [PATCH 07/39] Update GriptapeCloudEventListenerDriver (#1115) --- .../griptape_cloud_event_listener_driver.py | 24 ++++++++++++------- tests/mocks/mock_event.py | 2 +- ...st_griptape_cloud_event_listener_driver.py | 8 +++---- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py b/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py index 98f52b914..3e06eaa88 100644 --- a/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py +++ b/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py @@ -54,13 +54,21 @@ def publish_event(self, event: BaseEvent | dict, *, flush: bool = False) -> None super().publish_event(event_payload, flush=flush) def try_publish_event_payload(self, event_payload: dict) -> None: - url = urljoin(self.base_url.strip("/"), f"/api/structure-runs/{self.structure_run_id}/events") - - response = requests.post(url=url, json=event_payload, headers=self.headers) - response.raise_for_status() + self._post_event(self._get_event_request(event_payload)) def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None: - url = urljoin(self.base_url.strip("/"), f"/api/structure-runs/{self.structure_run_id}/events") - - response = requests.post(url=url, json=event_payload_batch, headers=self.headers) - response.raise_for_status() + self._post_event([self._get_event_request(event_payload) for event_payload in event_payload_batch]) + + def _get_event_request(self, event_payload: dict) -> dict: + return { + "payload": event_payload, + "timestamp": event_payload["timestamp"], + "type": 
event_payload["type"], + } + + def _post_event(self, json: list[dict] | dict) -> None: + requests.post( + url=urljoin(self.base_url.strip("/"), f"/api/structure-runs/{self.structure_run_id}/events"), + json=json, + headers=self.headers, + ).raise_for_status() diff --git a/tests/mocks/mock_event.py b/tests/mocks/mock_event.py index 2b9d9ade3..a8737f47c 100644 --- a/tests/mocks/mock_event.py +++ b/tests/mocks/mock_event.py @@ -3,4 +3,4 @@ class MockEvent(BaseEvent): def to_dict(self) -> dict: - return {"timestamp": self.timestamp, "id": self.id} + return {"timestamp": self.timestamp, "id": self.id, "meta": self.meta, "type": self.__class__.__name__} diff --git a/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py b/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py index 0bf298870..441589774 100644 --- a/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py @@ -49,7 +49,7 @@ def test_publish_event_without_span_id(self, mock_post, driver): mock_post.assert_called_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", - json=[event.to_dict()], + json=[driver._get_event_request(event.to_dict())], headers={"Authorization": "Bearer foo bar"}, ) @@ -63,7 +63,7 @@ def test_publish_event_with_span_id(self, mock_post, driver): mock_post.assert_called_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", - json=[{**event.to_dict(), "span_id": "test"}], + json=[driver._get_event_request({**event.to_dict(), "span_id": "test"})], headers={"Authorization": "Bearer foo bar"}, ) @@ -73,7 +73,7 @@ def test_try_publish_event_payload(self, mock_post, driver): mock_post.assert_called_once_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", - json=event.to_dict(), + json=driver._get_event_request(event.to_dict()), headers={"Authorization": "Bearer foo bar"}, ) @@ -84,6 +84,6 @@ def try_publish_event_payload_batch(self, mock_post, driver): mock_post.assert_called_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", - json=event.to_dict(), + json=driver._get_event_request(event.to_dict()), headers={"Authorization": "Bearer foo bar"}, ) From 49fb10477d46cdd80e9d9be6ff6ac4df6b95ab82 Mon Sep 17 00:00:00 2001 From: CJ Kindel Date: Thu, 29 Aug 2024 14:29:02 -0700 Subject: [PATCH 08/39] Migrate GriptapeCloudStructureRunDriver to use `env_var` over `env` field (#1118) Co-authored-by: Collin Dutter --- .../griptape_cloud_structure_run_driver.py | 4 +++- .../test_griptape_cloud_structure_run_driver.py | 17 ++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py b/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py index a6e2064b6..46b54c528 100644 --- a/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py +++ b/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py @@ -28,9 +28,11 @@ def try_run(self, *args: BaseArtifact) -> BaseArtifact | InfoArtifact: url = urljoin(self.base_url.strip("/"), f"/api/structures/{self.structure_id}/runs") + env_vars = [{"name": key, "value": value, "source": "manual"} for key, value in self.env.items()] + response: Response = post( url, - json={"args": [arg.value for arg in args], "env": self.env}, + json={"args": [arg.value for arg in args], "env_vars": env_vars}, headers=self.headers, ) 
response.raise_for_status() diff --git a/tests/unit/drivers/structure_run/test_griptape_cloud_structure_run_driver.py b/tests/unit/drivers/structure_run/test_griptape_cloud_structure_run_driver.py index bdd5cd3ed..ccc8ac303 100644 --- a/tests/unit/drivers/structure_run/test_griptape_cloud_structure_run_driver.py +++ b/tests/unit/drivers/structure_run/test_griptape_cloud_structure_run_driver.py @@ -5,12 +5,14 @@ class TestGriptapeCloudStructureRunDriver: @pytest.fixture() - def driver(self, mocker): - from griptape.drivers import GriptapeCloudStructureRunDriver - + def mock_requests_post(self, mocker): mock_response = mocker.Mock() mock_response.json.return_value = {"structure_run_id": 1} - mocker.patch("requests.post", return_value=mock_response) + return mocker.patch("requests.post", return_value=mock_response) + + @pytest.fixture() + def driver(self, mocker, mock_requests_post): + from griptape.drivers import GriptapeCloudStructureRunDriver mock_response = mocker.Mock() mock_response.json.return_value = { @@ -24,10 +26,15 @@ def driver(self, mocker): base_url="https://cloud-foo.griptape.ai", api_key="foo bar", structure_id="1", env={"key": "value"} ) - def test_run(self, driver): + def test_run(self, driver, mock_requests_post): result = driver.run(TextArtifact("foo bar")) assert isinstance(result, TextArtifact) assert result.value == "foo bar" + mock_requests_post.assert_called_once_with( + "https://cloud-foo.griptape.ai/api/structures/1/runs", + json={"args": ["foo bar"], "env_vars": [{"name": "key", "value": "value", "source": "manual"}]}, + headers={"Authorization": "Bearer foo bar"}, + ) def test_async_run(self, driver): driver.async_run = True From ba47112550d08704b6f26f2abb58d23f091971d7 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Thu, 29 Aug 2024 15:52:14 -0700 Subject: [PATCH 09/39] Fix tts model (#1122) --- CHANGELOG.md | 1 + griptape/configs/drivers/openai_drivers_config.py | 2 +- tests/unit/configs/drivers/test_openai_driver_config.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c351588c4..13141045d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Parsing streaming response with some OpenAi compatible services. - Issue in `PromptSummaryEngine` if there are no artifacts during recursive summarization. - Issue in `GooglePromptDriver` using Tools with no schema. +- Incorrect model in `OpenAiDriverConfig`'s `text_to_speech_driver`. **Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. 
diff --git a/griptape/configs/drivers/openai_drivers_config.py b/griptape/configs/drivers/openai_drivers_config.py index 205cfb0e1..ec1a4dc79 100644 --- a/griptape/configs/drivers/openai_drivers_config.py +++ b/griptape/configs/drivers/openai_drivers_config.py @@ -37,7 +37,7 @@ def vector_store_driver(self) -> LocalVectorStoreDriver: @lazy_property() def text_to_speech_driver(self) -> OpenAiTextToSpeechDriver: - return OpenAiTextToSpeechDriver(model="tts") + return OpenAiTextToSpeechDriver(model="tts-1") @lazy_property() def audio_transcription_driver(self) -> OpenAiAudioTranscriptionDriver: diff --git a/tests/unit/configs/drivers/test_openai_driver_config.py b/tests/unit/configs/drivers/test_openai_driver_config.py index 40a755e50..016383c32 100644 --- a/tests/unit/configs/drivers/test_openai_driver_config.py +++ b/tests/unit/configs/drivers/test_openai_driver_config.py @@ -72,7 +72,7 @@ def test_to_dict(self, config): "api_version": None, "base_url": None, "format": "mp3", - "model": "tts", + "model": "tts-1", "organization": None, "voice": "alloy", }, From ab257354c8dd6f813c1fe97345b0062abc1624da Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Thu, 29 Aug 2024 16:24:45 -0700 Subject: [PATCH 10/39] Fix missing maxTokens in AmazonBedrockPromptDriver (#1123) --- CHANGELOG.md | 1 + griptape/drivers/prompt/amazon_bedrock_prompt_driver.py | 2 +- .../unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 13141045d..0543df42d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Parsing streaming response with some OpenAi compatible services. - Issue in `PromptSummaryEngine` if there are no artifacts during recursive summarization. - Issue in `GooglePromptDriver` using Tools with no schema. +- Missing `maxTokens` inference parameter in `AmazonBedrockPromptDriver`. - Incorrect model in `OpenAiDriverConfig`'s `text_to_speech_driver`. **Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. 
diff --git a/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py b/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py index b663d06fd..bc339f618 100644 --- a/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py +++ b/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py @@ -98,7 +98,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: "modelId": self.model, "messages": messages, "system": system_messages, - "inferenceConfig": {"temperature": self.temperature}, + "inferenceConfig": {"temperature": self.temperature, "maxTokens": self.max_tokens}, "additionalModelRequestFields": self.additional_model_request_fields, **( {"toolConfig": {"tools": self.__to_bedrock_tools(prompt_stack.tools), "toolChoice": self.tool_choice}} diff --git a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py index ebe25bb28..c36c46074 100644 --- a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py @@ -344,7 +344,7 @@ def test_try_run(self, mock_converse, prompt_stack, messages, use_native_tools): mock_converse.assert_called_once_with( modelId=driver.model, messages=messages, - inferenceConfig={"temperature": driver.temperature}, + inferenceConfig={"temperature": driver.temperature, "maxTokens": driver.max_tokens}, additionalModelRequestFields={}, **({"system": [{"text": "system-input"}]} if prompt_stack.system_messages else {"system": []}), **( @@ -376,7 +376,7 @@ def test_try_stream_run(self, mock_converse_stream, prompt_stack, messages, use_ mock_converse_stream.assert_called_once_with( modelId=driver.model, messages=messages, - inferenceConfig={"temperature": driver.temperature}, + inferenceConfig={"temperature": driver.temperature, "maxTokens": driver.max_tokens}, additionalModelRequestFields={}, **({"system": [{"text": "system-input"}]} if prompt_stack.system_messages else {"system": []}), **( From a6a294f6bc09378dc1d1b550238737b0408f0415 Mon Sep 17 00:00:00 2001 From: Emily Danielson <2302515+emjay07@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:56:22 -0700 Subject: [PATCH 11/39] initial cloud docs for data sources, knowledge bases, and structures (#1110) Co-authored-by: Collin Dutter --- .../data-sources/create-data-source.md | 27 +++++++++++++++ .../data-sources/refresh-data.md | 17 ++++++++++ docs/griptape-cloud/index.md | 11 +++++-- .../knowledge-bases/accessing-data.md | 33 +++++++++++++++++++ .../knowledge-bases/create-knowledge-base.md | 7 ++++ .../structures/create-structure.md | 17 ++++++++++ .../structures/run-structure.md | 32 ++++++++++++++++++ docs/index.md | 2 +- mkdocs.yml | 12 +++++-- 9 files changed, 153 insertions(+), 5 deletions(-) create mode 100644 docs/griptape-cloud/data-sources/create-data-source.md create mode 100644 docs/griptape-cloud/data-sources/refresh-data.md create mode 100644 docs/griptape-cloud/knowledge-bases/accessing-data.md create mode 100644 docs/griptape-cloud/knowledge-bases/create-knowledge-base.md create mode 100644 docs/griptape-cloud/structures/create-structure.md create mode 100644 docs/griptape-cloud/structures/run-structure.md diff --git a/docs/griptape-cloud/data-sources/create-data-source.md b/docs/griptape-cloud/data-sources/create-data-source.md new file mode 100644 index 000000000..aede0e9ee --- /dev/null +++ b/docs/griptape-cloud/data-sources/create-data-source.md @@ -0,0 +1,27 @@ +# Data Sources + +Data Sources are the first step to Griptape's RAG pipeline. 
They allow you to bring your own data to ingest and transform. You can then make one or more Data Sources available to your AI applications via [Knowledge Bases](../knowledge-bases/create-knowledge-base.md). + +## Create a Data Source + +You can [create a Data Source in the Griptape Cloud console](https://cloud.griptape.ai/data-sources/create) by specifying the required configuration for your chosen Data Source. + +### Web Page + +You can scrape and ingest a single, public web page by providing a URL. If you wish to scrape multiple pages, you must create multiple Data Sources. However, you can then add all of the pages to the same Knowledge Base if you wish to access all the pages together. + +### Google Drive + +You can ingest documents and spreadsheets stored in a Google Drive account. We support all standard file formats such as text, markdown, spreadsheets, and presentations. + +### Confluence + +You can connect to your personal or company Confluence by providing a URL, [Atlassian API Token](https://id.atlassian.com/manage-profile/security/api-tokens), and the email address for the token holder's account. Each Confluence Data Source can be limited to a single Space in Confluence by specifying the [specific URL for that Space](https://support.atlassian.com/confluence-cloud/docs/use-spaces-to-organize-your-work/). + +### Structure (Experimental) + +You can specify a [Structure](../structures/create-structure.md) to run as a Data Source as long as your Structure returns a [`TextArtifact` or `ListArtifact` from the Griptape Framework](../../griptape-framework/data/artifacts.md). You can use this as a way to build custom Data Sources. + +## Other Data Source Types + +If you do not see a Data Source configuration you wish to use, you can submit a request via [Discord](https://discord.gg/gnWRz88eym) or `hello@griptape.ai`. diff --git a/docs/griptape-cloud/data-sources/refresh-data.md b/docs/griptape-cloud/data-sources/refresh-data.md new file mode 100644 index 000000000..548745218 --- /dev/null +++ b/docs/griptape-cloud/data-sources/refresh-data.md @@ -0,0 +1,17 @@ +# Refresh a Data Source + +## Scheduled Refresh + +By default, your Data Source will not refresh automatically. When creating a Data Source, you can enable scheduled refresh and specify a [CRON expression](https://crontab.guru/). For example, if you wish your Data Source to refresh every day at midnight PDT you can use the following expression: `0 7 * * *`. + +## Manual Refresh + +If you wish to manually refresh a Data Source you can do so either via the `Refresh` button in the cloud console or by API using the `Data Source ID` on the `Config` tab and a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys). + +The following shell commands will create a new data refresh job. You will need to specify your API key and Data Source ID. + +```shell +export GT_CLOUD_API_KEY= +export DATA_SOURCE_ID= +curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" --json '{}' https://cloud.griptape.ai/api/data-connectors/${DATA_SOURCE_ID}/data-jobs +``` diff --git a/docs/griptape-cloud/index.md b/docs/griptape-cloud/index.md index 6c1c89b8b..74a78eaf1 100644 --- a/docs/griptape-cloud/index.md +++ b/docs/griptape-cloud/index.md @@ -1,5 +1,12 @@ # Griptape Cloud -Griptape Cloud provides managed services for your AI app stack. Deploy and scale end-to-end solutions, from LLM-powered data prep and retrieval to AI agents, pipelines and workflows.
+[Griptape Cloud](https://cloud.griptape.ai/) provides managed services for your AI app stack. Deploy and scale end-to-end solutions, from LLM-powered data prep and retrieval to AI Agents, Pipelines, and Workflows. -To get started with AI Structures in the Cloud, check out the [managed-structure-template](https://github.com/griptape-ai/managed-structure-template) or deploy one of the [griptape-sample-structures](https://github.com/griptape-ai/griptape-sample-structures/tree/main). \ No newline at end of file +## Build Your Own RAG Pipeline +Connect to your data with our [Data Sources](data-sources/create-data-source.md) and prepare them for retrieval with [Knowledge Bases](knowledge-bases/create-knowledge-base.md). + +## Host and Run Your Code +Have Griptape code? Have existing code with another LLM framework? You can host your Python code using [Structures](structures/create-structure.md) whether it uses the Griptape Framework or not. + +## APIs +All of our features can be called via API with a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys). See the [API Reference](api/api-reference.md) for detailed information. diff --git a/docs/griptape-cloud/knowledge-bases/accessing-data.md b/docs/griptape-cloud/knowledge-bases/accessing-data.md new file mode 100644 index 000000000..8cb2f7e7b --- /dev/null +++ b/docs/griptape-cloud/knowledge-bases/accessing-data.md @@ -0,0 +1,33 @@ +# Accessing Data in a Knowledge Base + +You can `Search` or `Query` the Knowledge Base for information contained in your Data Sources. `Search` will return a natural language response while `Query` will return the individual entries. Use whichever one best fits your use case. + +## From the Cloud Console + +You can explore your data with a natural language question on the `Test` tab of your Knowledge Base. Compare and contrast the results of `Search` vs. `Query` to understand which is correct for your application. + +## From the API + +You can invoke both `Search` and `Query` via the API by hitting their respective endpoints using a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys) and the Knowledge Base ID found on the `Config` tab of your Knowledge Base. + +The following example commands will send the string `"test question"` and return the results from the Knowledge Base. + +### Search + +```shell +export GT_CLOUD_API_KEY= +export KNOWLEDGE_BASE_ID= +curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" --json '{"query": "test question"}' https://cloud.griptape.ai/api/knowledge-bases/${KNOWLEDGE_BASE_ID}/search +``` + +### Query + +```shell +export GT_CLOUD_API_KEY= +export KNOWLEDGE_BASE_ID= +curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" --json '{"query": "test question"}' https://cloud.griptape.ai/api/knowledge-bases/${KNOWLEDGE_BASE_ID}/query +``` + +## Using the Griptape Framework + +You can use the [GriptapeCloudKnowledgeBaseVectorStoreDriver](../../griptape-framework/drivers/vector-store-drivers.md/#griptape-cloud-knowledge-base) to query your Knowledge Base with Griptape and the [GriptapeCloudKnowledgeBaseTool](../../griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md) to search.
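For example, a minimal sketch of that framework path, assuming the driver is constructed with an `api_key` and a `knowledge_base_id` and that `query()` returns the individual entries; check the linked driver reference for the exact signature.

```python
import os

from griptape.drivers import GriptapeCloudKnowledgeBaseVectorStoreDriver

# Assumed parameter names; confirm against the vector store driver reference.
driver = GriptapeCloudKnowledgeBaseVectorStoreDriver(
    api_key=os.environ["GT_CLOUD_API_KEY"],
    knowledge_base_id=os.environ["KNOWLEDGE_BASE_ID"],
)

# Mirrors the /query endpoint above: returns individual entries rather than a summarized answer.
for entry in driver.query("test question"):
    print(entry.meta)
```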
diff --git a/docs/griptape-cloud/knowledge-bases/create-knowledge-base.md b/docs/griptape-cloud/knowledge-bases/create-knowledge-base.md new file mode 100644 index 000000000..254c889d3 --- /dev/null +++ b/docs/griptape-cloud/knowledge-bases/create-knowledge-base.md @@ -0,0 +1,7 @@ +# Knowledge Bases + +Knowledge Bases are the way to organize and access your data ingested from [Data Sources](../data-sources/create-data-source.md). You can specify multiple Data Sources per Knowledge Base in order to access data ingested from different sources all in one place. + +## Create a Knowledge Base + +You can [create a Knowledge Base in the Griptape Cloud console](https://cloud.griptape.ai/knowledge-bases/create) by specifying which Data Sources you wish to include. Once created, you can [access your data](accessing-data.md). diff --git a/docs/griptape-cloud/structures/create-structure.md b/docs/griptape-cloud/structures/create-structure.md new file mode 100644 index 000000000..df0449891 --- /dev/null +++ b/docs/griptape-cloud/structures/create-structure.md @@ -0,0 +1,17 @@ +# Structures + +Structures are a primary component in Griptape for organizing and executing Tasks against an LLM. + +## Create a Structure + +1. [Connect Your GitHub Account in your Griptape Cloud account](https://cloud.griptape.ai/account) +1. Install the [Griptape Cloud GitHub app to your GitHub account or organization](https://github.com/apps/griptape-cloud/installations/new/) + - Be sure to allow the app access to `All Repositories` or select the specific repositories you need +1. Ensure your repository has a Structure Config YAML file + - To learn more see [Structure Config YAML](structure-config.md) + +You can now [create a Structure in the Griptape Cloud console](https://cloud.griptape.ai/structures/create) by providing your GitHub repository information. + +### Quickstart With Samples and Templates + +To get started with Structures in the Cloud, check out the [managed-structure-template on GitHub](https://github.com/griptape-ai/managed-structure-template) or deploy one of the [griptape-sample-structures from GitHub](https://github.com/griptape-ai/griptape-sample-structures/tree/main). diff --git a/docs/griptape-cloud/structures/run-structure.md b/docs/griptape-cloud/structures/run-structure.md new file mode 100644 index 000000000..995fcff01 --- /dev/null +++ b/docs/griptape-cloud/structures/run-structure.md @@ -0,0 +1,32 @@ +# Running a Structure + +Once your Structure is created and deployed, you can run your Structure in one of three ways outlined below. You can view the output of any of your runs, no matter how you created them, in the `Runs` tab of your Structure. + +## From the Cloud Console + +In the cloud console, click on the name of the Structure you wish to run and then go to the `Test` tab. Here you can specify arguments to pass to your Structure run and any run-specific environment variables you need. + +When passing arguments through the cloud console, pass each new argument on a new line. For example, if your local code is run with the inputs `-i input_file.txt` then the arguments you would pass in the cloud would be: + +``` +-i +input_file.txt +``` + +## From the API + +You can run your Structure via the API using cURL or any other code that can make HTTP requests. You will need a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys) and the `Structure Invocation URL` which is located on the `Config` tab of your Structure.
+The example below will kick off a run with the args you pass as a JSON object. + +```shell +export GT_CLOUD_API_KEY= +export INVOCATION_URL= +curl -H "Authorization: Bearer ${GT_CLOUD_API_KEY}" --json '{"args": ["arg1"], "env_vars": [{"name":"var1", "value": "value"}]}' ${INVOCATION_URL} +``` + +For more information on other Structure run APIs, check out the [StructureRuns API docs](../api/api-reference.md/#/StructureRuns). + +## Using the Griptape Framework + +You can use [StructureRunDrivers](../../griptape-framework/drivers/structure-run-drivers.md/#griptape-cloud) to run your Structure with Griptape. diff --git a/docs/index.md b/docs/index.md index 5d22224e7..2c6ee5d50 100644 --- a/docs/index.md +++ b/docs/index.md @@ -8,7 +8,7 @@ Griptape Topic Guides discuss key topics at a high level and provide useful back ### Griptape Cloud -[Griptape Cloud](griptape-cloud/api/api-reference.md) provides an overview of the APIs available in the managed cloud service. +[Griptape Cloud](griptape-cloud/index.md) provides an overview of the features in Griptape's cloud offering. ### Griptape Framework diff --git a/mkdocs.yml b/mkdocs.yml index 4207d2171..35f1a74cd 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -77,10 +77,18 @@ nav: - Contributing: "contributing.md" - Cloud: - Overview: "griptape-cloud/index.md" + - Data Sources: + - Create a Data Source: "griptape-cloud/data-sources/create-data-source.md" + - Refreshing Your Data: "griptape-cloud/data-sources/refresh-data.md" + - Knowledge Bases: + - Create a Knowledge Base: "griptape-cloud/knowledge-bases/create-knowledge-base.md" + - Accessing Your Data: "griptape-cloud/knowledge-bases/accessing-data.md" + - Structures: + - Create a Structure: "griptape-cloud/structures/create-structure.md" + - Structure Config YAML: "griptape-cloud/structures/structure-config.md" + - Running Your Structure: "griptape-cloud/structures/run-structure.md" - Cloud API: - API Reference: "griptape-cloud/api/api-reference.md" - - Structures: - - Structure Config: "griptape-cloud/structures/structure-config.md" - Framework: - Overview: "griptape-framework/index.md" - Structures: From 6b0bfa2d443c6aef562b86fc3b761c39ca7d312d Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Fri, 30 Aug 2024 09:20:38 -0700 Subject: [PATCH 12/39] Add dependabot auto updates (#1124) --- .dependabot.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .dependabot.yml diff --git a/.dependabot.yml b/.dependabot.yml new file mode 100644 index 000000000..645c171aa --- /dev/null +++ b/.dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" From 2b1566a0d0da27cfd6e5b6263025e5fa839d102d Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Tue, 3 Sep 2024 09:33:43 -0700 Subject: [PATCH 13/39] Use textual value for reranking (#1121) --- CHANGELOG.md | 2 ++ griptape/artifacts/csv_row_artifact.py | 1 + griptape/drivers/rerank/cohere_rerank_driver.py | 4 ++-- tests/unit/artifacts/test_csv_row_artifact.py | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0543df42d..17fe7f136 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **BREAKING**: `BaseConversationMemoryDriver.store` now takes `runs: list[Run]` and `metadata: Optional[dict]` as input.
- **BREAKING**: Parameter `file_path` on `LocalConversationMemoryDriver` renamed to `persist_file` and is now type `Optional[str]`. - `Defaults.drivers_config.conversation_memory_driver` now defaults to `LocalConversationMemoryDriver` instead of `None`. +- `CsvRowArtifact.to_text()` now includes the header. ### Fixed - Parsing streaming response with some OpenAi compatible services. @@ -23,6 +24,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Issue in `GooglePromptDriver` using Tools with no schema. - Missing `maxTokens` inference parameter in `AmazonBedrockPromptDriver`. - Incorrect model in `OpenAiDriverConfig`'s `text_to_speech_driver`. +- Crash when using `CohereRerankDriver` with `CsvRowArtifact`s. **Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. diff --git a/griptape/artifacts/csv_row_artifact.py b/griptape/artifacts/csv_row_artifact.py index c4347099e..00f1047fc 100644 --- a/griptape/artifacts/csv_row_artifact.py +++ b/griptape/artifacts/csv_row_artifact.py @@ -28,6 +28,7 @@ def to_text(self) -> str: delimiter=self.delimiter, ) + writer.writeheader() writer.writerow(self.value) return csvfile.getvalue().strip() diff --git a/griptape/drivers/rerank/cohere_rerank_driver.py b/griptape/drivers/rerank/cohere_rerank_driver.py index 12793846b..5ca03cf63 100644 --- a/griptape/drivers/rerank/cohere_rerank_driver.py +++ b/griptape/drivers/rerank/cohere_rerank_driver.py @@ -24,11 +24,11 @@ class CohereRerankDriver(BaseRerankDriver): ) def run(self, query: str, artifacts: list[TextArtifact]) -> list[TextArtifact]: - artifacts_dict = {str(hash(a.value)): a for a in artifacts} + artifacts_dict = {str(hash(a.to_text())): a for a in artifacts} response = self.client.rerank( model=self.model, query=query, - documents=[a.value for a in artifacts_dict.values()], + documents=[a.to_text() for a in artifacts_dict.values()], return_documents=True, top_n=self.top_n, ) diff --git a/tests/unit/artifacts/test_csv_row_artifact.py b/tests/unit/artifacts/test_csv_row_artifact.py index 986ece409..fe0b8cd64 100644 --- a/tests/unit/artifacts/test_csv_row_artifact.py +++ b/tests/unit/artifacts/test_csv_row_artifact.py @@ -14,7 +14,7 @@ def test___add__(self): } def test_to_text(self): - assert CsvRowArtifact({"test1": "foo|bar", "test2": 1}, delimiter="|").to_text() == '"foo|bar"|1' + assert CsvRowArtifact({"test1": "foo|bar", "test2": 1}, delimiter="|").to_text() == 'test1|test2\r\n"foo|bar"|1' def test_to_dict(self): assert CsvRowArtifact({"test1": "foo"}).to_dict()["value"] == {"test1": "foo"} From 39da2bbe13a3651842da9ec548a8df70513d6018 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Tue, 3 Sep 2024 11:09:36 -0700 Subject: [PATCH 14/39] Merge main into dev (#1126) --- CHANGELOG.md | 6 +++++ .../base_event_listener_driver.py | 11 +++++--- griptape/tools/file_manager/tool.py | 3 ++- pyproject.toml | 2 +- tests/unit/tools/test_file_manager.py | 25 ++++++++++++++++++- 5 files changed, 40 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17fe7f136..8542bfbb5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 **Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. 
+## [0.30.2] - 2024-08-26 + +### Fixed +- Ensure thread safety when publishing events by adding a thread lock to batch operations in `BaseEventListenerDriver`. +- `FileManagerTool` failing to save Artifacts created by `ExtractionTool` with a `CsvExtractionEngine`. + ## [0.30.1] - 2024-08-21 ### Fixed diff --git a/griptape/drivers/event_listener/base_event_listener_driver.py b/griptape/drivers/event_listener/base_event_listener_driver.py index 0af57f0f3..75bdc9f75 100644 --- a/griptape/drivers/event_listener/base_event_listener_driver.py +++ b/griptape/drivers/event_listener/base_event_listener_driver.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +import threading from abc import ABC, abstractmethod from typing import TYPE_CHECKING @@ -18,6 +19,7 @@ class BaseEventListenerDriver(FuturesExecutorMixin, ABC): batched: bool = field(default=True, kw_only=True) batch_size: int = field(default=10, kw_only=True) + thread_lock: threading.Lock = field(default=Factory(lambda: threading.Lock())) _batch: list[dict] = field(default=Factory(list), kw_only=True) @@ -39,10 +41,11 @@ def _safe_try_publish_event(self, event: BaseEvent | dict, *, flush: bool) -> No event_payload = event if isinstance(event, dict) else event.to_dict() if self.batched: - self._batch.append(event_payload) - if len(self.batch) >= self.batch_size or flush: - self.try_publish_event_payload_batch(self.batch) - self._batch = [] + with self.thread_lock: + self._batch.append(event_payload) + if len(self.batch) >= self.batch_size or flush: + self.try_publish_event_payload_batch(self.batch) + self._batch = [] return else: self.try_publish_event_payload(event_payload) diff --git a/griptape/tools/file_manager/tool.py b/griptape/tools/file_manager/tool.py index 2ca14d565..b72f82329 100644 --- a/griptape/tools/file_manager/tool.py +++ b/griptape/tools/file_manager/tool.py @@ -94,7 +94,8 @@ def save_memory_artifacts_to_disk(self, params: dict) -> ErrorArtifact | InfoArt for artifact in list_artifact.value: formatted_file_name = f"{artifact.name}-{file_name}" if len(list_artifact) > 1 else file_name try: - self.file_manager_driver.save_file(os.path.join(dir_name, formatted_file_name), artifact.value) + value = artifact.value if isinstance(artifact.value, (str, bytes)) else artifact.to_text() + self.file_manager_driver.save_file(os.path.join(dir_name, formatted_file_name), value) except FileNotFoundError: return ErrorArtifact("Path not found") except IsADirectoryError: diff --git a/pyproject.toml b/pyproject.toml index 2afdc5910..7bff51e16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "griptape" -version = "0.30.1" +version = "0.30.2" description = "Modular Python framework for LLM workflows, tools, memory, and data." 
authors = ["Griptape "] license = "Apache 2.0" diff --git a/tests/unit/tools/test_file_manager.py b/tests/unit/tools/test_file_manager.py index 469918a02..569c0a280 100644 --- a/tests/unit/tools/test_file_manager.py +++ b/tests/unit/tools/test_file_manager.py @@ -5,7 +5,7 @@ import pytest -from griptape.artifacts import ListArtifact, TextArtifact +from griptape.artifacts import CsvRowArtifact, ListArtifact, TextArtifact from griptape.drivers.file_manager.local_file_manager_driver import LocalFileManagerDriver from griptape.loaders.text_loader import TextLoader from griptape.tools import FileManagerTool @@ -106,6 +106,29 @@ def test_save_memory_artifacts_to_disk_for_multiple_artifacts(self, temp_dir): assert Path(os.path.join(temp_dir, "test", f"{artifacts[1].name}-{file_name}")).read_text() == "baz" assert result.value == "Successfully saved memory artifacts to disk" + def test_save_memory_artifacts_to_disk_for_non_string_artifact(self, temp_dir): + memory = defaults.text_task_memory("Memory1") + artifact = CsvRowArtifact({"foo": "bar"}) + + memory.store_artifact("foobar", artifact) + + file_manager = FileManagerTool( + input_memory=[memory], file_manager_driver=LocalFileManagerDriver(workdir=temp_dir) + ) + result = file_manager.save_memory_artifacts_to_disk( + { + "values": { + "dir_name": "test", + "file_name": "foobar.txt", + "memory_name": memory.name, + "artifact_namespace": "foobar", + } + } + ) + + assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foo\nbar" + assert result.value == "Successfully saved memory artifacts to disk" + def test_save_content_to_file(self, temp_dir): file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(workdir=temp_dir)) result = file_manager.save_content_to_file( From 4087de917b948c3640c71ad67a45f9c3e2ce2e2c Mon Sep 17 00:00:00 2001 From: William Price <82848178+william-price01@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:28:11 -0700 Subject: [PATCH 15/39] Update griptape-cloud-knowledge-base-tool.md with new api-keys link (#1127) Co-authored-by: Collin Dutter --- .../official-tools/griptape-cloud-knowledge-base-tool.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md b/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md index 96af51782..c0ce7c1d1 100644 --- a/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md +++ b/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md @@ -2,7 +2,7 @@ The [GriptapeCloudKnowledgeBaseTool](../../reference/griptape/tools/griptape_cloud_knowledge_base/tool.md) is a lightweight Tool to retrieve data from a RAG pipeline and vector store hosted in [Griptape Cloud](https://cloud.griptape.ai). It enables searching across a centralized [Knowledge Base](https://cloud.griptape.ai/knowledge-bases) that can consist of various data sources such as Confluence, Google Docs, and web pages. -**Note:** This tool requires a [Knowledge Base](https://cloud.griptape.ai/knowledge-bases) hosted in Griptape Cloud and an [API Key](https://cloud.griptape.ai/account/api-keys) for access. +**Note:** This tool requires a [Knowledge Base](https://cloud.griptape.ai/knowledge-bases) hosted in Griptape Cloud and an [API Key](https://cloud.griptape.ai/configuration/api-keys) for access. 
```python --8<-- "docs/griptape-tools/official-tools/src/griptape_cloud_knowledge_base_tool_1.py" From a7bfc1441a783310b91dc4b9cf14ddab4d32833c Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Tue, 3 Sep 2024 12:03:20 -0700 Subject: [PATCH 16/39] Chore/main (#1129) Co-authored-by: Andrew French Co-authored-by: Vasily Vasinov Co-authored-by: Matt Vallillo Co-authored-by: dylanholmes <4370153+dylanholmes@users.noreply.github.com> Co-authored-by: Michal Co-authored-by: Zach Giordano <32624672+zachgiordano@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: torabshaikh Co-authored-by: Aodhan Roche Co-authored-by: Kyle Roche Co-authored-by: Emily Danielson <2302515+emjay07@users.noreply.github.com> Co-authored-by: CJ Kindel Co-authored-by: hkhajgiwale Co-authored-by: Harsh Khajgiwale <13365920+hkhajgiwale@users.noreply.github.com> Co-authored-by: Anush Co-authored-by: datashaman Co-authored-by: Stefano Lottini Co-authored-by: James Clarendon From 6935587d862fb7752015b4ae874d2c973fbd2bad Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Tue, 3 Sep 2024 16:23:04 -0700 Subject: [PATCH 17/39] Chore/main (#1131) Co-authored-by: Andrew French Co-authored-by: Vasily Vasinov Co-authored-by: Matt Vallillo Co-authored-by: dylanholmes <4370153+dylanholmes@users.noreply.github.com> Co-authored-by: Michal Co-authored-by: Zach Giordano <32624672+zachgiordano@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: torabshaikh Co-authored-by: Aodhan Roche Co-authored-by: Kyle Roche Co-authored-by: Emily Danielson <2302515+emjay07@users.noreply.github.com> Co-authored-by: CJ Kindel Co-authored-by: hkhajgiwale Co-authored-by: Harsh Khajgiwale <13365920+hkhajgiwale@users.noreply.github.com> Co-authored-by: Anush Co-authored-by: datashaman Co-authored-by: Stefano Lottini Co-authored-by: James Clarendon Co-authored-by: William Price <82848178+william-price01@users.noreply.github.com> --- CHANGELOG.md | 12 ++++++++---- MIGRATION.md | 2 +- README.md | 3 ++- pyproject.toml | 2 +- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8542bfbb5..e7d833612 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,27 +6,31 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +## [0.31.0] - 2024-09-03 + +**Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. + ### Added - Parameter `meta: dict` on `BaseEvent`. ### Changed +- **BREAKING**: Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifacts`. - **BREAKING**: Parameter `driver` on `BaseConversationMemory` renamed to `conversation_memory_driver`. - **BREAKING**: `BaseConversationMemory.add_to_prompt_stack` now takes a `prompt_driver` parameter. -- **BREAKING**: `BaseConversationMemoryDriver.load` now returns `tuple[list[Run], Optional[dict]]`. -- **BREAKING**: `BaseConversationMemoryDriver.store` now takes `runs: list[Run]` and `metadata: Optional[dict]` as input. +- **BREAKING**: `BaseConversationMemoryDriver.load` now returns `tuple[list[Run], dict]`. This represents the runs and metadata. +- **BREAKING**: `BaseConversationMemoryDriver.store` now takes `runs: list[Run]` and `metadata: dict` as input. - **BREAKING**: Parameter `file_path` on `LocalConversationMemoryDriver` renamed to `persist_file` and is now type `Optional[str]`. 
- `Defaults.drivers_config.conversation_memory_driver` now defaults to `LocalConversationMemoryDriver` instead of `None`. - `CsvRowArtifact.to_text()` now includes the header. ### Fixed -- Parsing streaming response with some OpenAi compatible services. +- Parsing streaming response with some OpenAI compatible services. - Issue in `PromptSummaryEngine` if there are no artifacts during recursive summarization. - Issue in `GooglePromptDriver` using Tools with no schema. - Missing `maxTokens` inference parameter in `AmazonBedrockPromptDriver`. - Incorrect model in `OpenAiDriverConfig`'s `text_to_speech_driver`. - Crash when using `CohereRerankDriver` with `CsvRowArtifact`s. -**Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. ## [0.30.2] - 2024-08-26 diff --git a/MIGRATION.md b/MIGRATION.md index 89ba95494..af8835e5b 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -6,7 +6,7 @@ This document provides instructions for migrating your codebase to accommodate b ### Exceptions Over `ErrorArtifact`s -Drivers, Loaders, and Engines will now raises exceptions rather than returning `ErrorArtifact`s. +Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifact`s. Update any logic that expects `ErrorArtifact` to handle exceptions instead. #### Before diff --git a/README.md b/README.md index c127a6077..95f6326dd 100644 --- a/README.md +++ b/README.md @@ -170,7 +170,8 @@ The important thing to note here is that no matter how big the webpage is it can In the above example, we set [off_prompt](https://docs.griptape.ai/stable/griptape-framework/structures/task-memory.md#off-prompt) to `True`, which means that the LLM can never see the data it manipulates, but can send it to other Tools. > [!IMPORTANT] -> This example uses Griptape's [ToolkitTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#toolkit-task), which requires a highly capable LLM to function correctly. If you're using a less powerful LLM, consider using the [ToolTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#tool-task) instead, as the `ToolkitTask` might not work properly or at all. +> This example uses Griptape's [ToolkitTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#toolkit-task), which requires a highly capable LLM to function correctly. By default, Griptape uses the [OpenAiChatPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#openai-chat); for another powerful LLM try swapping to the [AnthropicPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#anthropic)! +If you're using a less powerful LLM, consider using the [ToolTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#tool-task) instead, as the `ToolkitTask` might not work properly or at all. [Check out our docs](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/) to learn more about how to use Griptape with other LLM providers like Anthropic, Claude, Hugging Face, and Azure. diff --git a/pyproject.toml b/pyproject.toml index 7bff51e16..13c7d5219 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "griptape" -version = "0.30.2" +version = "0.31.0" description = "Modular Python framework for LLM workflows, tools, memory, and data." 
authors = ["Griptape "] license = "Apache 2.0" From ec8ba24ec0871a59937d7c03d418d958e49cd842 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 4 Sep 2024 11:05:35 -0700 Subject: [PATCH 18/39] Fix location of dependabot configuration (#1132) --- .dependabot.yml => .github/dependabot.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .dependabot.yml => .github/dependabot.yml (100%) diff --git a/.dependabot.yml b/.github/dependabot.yml similarity index 100% rename from .dependabot.yml rename to .github/dependabot.yml From f21e493450bd70406b9b01872c1b1a1b2cd1323c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 11:22:34 -0700 Subject: [PATCH 19/39] Bump actions/checkout from 3 to 4 (#1133) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/code-checks.yml | 10 +++++----- .github/workflows/docs-integration-tests.yml | 2 +- .github/workflows/unit-tests.yml | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index 51ffc543f..902dddcae 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -19,7 +19,7 @@ jobs: python-version: ["3.12"] steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run formatter @@ -32,7 +32,7 @@ jobs: python-version: [ "3.12" ] steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run type checker @@ -45,7 +45,7 @@ jobs: python-version: [ "3.12" ] steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run linter @@ -58,7 +58,7 @@ jobs: python-version: ["3.12"] steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run unit tests @@ -78,7 +78,7 @@ jobs: python-version: [ "3.12" ] steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run linter diff --git a/.github/workflows/docs-integration-tests.yml b/.github/workflows/docs-integration-tests.yml index 33ebdd562..61111b20b 100644 --- a/.github/workflows/docs-integration-tests.yml +++ b/.github/workflows/docs-integration-tests.yml @@ -137,7 +137,7 @@ jobs: --health-retries 5 steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run integration tests diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index dd819fbb3..4700e2124 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -19,7 +19,7 @@ jobs: python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run unit tests @@ -32,7 +32,7 @@ jobs: python-version: ["3.9"] steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-bare-environment - name: 
Run unit tests @@ -48,7 +48,7 @@ jobs: shell: bash steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Init environment uses: ./.github/actions/init-environment - name: Run unit tests From be5850a26aedf4a155b922f8998c665222b7c114 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 11:59:37 -0700 Subject: [PATCH 20/39] Bump anthropic from 0.29.2 to 0.34.2 (#1134) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Collin Dutter --- poetry.lock | 13 ++++--------- pyproject.toml | 2 +- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2b9bd3641..a018808d6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -192,13 +192,13 @@ files = [ [[package]] name = "anthropic" -version = "0.29.2" +version = "0.34.2" description = "The official Python library for the anthropic API" optional = true python-versions = ">=3.7" files = [ - {file = "anthropic-0.29.2-py3-none-any.whl", hash = "sha256:b49804cfe614859a38fe947797cdc59e1ebdf25cc7dfe6c5d9ae0301b9637217"}, - {file = "anthropic-0.29.2.tar.gz", hash = "sha256:466494014471b13ab4004152145ac5b796519b02771a1881ddb6a842f1917110"}, + {file = "anthropic-0.34.2-py3-none-any.whl", hash = "sha256:f50a628eb71e2c76858b106c8cbea278c45c6bd2077cb3aff716a112abddc9fc"}, + {file = "anthropic-0.34.2.tar.gz", hash = "sha256:808ea19276f26646bfde9ee535669735519376e4eeb301a2974fc69892be1d6e"}, ] [package.dependencies] @@ -6374,11 +6374,6 @@ files = [ {file = "triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb"}, {file = "triton-3.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bcbf3b1c48af6a28011a5c40a5b3b9b5330530c3827716b5fbf6d7adcc1e53e9"}, {file = "triton-3.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6e5727202f7078c56f91ff13ad0c1abab14a0e7f2c87e91b12b6f64f3e8ae609"}, - {file = "triton-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b052da883351fdf6be3d93cedae6db3b8e3988d3b09ed221bccecfa9612230"}, - {file = "triton-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd34f19a8582af96e6291d4afce25dac08cb2a5d218c599163761e8e0827208e"}, - {file = "triton-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d5e10de8c011adeb7c878c6ce0dd6073b14367749e34467f1cff2bde1b78253"}, - {file = "triton-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8903767951bf86ec960b4fe4e21bc970055afc65e9d57e916d79ae3c93665e3"}, - {file = "triton-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41004fb1ae9a53fcb3e970745feb87f0e3c94c6ce1ba86e95fa3b8537894bef7"}, ] [package.dependencies] @@ -7012,4 +7007,4 @@ loaders-sql = ["sqlalchemy"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "d368587717dd8496f0db30403afa59ca6ff9e0b4e2d747f2b4c703e832d904c3" +content-hash = "5c8d23b1fdeb14a1dc7efd7c757a8282a60f6a006cceed741ab99b37467ef57e" diff --git a/pyproject.toml b/pyproject.toml index 13c7d5219..df3468e53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ requests = "^2.32.0" # drivers cohere = { version = "^5.5.4", optional = true } -anthropic = { version = "^0.29.0", optional = true } +anthropic = { version = 
">=0.29,<0.35", optional = true } transformers = { version = "^4.41.1", optional = true, extras=["torch"] } huggingface-hub = { version = "^0.24.0", optional = true } boto3 = { version = "^1.34.119", optional = true } From 7a22856a62b1194670b50757e4ec50afe6a75503 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:23:55 -0700 Subject: [PATCH 21/39] Bump typos from 1.23.6 to 1.24.5 (#1135) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Collin Dutter --- poetry.lock | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/poetry.lock b/poetry.lock index a018808d6..51e51c23f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -6466,21 +6466,21 @@ files = [ [[package]] name = "typos" -version = "1.23.6" +version = "1.24.5" description = "Source Code Spelling Correction" optional = false python-versions = ">=3.7" files = [ - {file = "typos-1.23.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9209947ab1e815bcb8cb781fc73fd6ad88eacdea7b1c15e73ca49217fa7c44e7"}, - {file = "typos-1.23.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b049bfce407d7d61c5be4955d2fae6db644dc5d56ca236224cae0c3978024a75"}, - {file = "typos-1.23.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0b17e19c5e6b4f46acf0f60d053e0c188d31c09748f487f171465623f5f3380"}, - {file = "typos-1.23.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b609d525078b222cf8e25bd8e5cd60a56a542129d7bccb4f6cc992f686410331"}, - {file = "typos-1.23.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fbf955dc4a09a95d3358f8edb10c1418e45bf07a6c9c414432320009a74dd5f"}, - {file = "typos-1.23.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c686b06039b7fd95eed661cd2093fa7f048c76cb40b6bad55827a68aa707240a"}, - {file = "typos-1.23.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0fda8c8502bce101277eb0a4b4d04847fc7018e2f9cff6d2fc86b3fdec239755"}, - {file = "typos-1.23.6-py3-none-win32.whl", hash = "sha256:8edaba24813be7ef678868e8ed49c48eb70cf128afc41ae86cc2127fb32e326b"}, - {file = "typos-1.23.6-py3-none-win_amd64.whl", hash = "sha256:d47b7d0e08975adf67873a8e43dc09fc1b6ff655a4241497348808ee54442668"}, - {file = "typos-1.23.6.tar.gz", hash = "sha256:2691988d2a15cde2cdd4f2fa5fd32880765b2a68ed6ccd48d6dc693c44447bcf"}, + {file = "typos-1.24.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:95a75c76ecd4aa32b8a18b5aed9f20e4223276851ffa9d77d552533ed3e23198"}, + {file = "typos-1.24.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9a4634eda1082fbe9e7b3fc947870b36b50a964f6b89861ccf19bb9ebf26ddd9"}, + {file = "typos-1.24.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00e1f569ddc8ed80255114cbbbdcb9db278ae738f4ee435ba60803b2c8e7d519"}, + {file = "typos-1.24.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8940a50ad420dc7924e0db520c88cedce2c6cc88f206c621755e5a966c0ad645"}, + {file = "typos-1.24.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1433c987eb2dec6ce627e381870aa36f44cb98696ca4f9ff194abb87bc2075d3"}, + {file = "typos-1.24.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b3e2f44f7f39272ae0cce3f3b89157218db82f5214354d76d3a60f1af0bd0602"}, + {file = "typos-1.24.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b226d2f2960e6a5430a0af30b896e2e067ffa9fe98ea4196c0f514ad97219c47"}, + {file = 
"typos-1.24.5-py3-none-win32.whl", hash = "sha256:e6a7b77c13e49a5791d3be537eede2e8f4496e662aa7501260344edd5ba7df86"}, + {file = "typos-1.24.5-py3-none-win_amd64.whl", hash = "sha256:47c237a0bbcd8ab432a562020c386abe45f8ea71218b74d800d799d65b39d08b"}, + {file = "typos-1.24.5.tar.gz", hash = "sha256:b31af4d73fd35c6cda7530c5f9d7ca23ecfa11e97d4709783496353cef7e7a73"}, ] [[package]] From c2ee3dd83965292779a4e1a726abbb102ee08bd8 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 4 Sep 2024 12:55:58 -0700 Subject: [PATCH 22/39] Create dependabot groups, update for minor/patch (#1139) --- .github/dependabot.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 645c171aa..714fa9ae3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,6 +4,17 @@ updates: directory: "/" schedule: interval: "weekly" + groups: + dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + group-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" - package-ecosystem: "github-actions" directory: "/" schedule: From f11b594c1e513e402bf1265783c655edc9573c96 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 13:30:02 -0700 Subject: [PATCH 23/39] Bump boto3-stubs from 1.35.2 to 1.35.11 (#1136) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Collin Dutter --- poetry.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index 51e51c23f..041214238 100644 --- a/poetry.lock +++ b/poetry.lock @@ -368,13 +368,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.35.2" -description = "Type annotations for boto3 1.35.2 generated with mypy-boto3-builder 7.26.0" +version = "1.35.11" +description = "Type annotations for boto3 1.35.11 generated with mypy-boto3-builder 8.0.1" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs-1.35.2-py3-none-any.whl", hash = "sha256:b86347f84329ee616a5c583c6087f3708e3166d325f1600d09117db07875262a"}, - {file = "boto3_stubs-1.35.2.tar.gz", hash = "sha256:3b06987af5e125e35c61d3ee530cafeda8e63e45075349aaf783419af52c5587"}, + {file = "boto3_stubs-1.35.11-py3-none-any.whl", hash = "sha256:43611ee8fe11402b78241d76a2866086dc836541ef1332bf558f852bf465ac85"}, + {file = "boto3_stubs-1.35.11.tar.gz", hash = "sha256:c2d803a9a125648afdda5551e108a59f1ce0d70070b7ef39b27c09699b74735a"}, ] [package.dependencies] @@ -392,7 +392,7 @@ accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)"] account = ["mypy-boto3-account (>=1.35.0,<1.36.0)"] acm = ["mypy-boto3-acm (>=1.35.0,<1.36.0)"] acm-pca = ["mypy-boto3-acm-pca (>=1.35.0,<1.36.0)"] -all = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)", "mypy-boto3-account (>=1.35.0,<1.36.0)", "mypy-boto3-acm (>=1.35.0,<1.36.0)", "mypy-boto3-acm-pca (>=1.35.0,<1.36.0)", "mypy-boto3-amp (>=1.35.0,<1.36.0)", "mypy-boto3-amplify (>=1.35.0,<1.36.0)", "mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)", "mypy-boto3-amplifyuibuilder (>=1.35.0,<1.36.0)", "mypy-boto3-apigateway (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewaymanagementapi (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewayv2 (>=1.35.0,<1.36.0)", "mypy-boto3-appconfig (>=1.35.0,<1.36.0)", "mypy-boto3-appconfigdata (>=1.35.0,<1.36.0)", "mypy-boto3-appfabric (>=1.35.0,<1.36.0)", "mypy-boto3-appflow (>=1.35.0,<1.36.0)", 
"mypy-boto3-appintegrations (>=1.35.0,<1.36.0)", "mypy-boto3-application-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-application-insights (>=1.35.0,<1.36.0)", "mypy-boto3-application-signals (>=1.35.0,<1.36.0)", "mypy-boto3-applicationcostprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-appmesh (>=1.35.0,<1.36.0)", "mypy-boto3-apprunner (>=1.35.0,<1.36.0)", "mypy-boto3-appstream (>=1.35.0,<1.36.0)", "mypy-boto3-appsync (>=1.35.0,<1.36.0)", "mypy-boto3-apptest (>=1.35.0,<1.36.0)", "mypy-boto3-arc-zonal-shift (>=1.35.0,<1.36.0)", "mypy-boto3-artifact (>=1.35.0,<1.36.0)", "mypy-boto3-athena (>=1.35.0,<1.36.0)", "mypy-boto3-auditmanager (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling-plans (>=1.35.0,<1.36.0)", "mypy-boto3-b2bi (>=1.35.0,<1.36.0)", "mypy-boto3-backup (>=1.35.0,<1.36.0)", "mypy-boto3-backup-gateway (>=1.35.0,<1.36.0)", "mypy-boto3-batch (>=1.35.0,<1.36.0)", "mypy-boto3-bcm-data-exports (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-billingconductor (>=1.35.0,<1.36.0)", "mypy-boto3-braket (>=1.35.0,<1.36.0)", "mypy-boto3-budgets (>=1.35.0,<1.36.0)", "mypy-boto3-ce (>=1.35.0,<1.36.0)", "mypy-boto3-chatbot (>=1.35.0,<1.36.0)", "mypy-boto3-chime (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-identity (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-meetings (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-messaging (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-voice (>=1.35.0,<1.36.0)", "mypy-boto3-cleanrooms (>=1.35.0,<1.36.0)", "mypy-boto3-cleanroomsml (>=1.35.0,<1.36.0)", "mypy-boto3-cloud9 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudcontrol (>=1.35.0,<1.36.0)", "mypy-boto3-clouddirectory (>=1.35.0,<1.36.0)", "mypy-boto3-cloudformation (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsm (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsmv2 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearch (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearchdomain (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail-data (>=1.35.0,<1.36.0)", "mypy-boto3-cloudwatch (>=1.35.0,<1.36.0)", "mypy-boto3-codeartifact (>=1.35.0,<1.36.0)", "mypy-boto3-codebuild (>=1.35.0,<1.36.0)", "mypy-boto3-codecatalyst (>=1.35.0,<1.36.0)", "mypy-boto3-codecommit (>=1.35.0,<1.36.0)", "mypy-boto3-codeconnections (>=1.35.0,<1.36.0)", "mypy-boto3-codedeploy (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-reviewer (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-security (>=1.35.0,<1.36.0)", "mypy-boto3-codeguruprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-codepipeline (>=1.35.0,<1.36.0)", "mypy-boto3-codestar (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-connections (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-notifications (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-identity (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-idp (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-sync (>=1.35.0,<1.36.0)", "mypy-boto3-comprehend (>=1.35.0,<1.36.0)", "mypy-boto3-comprehendmedical (>=1.35.0,<1.36.0)", "mypy-boto3-compute-optimizer (>=1.35.0,<1.36.0)", "mypy-boto3-config (>=1.35.0,<1.36.0)", "mypy-boto3-connect (>=1.35.0,<1.36.0)", "mypy-boto3-connect-contact-lens (>=1.35.0,<1.36.0)", "mypy-boto3-connectcampaigns (>=1.35.0,<1.36.0)", "mypy-boto3-connectcases (>=1.35.0,<1.36.0)", "mypy-boto3-connectparticipant 
(>=1.35.0,<1.36.0)", "mypy-boto3-controlcatalog (>=1.35.0,<1.36.0)", "mypy-boto3-controltower (>=1.35.0,<1.36.0)", "mypy-boto3-cost-optimization-hub (>=1.35.0,<1.36.0)", "mypy-boto3-cur (>=1.35.0,<1.36.0)", "mypy-boto3-customer-profiles (>=1.35.0,<1.36.0)", "mypy-boto3-databrew (>=1.35.0,<1.36.0)", "mypy-boto3-dataexchange (>=1.35.0,<1.36.0)", "mypy-boto3-datapipeline (>=1.35.0,<1.36.0)", "mypy-boto3-datasync (>=1.35.0,<1.36.0)", "mypy-boto3-datazone (>=1.35.0,<1.36.0)", "mypy-boto3-dax (>=1.35.0,<1.36.0)", "mypy-boto3-deadline (>=1.35.0,<1.36.0)", "mypy-boto3-detective (>=1.35.0,<1.36.0)", "mypy-boto3-devicefarm (>=1.35.0,<1.36.0)", "mypy-boto3-devops-guru (>=1.35.0,<1.36.0)", "mypy-boto3-directconnect (>=1.35.0,<1.36.0)", "mypy-boto3-discovery (>=1.35.0,<1.36.0)", "mypy-boto3-dlm (>=1.35.0,<1.36.0)", "mypy-boto3-dms (>=1.35.0,<1.36.0)", "mypy-boto3-docdb (>=1.35.0,<1.36.0)", "mypy-boto3-docdb-elastic (>=1.35.0,<1.36.0)", "mypy-boto3-drs (>=1.35.0,<1.36.0)", "mypy-boto3-ds (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodb (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodbstreams (>=1.35.0,<1.36.0)", "mypy-boto3-ebs (>=1.35.0,<1.36.0)", "mypy-boto3-ec2 (>=1.35.0,<1.36.0)", "mypy-boto3-ec2-instance-connect (>=1.35.0,<1.36.0)", "mypy-boto3-ecr (>=1.35.0,<1.36.0)", "mypy-boto3-ecr-public (>=1.35.0,<1.36.0)", "mypy-boto3-ecs (>=1.35.0,<1.36.0)", "mypy-boto3-efs (>=1.35.0,<1.36.0)", "mypy-boto3-eks (>=1.35.0,<1.36.0)", "mypy-boto3-eks-auth (>=1.35.0,<1.36.0)", "mypy-boto3-elastic-inference (>=1.35.0,<1.36.0)", "mypy-boto3-elasticache (>=1.35.0,<1.36.0)", "mypy-boto3-elasticbeanstalk (>=1.35.0,<1.36.0)", "mypy-boto3-elastictranscoder (>=1.35.0,<1.36.0)", "mypy-boto3-elb (>=1.35.0,<1.36.0)", "mypy-boto3-elbv2 (>=1.35.0,<1.36.0)", "mypy-boto3-emr (>=1.35.0,<1.36.0)", "mypy-boto3-emr-containers (>=1.35.0,<1.36.0)", "mypy-boto3-emr-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-entityresolution (>=1.35.0,<1.36.0)", "mypy-boto3-es (>=1.35.0,<1.36.0)", "mypy-boto3-events (>=1.35.0,<1.36.0)", "mypy-boto3-evidently (>=1.35.0,<1.36.0)", "mypy-boto3-finspace (>=1.35.0,<1.36.0)", "mypy-boto3-finspace-data (>=1.35.0,<1.36.0)", "mypy-boto3-firehose (>=1.35.0,<1.36.0)", "mypy-boto3-fis (>=1.35.0,<1.36.0)", "mypy-boto3-fms (>=1.35.0,<1.36.0)", "mypy-boto3-forecast (>=1.35.0,<1.36.0)", "mypy-boto3-forecastquery (>=1.35.0,<1.36.0)", "mypy-boto3-frauddetector (>=1.35.0,<1.36.0)", "mypy-boto3-freetier (>=1.35.0,<1.36.0)", "mypy-boto3-fsx (>=1.35.0,<1.36.0)", "mypy-boto3-gamelift (>=1.35.0,<1.36.0)", "mypy-boto3-glacier (>=1.35.0,<1.36.0)", "mypy-boto3-globalaccelerator (>=1.35.0,<1.36.0)", "mypy-boto3-glue (>=1.35.0,<1.36.0)", "mypy-boto3-grafana (>=1.35.0,<1.36.0)", "mypy-boto3-greengrass (>=1.35.0,<1.36.0)", "mypy-boto3-greengrassv2 (>=1.35.0,<1.36.0)", "mypy-boto3-groundstation (>=1.35.0,<1.36.0)", "mypy-boto3-guardduty (>=1.35.0,<1.36.0)", "mypy-boto3-health (>=1.35.0,<1.36.0)", "mypy-boto3-healthlake (>=1.35.0,<1.36.0)", "mypy-boto3-iam (>=1.35.0,<1.36.0)", "mypy-boto3-identitystore (>=1.35.0,<1.36.0)", "mypy-boto3-imagebuilder (>=1.35.0,<1.36.0)", "mypy-boto3-importexport (>=1.35.0,<1.36.0)", "mypy-boto3-inspector (>=1.35.0,<1.36.0)", "mypy-boto3-inspector-scan (>=1.35.0,<1.36.0)", "mypy-boto3-inspector2 (>=1.35.0,<1.36.0)", "mypy-boto3-internetmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-iot (>=1.35.0,<1.36.0)", "mypy-boto3-iot-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot-jobs-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-devices (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-projects (>=1.35.0,<1.36.0)", "mypy-boto3-iotanalytics 
(>=1.35.0,<1.36.0)", "mypy-boto3-iotdeviceadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents-data (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleethub (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleetwise (>=1.35.0,<1.36.0)", "mypy-boto3-iotsecuretunneling (>=1.35.0,<1.36.0)", "mypy-boto3-iotsitewise (>=1.35.0,<1.36.0)", "mypy-boto3-iotthingsgraph (>=1.35.0,<1.36.0)", "mypy-boto3-iottwinmaker (>=1.35.0,<1.36.0)", "mypy-boto3-iotwireless (>=1.35.0,<1.36.0)", "mypy-boto3-ivs (>=1.35.0,<1.36.0)", "mypy-boto3-ivs-realtime (>=1.35.0,<1.36.0)", "mypy-boto3-ivschat (>=1.35.0,<1.36.0)", "mypy-boto3-kafka (>=1.35.0,<1.36.0)", "mypy-boto3-kafkaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-kendra (>=1.35.0,<1.36.0)", "mypy-boto3-kendra-ranking (>=1.35.0,<1.36.0)", "mypy-boto3-keyspaces (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-archived-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-signaling (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisvideo (>=1.35.0,<1.36.0)", "mypy-boto3-kms (>=1.35.0,<1.36.0)", "mypy-boto3-lakeformation (>=1.35.0,<1.36.0)", "mypy-boto3-lambda (>=1.35.0,<1.36.0)", "mypy-boto3-launch-wizard (>=1.35.0,<1.36.0)", "mypy-boto3-lex-models (>=1.35.0,<1.36.0)", "mypy-boto3-lex-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-models (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-lightsail (>=1.35.0,<1.36.0)", "mypy-boto3-location (>=1.35.0,<1.36.0)", "mypy-boto3-logs (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutequipment (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutmetrics (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutvision (>=1.35.0,<1.36.0)", "mypy-boto3-m2 (>=1.35.0,<1.36.0)", "mypy-boto3-machinelearning (>=1.35.0,<1.36.0)", "mypy-boto3-macie2 (>=1.35.0,<1.36.0)", "mypy-boto3-mailmanager (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain-query (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-agreement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-catalog (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-deployment (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-entitlement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconvert (>=1.35.0,<1.36.0)", "mypy-boto3-medialive (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage-vod (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackagev2 (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore-data (>=1.35.0,<1.36.0)", "mypy-boto3-mediatailor (>=1.35.0,<1.36.0)", "mypy-boto3-medical-imaging (>=1.35.0,<1.36.0)", "mypy-boto3-memorydb (>=1.35.0,<1.36.0)", "mypy-boto3-meteringmarketplace (>=1.35.0,<1.36.0)", "mypy-boto3-mgh (>=1.35.0,<1.36.0)", "mypy-boto3-mgn (>=1.35.0,<1.36.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhub-config (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhuborchestrator (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhubstrategy (>=1.35.0,<1.36.0)", "mypy-boto3-mq (>=1.35.0,<1.36.0)", "mypy-boto3-mturk 
(>=1.35.0,<1.36.0)", "mypy-boto3-mwaa (>=1.35.0,<1.36.0)", "mypy-boto3-neptune (>=1.35.0,<1.36.0)", "mypy-boto3-neptune-graph (>=1.35.0,<1.36.0)", "mypy-boto3-neptunedata (>=1.35.0,<1.36.0)", "mypy-boto3-network-firewall (>=1.35.0,<1.36.0)", "mypy-boto3-networkmanager (>=1.35.0,<1.36.0)", "mypy-boto3-networkmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-nimble (>=1.35.0,<1.36.0)", "mypy-boto3-oam (>=1.35.0,<1.36.0)", "mypy-boto3-omics (>=1.35.0,<1.36.0)", "mypy-boto3-opensearch (>=1.35.0,<1.36.0)", "mypy-boto3-opensearchserverless (>=1.35.0,<1.36.0)", "mypy-boto3-opsworks (>=1.35.0,<1.36.0)", "mypy-boto3-opsworkscm (>=1.35.0,<1.36.0)", "mypy-boto3-organizations (>=1.35.0,<1.36.0)", "mypy-boto3-osis (>=1.35.0,<1.36.0)", "mypy-boto3-outposts (>=1.35.0,<1.36.0)", "mypy-boto3-panorama (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography-data (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-ad (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-scep (>=1.35.0,<1.36.0)", "mypy-boto3-personalize (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-events (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-pi (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-email (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.35.0,<1.36.0)", "mypy-boto3-pipes (>=1.35.0,<1.36.0)", "mypy-boto3-polly (>=1.35.0,<1.36.0)", "mypy-boto3-pricing (>=1.35.0,<1.36.0)", "mypy-boto3-privatenetworks (>=1.35.0,<1.36.0)", "mypy-boto3-proton (>=1.35.0,<1.36.0)", "mypy-boto3-qapps (>=1.35.0,<1.36.0)", "mypy-boto3-qbusiness (>=1.35.0,<1.36.0)", "mypy-boto3-qconnect (>=1.35.0,<1.36.0)", "mypy-boto3-qldb (>=1.35.0,<1.36.0)", "mypy-boto3-qldb-session (>=1.35.0,<1.36.0)", "mypy-boto3-quicksight (>=1.35.0,<1.36.0)", "mypy-boto3-ram (>=1.35.0,<1.36.0)", "mypy-boto3-rbin (>=1.35.0,<1.36.0)", "mypy-boto3-rds (>=1.35.0,<1.36.0)", "mypy-boto3-rds-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-rekognition (>=1.35.0,<1.36.0)", "mypy-boto3-repostspace (>=1.35.0,<1.36.0)", "mypy-boto3-resiliencehub (>=1.35.0,<1.36.0)", "mypy-boto3-resource-explorer-2 (>=1.35.0,<1.36.0)", "mypy-boto3-resource-groups (>=1.35.0,<1.36.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.35.0,<1.36.0)", "mypy-boto3-robomaker (>=1.35.0,<1.36.0)", "mypy-boto3-rolesanywhere (>=1.35.0,<1.36.0)", "mypy-boto3-route53 (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-cluster (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-control-config (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-readiness (>=1.35.0,<1.36.0)", "mypy-boto3-route53domains (>=1.35.0,<1.36.0)", "mypy-boto3-route53profiles (>=1.35.0,<1.36.0)", "mypy-boto3-route53resolver (>=1.35.0,<1.36.0)", "mypy-boto3-rum (>=1.35.0,<1.36.0)", "mypy-boto3-s3 (>=1.35.0,<1.36.0)", "mypy-boto3-s3control (>=1.35.0,<1.36.0)", "mypy-boto3-s3outposts (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-edge (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-geospatial (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-metrics (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-savingsplans (>=1.35.0,<1.36.0)", "mypy-boto3-scheduler (>=1.35.0,<1.36.0)", "mypy-boto3-schemas 
(>=1.35.0,<1.36.0)", "mypy-boto3-sdb (>=1.35.0,<1.36.0)", "mypy-boto3-secretsmanager (>=1.35.0,<1.36.0)", "mypy-boto3-securityhub (>=1.35.0,<1.36.0)", "mypy-boto3-securitylake (>=1.35.0,<1.36.0)", "mypy-boto3-serverlessrepo (>=1.35.0,<1.36.0)", "mypy-boto3-service-quotas (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog-appregistry (>=1.35.0,<1.36.0)", "mypy-boto3-servicediscovery (>=1.35.0,<1.36.0)", "mypy-boto3-ses (>=1.35.0,<1.36.0)", "mypy-boto3-sesv2 (>=1.35.0,<1.36.0)", "mypy-boto3-shield (>=1.35.0,<1.36.0)", "mypy-boto3-signer (>=1.35.0,<1.36.0)", "mypy-boto3-simspaceweaver (>=1.35.0,<1.36.0)", "mypy-boto3-sms (>=1.35.0,<1.36.0)", "mypy-boto3-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-snow-device-management (>=1.35.0,<1.36.0)", "mypy-boto3-snowball (>=1.35.0,<1.36.0)", "mypy-boto3-sns (>=1.35.0,<1.36.0)", "mypy-boto3-sqs (>=1.35.0,<1.36.0)", "mypy-boto3-ssm (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-contacts (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-incidents (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-quicksetup (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-sap (>=1.35.0,<1.36.0)", "mypy-boto3-sso (>=1.35.0,<1.36.0)", "mypy-boto3-sso-admin (>=1.35.0,<1.36.0)", "mypy-boto3-sso-oidc (>=1.35.0,<1.36.0)", "mypy-boto3-stepfunctions (>=1.35.0,<1.36.0)", "mypy-boto3-storagegateway (>=1.35.0,<1.36.0)", "mypy-boto3-sts (>=1.35.0,<1.36.0)", "mypy-boto3-supplychain (>=1.35.0,<1.36.0)", "mypy-boto3-support (>=1.35.0,<1.36.0)", "mypy-boto3-support-app (>=1.35.0,<1.36.0)", "mypy-boto3-swf (>=1.35.0,<1.36.0)", "mypy-boto3-synthetics (>=1.35.0,<1.36.0)", "mypy-boto3-taxsettings (>=1.35.0,<1.36.0)", "mypy-boto3-textract (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-influxdb (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-query (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-write (>=1.35.0,<1.36.0)", "mypy-boto3-tnb (>=1.35.0,<1.36.0)", "mypy-boto3-transcribe (>=1.35.0,<1.36.0)", "mypy-boto3-transfer (>=1.35.0,<1.36.0)", "mypy-boto3-translate (>=1.35.0,<1.36.0)", "mypy-boto3-trustedadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-verifiedpermissions (>=1.35.0,<1.36.0)", "mypy-boto3-voice-id (>=1.35.0,<1.36.0)", "mypy-boto3-vpc-lattice (>=1.35.0,<1.36.0)", "mypy-boto3-waf (>=1.35.0,<1.36.0)", "mypy-boto3-waf-regional (>=1.35.0,<1.36.0)", "mypy-boto3-wafv2 (>=1.35.0,<1.36.0)", "mypy-boto3-wellarchitected (>=1.35.0,<1.36.0)", "mypy-boto3-wisdom (>=1.35.0,<1.36.0)", "mypy-boto3-workdocs (>=1.35.0,<1.36.0)", "mypy-boto3-worklink (>=1.35.0,<1.36.0)", "mypy-boto3-workmail (>=1.35.0,<1.36.0)", "mypy-boto3-workmailmessageflow (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-thin-client (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-web (>=1.35.0,<1.36.0)", "mypy-boto3-xray (>=1.35.0,<1.36.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)", "mypy-boto3-account (>=1.35.0,<1.36.0)", "mypy-boto3-acm (>=1.35.0,<1.36.0)", "mypy-boto3-acm-pca (>=1.35.0,<1.36.0)", "mypy-boto3-amp (>=1.35.0,<1.36.0)", "mypy-boto3-amplify (>=1.35.0,<1.36.0)", "mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)", "mypy-boto3-amplifyuibuilder (>=1.35.0,<1.36.0)", "mypy-boto3-apigateway (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewaymanagementapi (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewayv2 (>=1.35.0,<1.36.0)", "mypy-boto3-appconfig (>=1.35.0,<1.36.0)", "mypy-boto3-appconfigdata (>=1.35.0,<1.36.0)", "mypy-boto3-appfabric (>=1.35.0,<1.36.0)", "mypy-boto3-appflow (>=1.35.0,<1.36.0)", "mypy-boto3-appintegrations (>=1.35.0,<1.36.0)", "mypy-boto3-application-autoscaling (>=1.35.0,<1.36.0)", 
"mypy-boto3-application-insights (>=1.35.0,<1.36.0)", "mypy-boto3-application-signals (>=1.35.0,<1.36.0)", "mypy-boto3-applicationcostprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-appmesh (>=1.35.0,<1.36.0)", "mypy-boto3-apprunner (>=1.35.0,<1.36.0)", "mypy-boto3-appstream (>=1.35.0,<1.36.0)", "mypy-boto3-appsync (>=1.35.0,<1.36.0)", "mypy-boto3-apptest (>=1.35.0,<1.36.0)", "mypy-boto3-arc-zonal-shift (>=1.35.0,<1.36.0)", "mypy-boto3-artifact (>=1.35.0,<1.36.0)", "mypy-boto3-athena (>=1.35.0,<1.36.0)", "mypy-boto3-auditmanager (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling-plans (>=1.35.0,<1.36.0)", "mypy-boto3-b2bi (>=1.35.0,<1.36.0)", "mypy-boto3-backup (>=1.35.0,<1.36.0)", "mypy-boto3-backup-gateway (>=1.35.0,<1.36.0)", "mypy-boto3-batch (>=1.35.0,<1.36.0)", "mypy-boto3-bcm-data-exports (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-billingconductor (>=1.35.0,<1.36.0)", "mypy-boto3-braket (>=1.35.0,<1.36.0)", "mypy-boto3-budgets (>=1.35.0,<1.36.0)", "mypy-boto3-ce (>=1.35.0,<1.36.0)", "mypy-boto3-chatbot (>=1.35.0,<1.36.0)", "mypy-boto3-chime (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-identity (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-meetings (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-messaging (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-voice (>=1.35.0,<1.36.0)", "mypy-boto3-cleanrooms (>=1.35.0,<1.36.0)", "mypy-boto3-cleanroomsml (>=1.35.0,<1.36.0)", "mypy-boto3-cloud9 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudcontrol (>=1.35.0,<1.36.0)", "mypy-boto3-clouddirectory (>=1.35.0,<1.36.0)", "mypy-boto3-cloudformation (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsm (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsmv2 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearch (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearchdomain (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail-data (>=1.35.0,<1.36.0)", "mypy-boto3-cloudwatch (>=1.35.0,<1.36.0)", "mypy-boto3-codeartifact (>=1.35.0,<1.36.0)", "mypy-boto3-codebuild (>=1.35.0,<1.36.0)", "mypy-boto3-codecatalyst (>=1.35.0,<1.36.0)", "mypy-boto3-codecommit (>=1.35.0,<1.36.0)", "mypy-boto3-codeconnections (>=1.35.0,<1.36.0)", "mypy-boto3-codedeploy (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-reviewer (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-security (>=1.35.0,<1.36.0)", "mypy-boto3-codeguruprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-codepipeline (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-connections (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-notifications (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-identity (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-idp (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-sync (>=1.35.0,<1.36.0)", "mypy-boto3-comprehend (>=1.35.0,<1.36.0)", "mypy-boto3-comprehendmedical (>=1.35.0,<1.36.0)", "mypy-boto3-compute-optimizer (>=1.35.0,<1.36.0)", "mypy-boto3-config (>=1.35.0,<1.36.0)", "mypy-boto3-connect (>=1.35.0,<1.36.0)", "mypy-boto3-connect-contact-lens (>=1.35.0,<1.36.0)", "mypy-boto3-connectcampaigns (>=1.35.0,<1.36.0)", "mypy-boto3-connectcases (>=1.35.0,<1.36.0)", "mypy-boto3-connectparticipant (>=1.35.0,<1.36.0)", "mypy-boto3-controlcatalog (>=1.35.0,<1.36.0)", "mypy-boto3-controltower (>=1.35.0,<1.36.0)", "mypy-boto3-cost-optimization-hub 
(>=1.35.0,<1.36.0)", "mypy-boto3-cur (>=1.35.0,<1.36.0)", "mypy-boto3-customer-profiles (>=1.35.0,<1.36.0)", "mypy-boto3-databrew (>=1.35.0,<1.36.0)", "mypy-boto3-dataexchange (>=1.35.0,<1.36.0)", "mypy-boto3-datapipeline (>=1.35.0,<1.36.0)", "mypy-boto3-datasync (>=1.35.0,<1.36.0)", "mypy-boto3-datazone (>=1.35.0,<1.36.0)", "mypy-boto3-dax (>=1.35.0,<1.36.0)", "mypy-boto3-deadline (>=1.35.0,<1.36.0)", "mypy-boto3-detective (>=1.35.0,<1.36.0)", "mypy-boto3-devicefarm (>=1.35.0,<1.36.0)", "mypy-boto3-devops-guru (>=1.35.0,<1.36.0)", "mypy-boto3-directconnect (>=1.35.0,<1.36.0)", "mypy-boto3-discovery (>=1.35.0,<1.36.0)", "mypy-boto3-dlm (>=1.35.0,<1.36.0)", "mypy-boto3-dms (>=1.35.0,<1.36.0)", "mypy-boto3-docdb (>=1.35.0,<1.36.0)", "mypy-boto3-docdb-elastic (>=1.35.0,<1.36.0)", "mypy-boto3-drs (>=1.35.0,<1.36.0)", "mypy-boto3-ds (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodb (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodbstreams (>=1.35.0,<1.36.0)", "mypy-boto3-ebs (>=1.35.0,<1.36.0)", "mypy-boto3-ec2 (>=1.35.0,<1.36.0)", "mypy-boto3-ec2-instance-connect (>=1.35.0,<1.36.0)", "mypy-boto3-ecr (>=1.35.0,<1.36.0)", "mypy-boto3-ecr-public (>=1.35.0,<1.36.0)", "mypy-boto3-ecs (>=1.35.0,<1.36.0)", "mypy-boto3-efs (>=1.35.0,<1.36.0)", "mypy-boto3-eks (>=1.35.0,<1.36.0)", "mypy-boto3-eks-auth (>=1.35.0,<1.36.0)", "mypy-boto3-elastic-inference (>=1.35.0,<1.36.0)", "mypy-boto3-elasticache (>=1.35.0,<1.36.0)", "mypy-boto3-elasticbeanstalk (>=1.35.0,<1.36.0)", "mypy-boto3-elastictranscoder (>=1.35.0,<1.36.0)", "mypy-boto3-elb (>=1.35.0,<1.36.0)", "mypy-boto3-elbv2 (>=1.35.0,<1.36.0)", "mypy-boto3-emr (>=1.35.0,<1.36.0)", "mypy-boto3-emr-containers (>=1.35.0,<1.36.0)", "mypy-boto3-emr-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-entityresolution (>=1.35.0,<1.36.0)", "mypy-boto3-es (>=1.35.0,<1.36.0)", "mypy-boto3-events (>=1.35.0,<1.36.0)", "mypy-boto3-evidently (>=1.35.0,<1.36.0)", "mypy-boto3-finspace (>=1.35.0,<1.36.0)", "mypy-boto3-finspace-data (>=1.35.0,<1.36.0)", "mypy-boto3-firehose (>=1.35.0,<1.36.0)", "mypy-boto3-fis (>=1.35.0,<1.36.0)", "mypy-boto3-fms (>=1.35.0,<1.36.0)", "mypy-boto3-forecast (>=1.35.0,<1.36.0)", "mypy-boto3-forecastquery (>=1.35.0,<1.36.0)", "mypy-boto3-frauddetector (>=1.35.0,<1.36.0)", "mypy-boto3-freetier (>=1.35.0,<1.36.0)", "mypy-boto3-fsx (>=1.35.0,<1.36.0)", "mypy-boto3-gamelift (>=1.35.0,<1.36.0)", "mypy-boto3-glacier (>=1.35.0,<1.36.0)", "mypy-boto3-globalaccelerator (>=1.35.0,<1.36.0)", "mypy-boto3-glue (>=1.35.0,<1.36.0)", "mypy-boto3-grafana (>=1.35.0,<1.36.0)", "mypy-boto3-greengrass (>=1.35.0,<1.36.0)", "mypy-boto3-greengrassv2 (>=1.35.0,<1.36.0)", "mypy-boto3-groundstation (>=1.35.0,<1.36.0)", "mypy-boto3-guardduty (>=1.35.0,<1.36.0)", "mypy-boto3-health (>=1.35.0,<1.36.0)", "mypy-boto3-healthlake (>=1.35.0,<1.36.0)", "mypy-boto3-iam (>=1.35.0,<1.36.0)", "mypy-boto3-identitystore (>=1.35.0,<1.36.0)", "mypy-boto3-imagebuilder (>=1.35.0,<1.36.0)", "mypy-boto3-importexport (>=1.35.0,<1.36.0)", "mypy-boto3-inspector (>=1.35.0,<1.36.0)", "mypy-boto3-inspector-scan (>=1.35.0,<1.36.0)", "mypy-boto3-inspector2 (>=1.35.0,<1.36.0)", "mypy-boto3-internetmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-iot (>=1.35.0,<1.36.0)", "mypy-boto3-iot-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot-jobs-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-devices (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-projects (>=1.35.0,<1.36.0)", "mypy-boto3-iotanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-iotdeviceadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents-data 
(>=1.35.0,<1.36.0)", "mypy-boto3-iotfleethub (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleetwise (>=1.35.0,<1.36.0)", "mypy-boto3-iotsecuretunneling (>=1.35.0,<1.36.0)", "mypy-boto3-iotsitewise (>=1.35.0,<1.36.0)", "mypy-boto3-iotthingsgraph (>=1.35.0,<1.36.0)", "mypy-boto3-iottwinmaker (>=1.35.0,<1.36.0)", "mypy-boto3-iotwireless (>=1.35.0,<1.36.0)", "mypy-boto3-ivs (>=1.35.0,<1.36.0)", "mypy-boto3-ivs-realtime (>=1.35.0,<1.36.0)", "mypy-boto3-ivschat (>=1.35.0,<1.36.0)", "mypy-boto3-kafka (>=1.35.0,<1.36.0)", "mypy-boto3-kafkaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-kendra (>=1.35.0,<1.36.0)", "mypy-boto3-kendra-ranking (>=1.35.0,<1.36.0)", "mypy-boto3-keyspaces (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-archived-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-signaling (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisvideo (>=1.35.0,<1.36.0)", "mypy-boto3-kms (>=1.35.0,<1.36.0)", "mypy-boto3-lakeformation (>=1.35.0,<1.36.0)", "mypy-boto3-lambda (>=1.35.0,<1.36.0)", "mypy-boto3-launch-wizard (>=1.35.0,<1.36.0)", "mypy-boto3-lex-models (>=1.35.0,<1.36.0)", "mypy-boto3-lex-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-models (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-lightsail (>=1.35.0,<1.36.0)", "mypy-boto3-location (>=1.35.0,<1.36.0)", "mypy-boto3-logs (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutequipment (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutmetrics (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutvision (>=1.35.0,<1.36.0)", "mypy-boto3-m2 (>=1.35.0,<1.36.0)", "mypy-boto3-machinelearning (>=1.35.0,<1.36.0)", "mypy-boto3-macie2 (>=1.35.0,<1.36.0)", "mypy-boto3-mailmanager (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain-query (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-agreement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-catalog (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-deployment (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-entitlement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconvert (>=1.35.0,<1.36.0)", "mypy-boto3-medialive (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage-vod (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackagev2 (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore-data (>=1.35.0,<1.36.0)", "mypy-boto3-mediatailor (>=1.35.0,<1.36.0)", "mypy-boto3-medical-imaging (>=1.35.0,<1.36.0)", "mypy-boto3-memorydb (>=1.35.0,<1.36.0)", "mypy-boto3-meteringmarketplace (>=1.35.0,<1.36.0)", "mypy-boto3-mgh (>=1.35.0,<1.36.0)", "mypy-boto3-mgn (>=1.35.0,<1.36.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhub-config (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhuborchestrator (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhubstrategy (>=1.35.0,<1.36.0)", "mypy-boto3-mq (>=1.35.0,<1.36.0)", "mypy-boto3-mturk (>=1.35.0,<1.36.0)", "mypy-boto3-mwaa (>=1.35.0,<1.36.0)", "mypy-boto3-neptune (>=1.35.0,<1.36.0)", "mypy-boto3-neptune-graph (>=1.35.0,<1.36.0)", 
"mypy-boto3-neptunedata (>=1.35.0,<1.36.0)", "mypy-boto3-network-firewall (>=1.35.0,<1.36.0)", "mypy-boto3-networkmanager (>=1.35.0,<1.36.0)", "mypy-boto3-networkmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-nimble (>=1.35.0,<1.36.0)", "mypy-boto3-oam (>=1.35.0,<1.36.0)", "mypy-boto3-omics (>=1.35.0,<1.36.0)", "mypy-boto3-opensearch (>=1.35.0,<1.36.0)", "mypy-boto3-opensearchserverless (>=1.35.0,<1.36.0)", "mypy-boto3-opsworks (>=1.35.0,<1.36.0)", "mypy-boto3-opsworkscm (>=1.35.0,<1.36.0)", "mypy-boto3-organizations (>=1.35.0,<1.36.0)", "mypy-boto3-osis (>=1.35.0,<1.36.0)", "mypy-boto3-outposts (>=1.35.0,<1.36.0)", "mypy-boto3-panorama (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography-data (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-ad (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-scep (>=1.35.0,<1.36.0)", "mypy-boto3-pcs (>=1.35.0,<1.36.0)", "mypy-boto3-personalize (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-events (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-pi (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-email (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.35.0,<1.36.0)", "mypy-boto3-pipes (>=1.35.0,<1.36.0)", "mypy-boto3-polly (>=1.35.0,<1.36.0)", "mypy-boto3-pricing (>=1.35.0,<1.36.0)", "mypy-boto3-privatenetworks (>=1.35.0,<1.36.0)", "mypy-boto3-proton (>=1.35.0,<1.36.0)", "mypy-boto3-qapps (>=1.35.0,<1.36.0)", "mypy-boto3-qbusiness (>=1.35.0,<1.36.0)", "mypy-boto3-qconnect (>=1.35.0,<1.36.0)", "mypy-boto3-qldb (>=1.35.0,<1.36.0)", "mypy-boto3-qldb-session (>=1.35.0,<1.36.0)", "mypy-boto3-quicksight (>=1.35.0,<1.36.0)", "mypy-boto3-ram (>=1.35.0,<1.36.0)", "mypy-boto3-rbin (>=1.35.0,<1.36.0)", "mypy-boto3-rds (>=1.35.0,<1.36.0)", "mypy-boto3-rds-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-rekognition (>=1.35.0,<1.36.0)", "mypy-boto3-repostspace (>=1.35.0,<1.36.0)", "mypy-boto3-resiliencehub (>=1.35.0,<1.36.0)", "mypy-boto3-resource-explorer-2 (>=1.35.0,<1.36.0)", "mypy-boto3-resource-groups (>=1.35.0,<1.36.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.35.0,<1.36.0)", "mypy-boto3-robomaker (>=1.35.0,<1.36.0)", "mypy-boto3-rolesanywhere (>=1.35.0,<1.36.0)", "mypy-boto3-route53 (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-cluster (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-control-config (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-readiness (>=1.35.0,<1.36.0)", "mypy-boto3-route53domains (>=1.35.0,<1.36.0)", "mypy-boto3-route53profiles (>=1.35.0,<1.36.0)", "mypy-boto3-route53resolver (>=1.35.0,<1.36.0)", "mypy-boto3-rum (>=1.35.0,<1.36.0)", "mypy-boto3-s3 (>=1.35.0,<1.36.0)", "mypy-boto3-s3control (>=1.35.0,<1.36.0)", "mypy-boto3-s3outposts (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-edge (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-geospatial (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-metrics (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-savingsplans (>=1.35.0,<1.36.0)", "mypy-boto3-scheduler (>=1.35.0,<1.36.0)", "mypy-boto3-schemas (>=1.35.0,<1.36.0)", "mypy-boto3-sdb (>=1.35.0,<1.36.0)", "mypy-boto3-secretsmanager (>=1.35.0,<1.36.0)", 
"mypy-boto3-securityhub (>=1.35.0,<1.36.0)", "mypy-boto3-securitylake (>=1.35.0,<1.36.0)", "mypy-boto3-serverlessrepo (>=1.35.0,<1.36.0)", "mypy-boto3-service-quotas (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog-appregistry (>=1.35.0,<1.36.0)", "mypy-boto3-servicediscovery (>=1.35.0,<1.36.0)", "mypy-boto3-ses (>=1.35.0,<1.36.0)", "mypy-boto3-sesv2 (>=1.35.0,<1.36.0)", "mypy-boto3-shield (>=1.35.0,<1.36.0)", "mypy-boto3-signer (>=1.35.0,<1.36.0)", "mypy-boto3-simspaceweaver (>=1.35.0,<1.36.0)", "mypy-boto3-sms (>=1.35.0,<1.36.0)", "mypy-boto3-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-snow-device-management (>=1.35.0,<1.36.0)", "mypy-boto3-snowball (>=1.35.0,<1.36.0)", "mypy-boto3-sns (>=1.35.0,<1.36.0)", "mypy-boto3-sqs (>=1.35.0,<1.36.0)", "mypy-boto3-ssm (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-contacts (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-incidents (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-quicksetup (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-sap (>=1.35.0,<1.36.0)", "mypy-boto3-sso (>=1.35.0,<1.36.0)", "mypy-boto3-sso-admin (>=1.35.0,<1.36.0)", "mypy-boto3-sso-oidc (>=1.35.0,<1.36.0)", "mypy-boto3-stepfunctions (>=1.35.0,<1.36.0)", "mypy-boto3-storagegateway (>=1.35.0,<1.36.0)", "mypy-boto3-sts (>=1.35.0,<1.36.0)", "mypy-boto3-supplychain (>=1.35.0,<1.36.0)", "mypy-boto3-support (>=1.35.0,<1.36.0)", "mypy-boto3-support-app (>=1.35.0,<1.36.0)", "mypy-boto3-swf (>=1.35.0,<1.36.0)", "mypy-boto3-synthetics (>=1.35.0,<1.36.0)", "mypy-boto3-taxsettings (>=1.35.0,<1.36.0)", "mypy-boto3-textract (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-influxdb (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-query (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-write (>=1.35.0,<1.36.0)", "mypy-boto3-tnb (>=1.35.0,<1.36.0)", "mypy-boto3-transcribe (>=1.35.0,<1.36.0)", "mypy-boto3-transfer (>=1.35.0,<1.36.0)", "mypy-boto3-translate (>=1.35.0,<1.36.0)", "mypy-boto3-trustedadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-verifiedpermissions (>=1.35.0,<1.36.0)", "mypy-boto3-voice-id (>=1.35.0,<1.36.0)", "mypy-boto3-vpc-lattice (>=1.35.0,<1.36.0)", "mypy-boto3-waf (>=1.35.0,<1.36.0)", "mypy-boto3-waf-regional (>=1.35.0,<1.36.0)", "mypy-boto3-wafv2 (>=1.35.0,<1.36.0)", "mypy-boto3-wellarchitected (>=1.35.0,<1.36.0)", "mypy-boto3-wisdom (>=1.35.0,<1.36.0)", "mypy-boto3-workdocs (>=1.35.0,<1.36.0)", "mypy-boto3-worklink (>=1.35.0,<1.36.0)", "mypy-boto3-workmail (>=1.35.0,<1.36.0)", "mypy-boto3-workmailmessageflow (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-thin-client (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-web (>=1.35.0,<1.36.0)", "mypy-boto3-xray (>=1.35.0,<1.36.0)"] amp = ["mypy-boto3-amp (>=1.35.0,<1.36.0)"] amplify = ["mypy-boto3-amplify (>=1.35.0,<1.36.0)"] amplifybackend = ["mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)"] @@ -430,7 +430,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.35.0,<1.36.0)"] -boto3 = ["boto3 (==1.35.2)", "botocore (==1.35.2)"] +boto3 = ["boto3 (==1.35.11)", "botocore (==1.35.11)"] braket = ["mypy-boto3-braket (>=1.35.0,<1.36.0)"] budgets = ["mypy-boto3-budgets (>=1.35.0,<1.36.0)"] ce = ["mypy-boto3-ce (>=1.35.0,<1.36.0)"] @@ -466,7 +466,6 @@ codeguru-reviewer = ["mypy-boto3-codeguru-reviewer (>=1.35.0,<1.36.0)"] codeguru-security = ["mypy-boto3-codeguru-security (>=1.35.0,<1.36.0)"] codeguruprofiler = 
["mypy-boto3-codeguruprofiler (>=1.35.0,<1.36.0)"] codepipeline = ["mypy-boto3-codepipeline (>=1.35.0,<1.36.0)"] -codestar = ["mypy-boto3-codestar (>=1.35.0,<1.36.0)"] codestar-connections = ["mypy-boto3-codestar-connections (>=1.35.0,<1.36.0)"] codestar-notifications = ["mypy-boto3-codestar-notifications (>=1.35.0,<1.36.0)"] cognito-identity = ["mypy-boto3-cognito-identity (>=1.35.0,<1.36.0)"] @@ -660,6 +659,7 @@ payment-cryptography = ["mypy-boto3-payment-cryptography (>=1.35.0,<1.36.0)"] payment-cryptography-data = ["mypy-boto3-payment-cryptography-data (>=1.35.0,<1.36.0)"] pca-connector-ad = ["mypy-boto3-pca-connector-ad (>=1.35.0,<1.36.0)"] pca-connector-scep = ["mypy-boto3-pca-connector-scep (>=1.35.0,<1.36.0)"] +pcs = ["mypy-boto3-pcs (>=1.35.0,<1.36.0)"] personalize = ["mypy-boto3-personalize (>=1.35.0,<1.36.0)"] personalize-events = ["mypy-boto3-personalize-events (>=1.35.0,<1.36.0)"] personalize-runtime = ["mypy-boto3-personalize-runtime (>=1.35.0,<1.36.0)"] From 01b68fb8efe5385f3b50d6e5f7eacd4882c17e84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 14:03:11 -0700 Subject: [PATCH 24/39] Bump the dependencies group with 15 updates (#1140) --- poetry.lock | 255 +++++++++++++++++++++++++------------------------ pyproject.toml | 8 +- 2 files changed, 132 insertions(+), 131 deletions(-) diff --git a/poetry.lock b/poetry.lock index 041214238..b59a5333e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,34 +2,34 @@ [[package]] name = "accelerate" -version = "0.32.1" +version = "0.34.0" description = "Accelerate" optional = true python-versions = ">=3.8.0" files = [ - {file = "accelerate-0.32.1-py3-none-any.whl", hash = "sha256:71fcf4be00872194071de561634268b71417d7f5b16b178e2fa76b6f117c52b0"}, - {file = "accelerate-0.32.1.tar.gz", hash = "sha256:3999acff0237cd0d4f9fd98b42d5a3163544777b53fc4f1eec886b77e992d177"}, + {file = "accelerate-0.34.0-py3-none-any.whl", hash = "sha256:0161fd3f975dd99b5cdb967bb6942bc986d9da466397742008a73290dcb73408"}, + {file = "accelerate-0.34.0.tar.gz", hash = "sha256:437a93f0cb15a7768483833975b5c781f61e31a203439948f1c6b0217e1f74d5"}, ] [package.dependencies] -huggingface-hub = "*" -numpy = ">=1.17,<2.0.0" +huggingface-hub = ">=0.21.0" +numpy = ">=1.17,<3.0.0" packaging = ">=20.0" psutil = "*" pyyaml = "*" -safetensors = ">=0.3.1" +safetensors = ">=0.4.3" torch = ">=1.10.0" [package.extras] -deepspeed = ["deepspeed (<=0.14.0)"] -dev = ["bitsandbytes", "black (>=23.1,<24.0)", "datasets", "diffusers", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.2.1,<0.3.0)", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"] +deepspeed = ["deepspeed"] +dev = ["bitsandbytes", "black (>=23.1,<24.0)", "datasets", "diffusers", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.2.1,<0.3.0)", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"] quality = ["black (>=23.1,<24.0)", "hf-doc-builder (>=0.3.0)", "ruff (>=0.2.1,<0.3.0)"] rich = ["rich"] sagemaker = ["sagemaker"] -test-dev = ["bitsandbytes", "datasets", "diffusers", "evaluate", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"] +test-dev = ["bitsandbytes", "datasets", "diffusers", "evaluate", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy 
(>=0.2.0)", "tqdm", "transformers"] test-prod = ["parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist"] test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"] -testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"] +testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "torchdata (>=0.8.0)", "torchpippy (>=0.2.0)", "tqdm", "transformers"] [[package]] name = "aiohappyeyeballs" @@ -349,17 +349,17 @@ lxml = ["lxml"] [[package]] name = "boto3" -version = "1.35.2" +version = "1.35.12" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.35.2-py3-none-any.whl", hash = "sha256:c2f0837a259002489e59d1c30008791e3b3bb59e30e48c64e1d2d270147a4549"}, - {file = "boto3-1.35.2.tar.gz", hash = "sha256:cbf197ce28f04bc1ffa1db0aa26a1903d9bfa57a490f70537932e84367cdd15b"}, + {file = "boto3-1.35.12-py3-none-any.whl", hash = "sha256:acaa7c75cbf483605e3c46e9ac03043a4cf5e9866940122d68b06d1defe00774"}, + {file = "boto3-1.35.12.tar.gz", hash = "sha256:b32faab174f6f9b75fada27bcf054ab3e8846bd410ed9817d0b511109326b6b1"}, ] [package.dependencies] -botocore = ">=1.35.2,<1.36.0" +botocore = ">=1.35.12,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -780,13 +780,13 @@ xray = ["mypy-boto3-xray (>=1.35.0,<1.36.0)"] [[package]] name = "botocore" -version = "1.35.2" +version = "1.35.12" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.2-py3-none-any.whl", hash = "sha256:92b168d8be79055bb25754aa34d699866d8aa66abc69f8ce99b0c191bd9c6e70"}, - {file = "botocore-1.35.2.tar.gz", hash = "sha256:96c8eb6f0baed623a1b57ca9f24cb21d5508872cf0dfebb55527a85b6dbc76ba"}, + {file = "botocore-1.35.12-py3-none-any.whl", hash = "sha256:cb787030415438ea6ff8381f8acd8b1107593d5ebea457fd843a5e36ba19e9a4"}, + {file = "botocore-1.35.12.tar.gz", hash = "sha256:a8f8230032d090225a93763675a73c208d121bb63ed99f41ee6ad3d51b74b80d"}, ] [package.dependencies] @@ -1107,13 +1107,13 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cohere" -version = "5.8.1" +version = "5.9.0" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "cohere-5.8.1-py3-none-any.whl", hash = "sha256:92362c651dfbfef8c5d34e95de394578d7197ed7875c6fcbf101e84b60db7fbd"}, - {file = "cohere-5.8.1.tar.gz", hash = "sha256:4c0c4468f15f9ad7fb7af15cc9f7305cd6df51243d69e203682be87e9efa5071"}, + {file = "cohere-5.9.0-py3-none-any.whl", hash = "sha256:7c70cc9e6ade3355e00aa4a77fcb5662b32261a3237e00975d92b97bb5f3c0c9"}, + {file = "cohere-5.9.0.tar.gz", hash = "sha256:74e5b6e1fed0f617c26dfb8ef1cfccf8334321a51cc886c37374047916d71568"}, ] [package.dependencies] @@ -1350,13 +1350,13 @@ packaging = "*" [[package]] name = "diffusers" -version = "0.29.2" +version = "0.30.2" description = "State-of-the-art diffusion in PyTorch and JAX." 
optional = true python-versions = ">=3.8.0" files = [ - {file = "diffusers-0.29.2-py3-none-any.whl", hash = "sha256:d5e9bb13c8097b4eed10df23d1294d2e5a418f53e3f89c7ef228b5b982970428"}, - {file = "diffusers-0.29.2.tar.gz", hash = "sha256:b85f277668e22089cf68b40dd9b76940db7d24ba9cdac107533ed10ab8e4e9db"}, + {file = "diffusers-0.30.2-py3-none-any.whl", hash = "sha256:739826043147c2b59560944591dfdea5d24cd4fb15e751abbe20679a289bece8"}, + {file = "diffusers-0.30.2.tar.gz", hash = "sha256:641875f78f36bdfa4b9af752b124d1fd6d431eadd5547fe0a3f354ae0af2636c"}, ] [package.dependencies] @@ -1370,13 +1370,13 @@ requests = "*" safetensors = ">=0.3.1" [package.extras] -dev = ["GitPython (<3.1.19)", "Jinja2", "accelerate (>=0.29.3)", "compel (==0.1.8)", "datasets", "flax (>=0.4.1)", "hf-doc-builder (>=0.3.0)", "invisible-watermark (>=0.2.0)", "isort (>=5.5.4)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "ruff (==0.1.5)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "torch (>=1.4)", "torchvision", "transformers (>=4.25.1)", "urllib3 (<=2.0.0)"] +dev = ["GitPython (<3.1.19)", "Jinja2", "accelerate (>=0.31.0)", "compel (==0.1.8)", "datasets", "flax (>=0.4.1)", "hf-doc-builder (>=0.3.0)", "invisible-watermark (>=0.2.0)", "isort (>=5.5.4)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "ruff (==0.1.5)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "torch (>=1.4)", "torchvision", "transformers (>=4.41.2)", "urllib3 (<=2.0.0)"] docs = ["hf-doc-builder (>=0.3.0)"] flax = ["flax (>=0.4.1)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)"] quality = ["hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<=2.0.0)"] -test = ["GitPython (<3.1.19)", "Jinja2", "compel (==0.1.8)", "datasets", "invisible-watermark (>=0.2.0)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "torchvision", "transformers (>=4.25.1)"] -torch = ["accelerate (>=0.29.3)", "torch (>=1.4)"] -training = ["Jinja2", "accelerate (>=0.29.3)", "datasets", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "tensorboard"] +test = ["GitPython (<3.1.19)", "Jinja2", "compel (==0.1.8)", "datasets", "invisible-watermark (>=0.2.0)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "torchvision", "transformers (>=4.41.2)"] +torch = ["accelerate (>=0.31.0)", "torch (>=1.4)"] +training = ["Jinja2", "accelerate (>=0.31.0)", "datasets", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "tensorboard"] [[package]] name = "distlib" @@ -1455,13 +1455,13 @@ files = [ [[package]] name = "duckduckgo-search" -version = "6.2.10" +version = "6.2.11" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." 
optional = true python-versions = ">=3.8" files = [ - {file = "duckduckgo_search-6.2.10-py3-none-any.whl", hash = "sha256:266c1528dcbc90931b7c800a2c1041a0cb447c83c485414d77a7e443be717ed6"}, - {file = "duckduckgo_search-6.2.10.tar.gz", hash = "sha256:53057368480ca496fc4e331a34648124711580cf43fbb65336eaa6fd2ee37cec"}, + {file = "duckduckgo_search-6.2.11-py3-none-any.whl", hash = "sha256:6fb7069b79e8928f487001de6859034ade19201bdcd257ec198802430e374bfe"}, + {file = "duckduckgo_search-6.2.11.tar.gz", hash = "sha256:6b6ef1b552c5e67f23e252025d2504caf6f9fc14f70e86c6dd512200f386c673"}, ] [package.dependencies] @@ -2821,13 +2821,13 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markdownify" -version = "0.11.6" +version = "0.13.1" description = "Convert HTML to markdown." optional = true python-versions = "*" files = [ - {file = "markdownify-0.11.6-py3-none-any.whl", hash = "sha256:ba35fe289d5e9073bcd7d2cad629278fe25f1a93741fcdc0bfb4f009076d8324"}, - {file = "markdownify-0.11.6.tar.gz", hash = "sha256:009b240e0c9f4c8eaf1d085625dcd4011e12f0f8cec55dedf9ea6f7655e49bfe"}, + {file = "markdownify-0.13.1-py3-none-any.whl", hash = "sha256:1d181d43d20902bcc69d7be85b5316ed174d0dda72ff56e14ae4c95a4a407d22"}, + {file = "markdownify-0.13.1.tar.gz", hash = "sha256:ab257f9e6bd4075118828a28c9d02f8a4bfeb7421f558834aa79b2dfeb32a098"}, ] [package.dependencies] @@ -3697,13 +3697,13 @@ files = [ [[package]] name = "ollama" -version = "0.3.1" +version = "0.3.2" description = "The official Python client for Ollama." optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "ollama-0.3.1-py3-none-any.whl", hash = "sha256:db50034c73d6350349bdfba19c3f0d54a3cea73eb97b35f9d7419b2fc7206454"}, - {file = "ollama-0.3.1.tar.gz", hash = "sha256:032572fb494a4fba200c65013fe937a65382c846b5f358d9e8918ecbc9ac44b5"}, + {file = "ollama-0.3.2-py3-none-any.whl", hash = "sha256:ed2a6f752bd91c49b477d84a259c5657785d7777689d4a27ffe0a4d5b5dd3cae"}, + {file = "ollama-0.3.2.tar.gz", hash = "sha256:7deb3287cdefa1c39cc046163096f8597b83f59ca31a1f8ae78e71eccb7af95f"}, ] [package.dependencies] @@ -3711,13 +3711,13 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "openai" -version = "1.42.0" +version = "1.43.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.42.0-py3-none-any.whl", hash = "sha256:dc91e0307033a4f94931e5d03cc3b29b9717014ad5e73f9f2051b6cb5eda4d80"}, - {file = "openai-1.42.0.tar.gz", hash = "sha256:c9d31853b4e0bc2dc8bd08003b462a006035655a701471695d0bfdc08529cde3"}, + {file = "openai-1.43.0-py3-none-any.whl", hash = "sha256:1a748c2728edd3a738a72a0212ba866f4fdbe39c9ae03813508b267d45104abe"}, + {file = "openai-1.43.0.tar.gz", hash = "sha256:e607aff9fc3e28eade107e5edd8ca95a910a4b12589336d3cbb6bfe2ac306b3c"}, ] [package.dependencies] @@ -3735,13 +3735,13 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "opensearch-py" -version = "2.7.0" +version = "2.7.1" description = "Python client for OpenSearch" optional = true python-versions = "<4,>=3.8" files = [ - {file = "opensearch_py-2.7.0-py3-none-any.whl", hash = "sha256:6a36535efcda870c820fd84c4bda96d7d57fc900a8c7dec660a48c079904df97"}, - {file = "opensearch_py-2.7.0.tar.gz", hash = "sha256:c09a73727868c29f86ffbed1e987afb7f86bcce983b28bf69249cfad8c831d68"}, + {file = "opensearch_py-2.7.1-py3-none-any.whl", hash = "sha256:5417650eba98a1c7648e502207cebf3a12beab623ffe0ebbf55f9b1b4b6e44e9"}, + {file = 
"opensearch_py-2.7.1.tar.gz", hash = "sha256:67ab76e9373669bc71da417096df59827c08369ac3795d5438c9a8be21cbd759"}, ] [package.dependencies] @@ -3982,12 +3982,12 @@ files = [ [[package]] name = "pgvector" -version = "0.2.5" +version = "0.3.2" description = "pgvector support for Python" optional = true python-versions = ">=3.8" files = [ - {file = "pgvector-0.2.5-py2.py3-none-any.whl", hash = "sha256:5e5e93ec4d3c45ab1fa388729d56c602f6966296e19deee8878928c6d567e41b"}, + {file = "pgvector-0.3.2-py2.py3-none-any.whl", hash = "sha256:a44541c75a7340993b2840015820a910e5d7625d2ddd1235c1ee659732531bf6"}, ] [package.dependencies] @@ -5072,13 +5072,13 @@ pyyaml = "*" [[package]] name = "qdrant-client" -version = "1.11.0" +version = "1.11.1" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.11.0-py3-none-any.whl", hash = "sha256:1f574ccebb91c0bc8a620c9a41a5a010084fbc4d8c6f1cd0ab7b2eeb97336fc0"}, - {file = "qdrant_client-1.11.0.tar.gz", hash = "sha256:7c1d4d7a96cfd1ee0cde2a21c607e9df86bcca795ad8d1fd274d295ab64b8458"}, + {file = "qdrant_client-1.11.1-py3-none-any.whl", hash = "sha256:1375fad77c825c957181ff53775fb900c4383e817f864ea30b2605314da92f07"}, + {file = "qdrant_client-1.11.1.tar.gz", hash = "sha256:bfc23239b027073352ad92152209ec50281519686b7da3041612faece0fcdfbd"}, ] [package.dependencies] @@ -5094,8 +5094,8 @@ pydantic = ">=1.10.8" urllib3 = ">=1.26.14,<3" [package.extras] -fastembed = ["fastembed (==0.3.4)"] -fastembed-gpu = ["fastembed-gpu (==0.3.4)"] +fastembed = ["fastembed (==0.3.6)"] +fastembed-gpu = ["fastembed-gpu (==0.3.6)"] [[package]] name = "readme-renderer" @@ -5292,13 +5292,13 @@ idna2008 = ["idna"] [[package]] name = "rich" -version = "13.7.1" +version = "13.8.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.7.0" files = [ - {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, - {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, + {file = "rich-13.8.0-py3-none-any.whl", hash = "sha256:2e85306a063b9492dffc86278197a60cbece75bcb766022f3436f567cae11bdc"}, + {file = "rich-13.8.0.tar.gz", hash = "sha256:a5ac1f1cd448ade0d59cc3356f7db7a7ccda2c8cbae9c7a90c28ff463d3e91f4"}, ] [package.dependencies] @@ -5855,60 +5855,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.32" +version = "2.0.34" description = "Database Abstraction Library" optional = true python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-win32.whl", hash = "sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-win_amd64.whl", hash = "sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = 
"sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-win32.whl", hash = "sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-win_amd64.whl", hash = "sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-win32.whl", hash = "sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-win_amd64.whl", hash = "sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-win32.whl", hash = "sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-win_amd64.whl", hash = "sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84"}, - {file = "SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202"}, - {file = "SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8"}, + {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db"}, + {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b"}, + {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24"}, + {file = 
"SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd"}, + {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d"}, + {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768"}, + {file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = "sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"}, + {file = "SQLAlchemy-2.0.34-cp310-cp310-win_amd64.whl", hash = "sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-win32.whl", hash = "sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721"}, + {file = "SQLAlchemy-2.0.34-cp311-cp311-win_amd64.whl", hash = "sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-win32.whl", hash = "sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580"}, + {file = "SQLAlchemy-2.0.34-cp312-cp312-win_amd64.whl", hash = "sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a"}, + {file = "SQLAlchemy-2.0.34-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021"}, + {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06"}, + {file = 
"SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e"}, + {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba"}, + {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82"}, + {file = "SQLAlchemy-2.0.34-cp37-cp37m-win32.whl", hash = "sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3"}, + {file = "SQLAlchemy-2.0.34-cp37-cp37m-win_amd64.whl", hash = "sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-win32.whl", hash = "sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434"}, + {file = "SQLAlchemy-2.0.34-cp38-cp38-win_amd64.whl", hash = "sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-win32.whl", hash = "sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c"}, + {file = "SQLAlchemy-2.0.34-cp39-cp39-win_amd64.whl", hash = "sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8"}, + {file = "SQLAlchemy-2.0.34-py3-none-any.whl", hash = "sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f"}, + {file = "sqlalchemy-2.0.34.tar.gz", hash = "sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22"}, ] [package.dependencies] @@ -6197,31 +6197,31 @@ files = [ [[package]] name = "torch" -version = "2.4.0" +version = "2.4.1" description = "Tensors 
and Dynamic neural networks in Python with strong GPU acceleration" optional = true python-versions = ">=3.8.0" files = [ - {file = "torch-2.4.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:4ed94583e244af51d6a8d28701ca5a9e02d1219e782f5a01dd401f90af17d8ac"}, - {file = "torch-2.4.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c4ca297b7bd58b506bfd6e78ffd14eb97c0e7797dcd7965df62f50bb575d8954"}, - {file = "torch-2.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:2497cbc7b3c951d69b276ca51fe01c2865db67040ac67f5fc20b03e41d16ea4a"}, - {file = "torch-2.4.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:685418ab93730efbee71528821ff54005596970dd497bf03c89204fb7e3f71de"}, - {file = "torch-2.4.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e743adadd8c8152bb8373543964551a7cb7cc20ba898dc8f9c0cdbe47c283de0"}, - {file = "torch-2.4.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:7334325c0292cbd5c2eac085f449bf57d3690932eac37027e193ba775703c9e6"}, - {file = "torch-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:97730014da4c57ffacb3c09298c6ce05400606e890bd7a05008d13dd086e46b1"}, - {file = "torch-2.4.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f169b4ea6dc93b3a33319611fcc47dc1406e4dd539844dcbd2dec4c1b96e166d"}, - {file = "torch-2.4.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:997084a0f9784d2a89095a6dc67c7925e21bf25dea0b3d069b41195016ccfcbb"}, - {file = "torch-2.4.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:bc3988e8b36d1e8b998d143255d9408d8c75da4ab6dd0dcfd23b623dfb0f0f57"}, - {file = "torch-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:3374128bbf7e62cdaed6c237bfd39809fbcfaa576bee91e904706840c3f2195c"}, - {file = "torch-2.4.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:91aaf00bfe1ffa44dc5b52809d9a95129fca10212eca3ac26420eb11727c6288"}, - {file = "torch-2.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cc30457ea5489c62747d3306438af00c606b509d78822a88f804202ba63111ed"}, - {file = "torch-2.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a046491aaf96d1215e65e1fa85911ef2ded6d49ea34c8df4d0638879f2402eef"}, - {file = "torch-2.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:688eec9240f3ce775f22e1e1a5ab9894f3d5fe60f3f586deb7dbd23a46a83916"}, - {file = "torch-2.4.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:3af4de2a618fb065e78404c4ba27a818a7b7957eaeff28c6c66ce7fb504b68b8"}, - {file = "torch-2.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:618808d3f610d5f180e47a697d4ec90b810953bb1e020f424b2ac7fb0884b545"}, - {file = "torch-2.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ed765d232d23566052ba83632ec73a4fccde00b4c94ad45d63b471b09d63b7a7"}, - {file = "torch-2.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2feb98ac470109472fb10dfef38622a7ee08482a16c357863ebc7bc7db7c8f7"}, - {file = "torch-2.4.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:8940fc8b97a4c61fdb5d46a368f21f4a3a562a17879e932eb51a5ec62310cb31"}, + {file = "torch-2.4.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:362f82e23a4cd46341daabb76fba08f04cd646df9bfaf5da50af97cb60ca4971"}, + {file = "torch-2.4.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:e8ac1985c3ff0f60d85b991954cfc2cc25f79c84545aead422763148ed2759e3"}, + {file = "torch-2.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91e326e2ccfb1496e3bee58f70ef605aeb27bd26be07ba64f37dcaac3d070ada"}, + {file = "torch-2.4.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d36a8ef100f5bff3e9c3cea934b9e0d7ea277cb8210c7152d34a9a6c5830eadd"}, + {file = "torch-2.4.1-cp311-cp311-manylinux1_x86_64.whl", hash = 
"sha256:0b5f88afdfa05a335d80351e3cea57d38e578c8689f751d35e0ff36bce872113"}, + {file = "torch-2.4.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:ef503165f2341942bfdf2bd520152f19540d0c0e34961232f134dc59ad435be8"}, + {file = "torch-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:092e7c2280c860eff762ac08c4bdcd53d701677851670695e0c22d6d345b269c"}, + {file = "torch-2.4.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:ddddbd8b066e743934a4200b3d54267a46db02106876d21cf31f7da7a96f98ea"}, + {file = "torch-2.4.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:fdc4fe11db3eb93c1115d3e973a27ac7c1a8318af8934ffa36b0370efe28e042"}, + {file = "torch-2.4.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:18835374f599207a9e82c262153c20ddf42ea49bc76b6eadad8e5f49729f6e4d"}, + {file = "torch-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:ebea70ff30544fc021d441ce6b219a88b67524f01170b1c538d7d3ebb5e7f56c"}, + {file = "torch-2.4.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:72b484d5b6cec1a735bf3fa5a1c4883d01748698c5e9cfdbeb4ffab7c7987e0d"}, + {file = "torch-2.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c99e1db4bf0c5347107845d715b4aa1097e601bdc36343d758963055e9599d93"}, + {file = "torch-2.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b57f07e92858db78c5b72857b4f0b33a65b00dc5d68e7948a8494b0314efb880"}, + {file = "torch-2.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:f18197f3f7c15cde2115892b64f17c80dbf01ed72b008020e7da339902742cf6"}, + {file = "torch-2.4.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:5fc1d4d7ed265ef853579caf272686d1ed87cebdcd04f2a498f800ffc53dab71"}, + {file = "torch-2.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:40f6d3fe3bae74efcf08cb7f8295eaddd8a838ce89e9d26929d4edd6d5e4329d"}, + {file = "torch-2.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:c9299c16c9743001ecef515536ac45900247f4338ecdf70746f2461f9e4831db"}, + {file = "torch-2.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:6bce130f2cd2d52ba4e2c6ada461808de7e5eccbac692525337cfb4c19421846"}, + {file = "torch-2.4.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:a38de2803ee6050309aac032676536c3d3b6a9804248537e38e098d0e14817ec"}, ] [package.dependencies] @@ -6240,6 +6240,7 @@ nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \" nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +setuptools = "*" sympy = "*" triton = {version = "3.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.13\""} typing-extensions = ">=4.8.0" @@ -6294,13 +6295,13 @@ gui = ["Gooey (>=1.0.1)"] [[package]] name = "transformers" -version = "4.44.1" +version = "4.44.2" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = true python-versions = ">=3.8.0" files = [ - {file = "transformers-4.44.1-py3-none-any.whl", hash = "sha256:bd2642da18b4e6d29b135c17650cd7ca8e874f2d092d2eddd3ed6b71a93a155c"}, - {file = "transformers-4.44.1.tar.gz", hash = "sha256:3b9a1a07ca65c665c7bf6109b7da76182184d10bb58d9ab14e6892e7b9e073a2"}, + {file = "transformers-4.44.2-py3-none-any.whl", hash = "sha256:1c02c65e7bfa5e52a634aff3da52138b583fc6f263c1f28d547dc144ba3d412d"}, + {file = "transformers-4.44.2.tar.gz", hash = 
"sha256:36aa17cc92ee154058e426d951684a2dab48751b35b49437896f898931270826"}, ] [package.dependencies] @@ -7007,4 +7008,4 @@ loaders-sql = ["sqlalchemy"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "5c8d23b1fdeb14a1dc7efd7c757a8282a60f6a006cceed741ab99b37467ef57e" +content-hash = "d4386bd485107d602d6369ef9d74cf33f1e7ad69dbe1e041f8142d291dc6c132" diff --git a/pyproject.toml b/pyproject.toml index df3468e53..be4684af4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,13 +40,13 @@ pymongo = { version = "^4.8.0", optional = true } marqo = { version = "^3.7.0", optional = true } redis = { version = "^4.6.0", optional = true } opensearch-py = { version = "^2.3.1", optional = true } -pgvector = { version = "^0.2.3", optional = true } +pgvector = { version = ">=0.2.3,<0.4.0", optional = true } psycopg2-binary = { version = "^2.9.9", optional = true } google-generativeai = { version = "^0.7.2", optional = true } trafilatura = {version = "^1.6", optional = true} playwright = {version = "^1.42", optional = true} beautifulsoup4 = {version = "^4.12.3", optional = true} -markdownify = {version = "^0.11.6", optional = true} +markdownify = {version = ">=0.11.6,<0.14.0", optional = true} voyageai = {version = "^0.2.1", optional = true} elevenlabs = {version = "^1.1.2", optional = true} qdrant-client = { version = "^1.10.1", optional = true } @@ -60,8 +60,8 @@ opentelemetry-api = {version = "^1.25.0", optional = true} opentelemetry-instrumentation = {version = "^0.46b0", optional = true} opentelemetry-instrumentation-threading = {version = "^0.46b0", optional = true} opentelemetry-exporter-otlp-proto-http = {version = "^1.25.0", optional = true} -diffusers = {version = "^0.29.1", optional = true} -accelerate = {version = "^0.32.1", optional = true} +diffusers = {version = ">=0.29.1,<0.31.0", optional = true} +accelerate = {version = ">=0.32.1,<0.35.0", optional = true} sentencepiece = {version = "^0.2.0", optional = true} torch = {version = "^2.3.1", optional = true} From aeaa4f9a3f945d15dafb7320199a05002414197b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 16:32:50 -0700 Subject: [PATCH 25/39] Bump the group-dependencies group with 7 updates (#1141) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Collin Dutter --- docs/griptape-framework/tools/index.md | 2 +- poetry.lock | 92 +++++++++++++------------- pyproject.toml | 4 +- 3 files changed, 49 insertions(+), 49 deletions(-) diff --git a/docs/griptape-framework/tools/index.md b/docs/griptape-framework/tools/index.md index d97a9347c..f2adc0c97 100644 --- a/docs/griptape-framework/tools/index.md +++ b/docs/griptape-framework/tools/index.md @@ -9,7 +9,7 @@ One of the most powerful features of Griptape is the ability to use tools that c Many of our [Prompt Drivers](../drivers/prompt-drivers.md) leverage the native function calling built into the LLMs. For LLMs that don't support this, Griptape provides its own implementation using the [ReAct](https://arxiv.org/abs/2210.03629) technique. -You can switch between the two strategies by setting `use_native_tools` to `True` (LLM-native tool calling) or `False` (Griptape tool calling) on your [Prompt Driver][../drivers/prompt-drivers.md]. 
+You can switch between the two strategies by setting `use_native_tools` to `True` (LLM-native tool calling) or `False` (Griptape tool calling) on your [Prompt Driver](../drivers/prompt-drivers.md). ## Tools Here is an example of a Pipeline using Tools: diff --git a/poetry.lock b/poetry.lock index b59a5333e..640b3bab0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -368,13 +368,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.35.11" -description = "Type annotations for boto3 1.35.11 generated with mypy-boto3-builder 8.0.1" +version = "1.35.12" +description = "Type annotations for boto3 1.35.12 generated with mypy-boto3-builder 8.0.1" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs-1.35.11-py3-none-any.whl", hash = "sha256:43611ee8fe11402b78241d76a2866086dc836541ef1332bf558f852bf465ac85"}, - {file = "boto3_stubs-1.35.11.tar.gz", hash = "sha256:c2d803a9a125648afdda5551e108a59f1ce0d70070b7ef39b27c09699b74735a"}, + {file = "boto3_stubs-1.35.12-py3-none-any.whl", hash = "sha256:4287130d0a64cd849f40a6a7a985e6ee46c99d1db3f7a07dc051e63ff91cbccb"}, + {file = "boto3_stubs-1.35.12.tar.gz", hash = "sha256:81699cf3ad36e14d75648a0d9130c28924f5096e23f2d897f8f14741b5909a1c"}, ] [package.dependencies] @@ -430,7 +430,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.35.0,<1.36.0)"] -boto3 = ["boto3 (==1.35.11)", "botocore (==1.35.11)"] +boto3 = ["boto3 (==1.35.12)", "botocore (==1.35.12)"] braket = ["mypy-boto3-braket (>=1.35.0,<1.36.0)"] budgets = ["mypy-boto3-budgets (>=1.35.0,<1.36.0)"] ce = ["mypy-boto3-ce (>=1.35.0,<1.36.0)"] @@ -2978,13 +2978,13 @@ files = [ [[package]] name = "mkdocs" -version = "1.6.0" +version = "1.6.1" description = "Project documentation with Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"}, - {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"}, + {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, + {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, ] [package.dependencies] @@ -3009,13 +3009,13 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-autorefs" -version = "1.1.0" +version = "1.2.0" description = "Automatically link across pages in MkDocs." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_autorefs-1.1.0-py3-none-any.whl", hash = "sha256:492ac42f50214e81565e968f8cb0df9aba9d981542b9e7121b8f8ae9407fe6eb"}, - {file = "mkdocs_autorefs-1.1.0.tar.gz", hash = "sha256:f2fd43b11f66284bd014f9b542a05c8ecbfaad4e0d7b30b68584788217b6c656"}, + {file = "mkdocs_autorefs-1.2.0-py3-none-any.whl", hash = "sha256:d588754ae89bd0ced0c70c06f58566a4ee43471eeeee5202427da7de9ef85a2f"}, + {file = "mkdocs_autorefs-1.2.0.tar.gz", hash = "sha256:a86b93abff653521bda71cf3fc5596342b7a23982093915cb74273f67522190f"}, ] [package.dependencies] @@ -3056,13 +3056,13 @@ pyyaml = ">=5.1" [[package]] name = "mkdocs-glightbox" -version = "0.3.7" +version = "0.4.0" description = "MkDocs plugin supports image lightbox with GLightbox." optional = false python-versions = "*" files = [ - {file = "mkdocs-glightbox-0.3.7.tar.gz", hash = "sha256:4e890140a97dd4ad128cb92174384bd0ac33adec3304bbd2b7c48d0847685c4f"}, - {file = "mkdocs_glightbox-0.3.7-py3-none-any.whl", hash = "sha256:9659631a9829d93d8fb0ce3a20a10261c258605ba4dc87a3b7b5d847b93a276d"}, + {file = "mkdocs-glightbox-0.4.0.tar.gz", hash = "sha256:392b34207bf95991071a16d5f8916d1d2f2cd5d5bb59ae2997485ccd778c70d9"}, + {file = "mkdocs_glightbox-0.4.0-py3-none-any.whl", hash = "sha256:e0107beee75d3eb7380ac06ea2d6eac94c999eaa49f8c3cbab0e7be2ac006ccf"}, ] [[package]] @@ -3081,13 +3081,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.32" +version = "9.5.34" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.32-py3-none-any.whl", hash = "sha256:f3704f46b63d31b3cd35c0055a72280bed825786eccaf19c655b44e0cd2c6b3f"}, - {file = "mkdocs_material-9.5.32.tar.gz", hash = "sha256:38ed66e6d6768dde4edde022554553e48b2db0d26d1320b19e2e2b9da0be1120"}, + {file = "mkdocs_material-9.5.34-py3-none-any.whl", hash = "sha256:54caa8be708de2b75167fd4d3b9f3d949579294f49cb242515d4653dbee9227e"}, + {file = "mkdocs_material-9.5.34.tar.gz", hash = "sha256:1e60ddf716cfb5679dfd65900b8a25d277064ed82d9a53cd5190e3f894df7840"}, ] [package.dependencies] @@ -3135,25 +3135,25 @@ mkdocs = ">=1.2" [[package]] name = "mkdocstrings" -version = "0.25.2" +version = "0.26.0" description = "Automatic documentation from sources, for MkDocs." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocstrings-0.25.2-py3-none-any.whl", hash = "sha256:9e2cda5e2e12db8bb98d21e3410f3f27f8faab685a24b03b06ba7daa5b92abfc"}, - {file = "mkdocstrings-0.25.2.tar.gz", hash = "sha256:5cf57ad7f61e8be3111a2458b4e49c2029c9cb35525393b179f9c916ca8042dc"}, + {file = "mkdocstrings-0.26.0-py3-none-any.whl", hash = "sha256:1aa227fe94f88e80737d37514523aacd473fc4b50a7f6852ce41447ab23f2654"}, + {file = "mkdocstrings-0.26.0.tar.gz", hash = "sha256:ff9d0de28c8fa877ed9b29a42fe407cfe6736d70a1c48177aa84fcc3dc8518cd"}, ] [package.dependencies] click = ">=7.0" importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} Jinja2 = ">=2.11.1" -Markdown = ">=3.3" +Markdown = ">=3.6" MarkupSafe = ">=1.1" mkdocs = ">=1.4" -mkdocs-autorefs = ">=0.3.1" +mkdocs-autorefs = ">=1.2" mkdocstrings-python = {version = ">=0.5.2", optional = true, markers = "extra == \"python\""} -platformdirs = ">=2.2.0" +platformdirs = ">=2.2" pymdown-extensions = ">=6.3" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.10\""} @@ -4809,13 +4809,13 @@ image = ["Pillow (>=8.0.0)"] [[package]] name = "pyright" -version = "1.1.377" +version = "1.1.379" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.377-py3-none-any.whl", hash = "sha256:af0dd2b6b636c383a6569a083f8c5a8748ae4dcde5df7914b3f3f267e14dd162"}, - {file = "pyright-1.1.377.tar.gz", hash = "sha256:aabc30fedce0ded34baa0c49b24f10e68f4bfc8f68ae7f3d175c4b0f256b4fcf"}, + {file = "pyright-1.1.379-py3-none-any.whl", hash = "sha256:01954811ac71db8646f50de1577576dc275ffb891a9e7324350e676cf6df323f"}, + {file = "pyright-1.1.379.tar.gz", hash = "sha256:6f426cb6443786fa966b930c23ad1941c8cb9fe672e4589daea8d80bb34193ea"}, ] [package.dependencies] @@ -5324,29 +5324,29 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.6.1" +version = "0.6.3" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.1-py3-none-linux_armv6l.whl", hash = "sha256:b4bb7de6a24169dc023f992718a9417380301b0c2da0fe85919f47264fb8add9"}, - {file = "ruff-0.6.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:45efaae53b360c81043e311cdec8a7696420b3d3e8935202c2846e7a97d4edae"}, - {file = "ruff-0.6.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bc60c7d71b732c8fa73cf995efc0c836a2fd8b9810e115be8babb24ae87e0850"}, - {file = "ruff-0.6.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c7477c3b9da822e2db0b4e0b59e61b8a23e87886e727b327e7dcaf06213c5cf"}, - {file = "ruff-0.6.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3a0af7ab3f86e3dc9f157a928e08e26c4b40707d0612b01cd577cc84b8905cc9"}, - {file = "ruff-0.6.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:392688dbb50fecf1bf7126731c90c11a9df1c3a4cdc3f481b53e851da5634fa5"}, - {file = "ruff-0.6.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5278d3e095ccc8c30430bcc9bc550f778790acc211865520f3041910a28d0024"}, - {file = "ruff-0.6.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe6d5f65d6f276ee7a0fc50a0cecaccb362d30ef98a110f99cac1c7872df2f18"}, - {file = "ruff-0.6.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2e0dd11e2ae553ee5c92a81731d88a9883af8db7408db47fc81887c1f8b672e"}, - {file = "ruff-0.6.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d812615525a34ecfc07fd93f906ef5b93656be01dfae9a819e31caa6cfe758a1"}, - {file = "ruff-0.6.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:faaa4060f4064c3b7aaaa27328080c932fa142786f8142aff095b42b6a2eb631"}, - {file = "ruff-0.6.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:99d7ae0df47c62729d58765c593ea54c2546d5de213f2af2a19442d50a10cec9"}, - {file = "ruff-0.6.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9eb18dfd7b613eec000e3738b3f0e4398bf0153cb80bfa3e351b3c1c2f6d7b15"}, - {file = "ruff-0.6.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c62bc04c6723a81e25e71715aa59489f15034d69bf641df88cb38bdc32fd1dbb"}, - {file = "ruff-0.6.1-py3-none-win32.whl", hash = "sha256:9fb4c4e8b83f19c9477a8745e56d2eeef07a7ff50b68a6998f7d9e2e3887bdc4"}, - {file = "ruff-0.6.1-py3-none-win_amd64.whl", hash = "sha256:c2ebfc8f51ef4aca05dad4552bbcf6fe8d1f75b2f6af546cc47cc1c1ca916b5b"}, - {file = "ruff-0.6.1-py3-none-win_arm64.whl", hash = "sha256:3bc81074971b0ffad1bd0c52284b22411f02a11a012082a76ac6da153536e014"}, - {file = "ruff-0.6.1.tar.gz", hash = "sha256:af3ffd8c6563acb8848d33cd19a69b9bfe943667f0419ca083f8ebe4224a3436"}, + {file = "ruff-0.6.3-py3-none-linux_armv6l.whl", hash = "sha256:97f58fda4e309382ad30ede7f30e2791d70dd29ea17f41970119f55bdb7a45c3"}, + {file = "ruff-0.6.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3b061e49b5cf3a297b4d1c27ac5587954ccb4ff601160d3d6b2f70b1622194dc"}, + {file = "ruff-0.6.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:34e2824a13bb8c668c71c1760a6ac7d795ccbd8d38ff4a0d8471fdb15de910b1"}, + {file = "ruff-0.6.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bddfbb8d63c460f4b4128b6a506e7052bad4d6f3ff607ebbb41b0aa19c2770d1"}, + {file = "ruff-0.6.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ced3eeb44df75353e08ab3b6a9e113b5f3f996bea48d4f7c027bc528ba87b672"}, + {file = "ruff-0.6.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:47021dff5445d549be954eb275156dfd7c37222acc1e8014311badcb9b4ec8c1"}, + {file = "ruff-0.6.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7d7bd20dc07cebd68cc8bc7b3f5ada6d637f42d947c85264f94b0d1cd9d87384"}, + {file = "ruff-0.6.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:500f166d03fc6d0e61c8e40a3ff853fa8a43d938f5d14c183c612df1b0d6c58a"}, + {file = "ruff-0.6.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42844ff678f9b976366b262fa2d1d1a3fe76f6e145bd92c84e27d172e3c34500"}, + {file = "ruff-0.6.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70452a10eb2d66549de8e75f89ae82462159855e983ddff91bc0bce6511d0470"}, + {file = "ruff-0.6.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:65a533235ed55f767d1fc62193a21cbf9e3329cf26d427b800fdeacfb77d296f"}, + {file = "ruff-0.6.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2e2c23cef30dc3cbe9cc5d04f2899e7f5e478c40d2e0a633513ad081f7361b5"}, + {file = "ruff-0.6.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d8a136aa7d228975a6aee3dd8bea9b28e2b43e9444aa678fb62aeb1956ff2351"}, + {file = "ruff-0.6.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f92fe93bc72e262b7b3f2bba9879897e2d58a989b4714ba6a5a7273e842ad2f8"}, + {file = "ruff-0.6.3-py3-none-win32.whl", hash = "sha256:7a62d3b5b0d7f9143d94893f8ba43aa5a5c51a0ffc4a401aa97a81ed76930521"}, + {file = "ruff-0.6.3-py3-none-win_amd64.whl", hash = "sha256:746af39356fee2b89aada06c7376e1aa274a23493d7016059c3a72e3b296befb"}, + {file = "ruff-0.6.3-py3-none-win_arm64.whl", hash = "sha256:14a9528a8b70ccc7a847637c29e56fd1f9183a9db743bbc5b8e0c4ad60592a82"}, + {file = "ruff-0.6.3.tar.gz", hash = "sha256:183b99e9edd1ef63be34a3b51fee0a9f4ab95add123dbf89a71f7b1f0c991983"}, ] [[package]] @@ -7008,4 +7008,4 @@ loaders-sql = ["sqlalchemy"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "d4386bd485107d602d6369ef9d74cf33f1e7ad69dbe1e041f8142d291dc6c132" +content-hash = "ad31933841926602b18aea04927c1f86d6825d8891495210a28dc1732768499e" diff --git a/pyproject.toml b/pyproject.toml index be4684af4..a0854e0a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -238,8 +238,8 @@ optional = true [tool.poetry.group.docs.dependencies] mkdocs = "^1.5.2" mkdocs-material = "^9.2.8" -mkdocs-glightbox = "^0.3.4" -mkdocstrings = {extras = ["python"], version = "^0.25.2"} +mkdocs-glightbox = ">=0.3.4,<0.5.0" +mkdocstrings = {extras = ["python"], version = ">=0.25.2,<0.27.0"} mkdocs-gen-files = "^0.5.0" mkdocs-literate-nav = "^0.6.0" mkdocs-section-index = "^0.3.6" From a1ad5b7eec7d330bb4a778d5e445f24b2afd0262 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Thu, 5 Sep 2024 09:24:53 -0700 Subject: [PATCH 26/39] Update list of rag modules (#1146) --- docs/griptape-framework/engines/rag-engines.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/griptape-framework/engines/rag-engines.md b/docs/griptape-framework/engines/rag-engines.md index ed71b69c0..688f46ddd 100644 --- a/docs/griptape-framework/engines/rag-engines.md +++ b/docs/griptape-framework/engines/rag-engines.md @@ -27,14 +27,12 @@ RAG modules are used to implement concrete actions in the RAG pipeline. `RagEngi - `TranslateQueryRagModule` is for translating the query into another language. -#### Retrieval Modules -- `TextRetrievalRagModule` is for retrieving text chunks. -- `TextLoaderRetrievalRagModule` is for retrieving data with text loaders in real time. 
+#### Retrieval/Rerank Modules - `TextChunksRerankRagModule` is for re-ranking retrieved results. +- `TextLoaderRetrievalRagModule` is for retrieving data with text loaders in real time. +- `VectorStoreRetrievalRagModule` is for retrieving text chunks from a vector store. #### Response Modules -- `MetadataBeforeResponseRagModule` is for appending metadata. -- `RulesetsBeforeResponseRagModule` is for appending rulesets. - `PromptResponseRagModule` is for generating responses based on retrieved text chunks. - `TextChunksResponseRagModule` is for responding with retrieved text chunks. - `FootnotePromptResponseRagModule` is for responding with automatic footnotes from text chunk references. From 21c9d21c0db01f964088e7337387b81a9e7c7418 Mon Sep 17 00:00:00 2001 From: billytrend-cohere <144115527+billytrend-cohere@users.noreply.github.com> Date: Thu, 5 Sep 2024 16:32:10 -0500 Subject: [PATCH 27/39] Revert "Filter out cohere's pydantic warning (#1081)" (#1147) --- griptape/drivers/prompt/cohere_prompt_driver.py | 4 ---- poetry.lock | 6 +++--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/griptape/drivers/prompt/cohere_prompt_driver.py b/griptape/drivers/prompt/cohere_prompt_driver.py index 05be5b7f2..ff1a8b482 100644 --- a/griptape/drivers/prompt/cohere_prompt_driver.py +++ b/griptape/drivers/prompt/cohere_prompt_driver.py @@ -1,6 +1,5 @@ from __future__ import annotations -import warnings from typing import TYPE_CHECKING, Any from attrs import Factory, define, field @@ -25,9 +24,6 @@ from griptape.tokenizers import BaseTokenizer, CohereTokenizer from griptape.utils import import_optional_dependency -# TODO Remove once https://github.com/cohere-ai/cohere-python/issues/559 is resolved -warnings.filterwarnings("ignore", module="pydantic") - if TYPE_CHECKING: from collections.abc import Iterator diff --git a/poetry.lock b/poetry.lock index 640b3bab0..ad6effafd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1107,13 +1107,13 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cohere" -version = "5.9.0" +version = "5.9.1" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "cohere-5.9.0-py3-none-any.whl", hash = "sha256:7c70cc9e6ade3355e00aa4a77fcb5662b32261a3237e00975d92b97bb5f3c0c9"}, - {file = "cohere-5.9.0.tar.gz", hash = "sha256:74e5b6e1fed0f617c26dfb8ef1cfccf8334321a51cc886c37374047916d71568"}, + {file = "cohere-5.9.1-py3-none-any.whl", hash = "sha256:8e1e1dde0e1a5ee5a3f22b890e0c927989f9326bab57b6b4be812a40e2565c3a"}, + {file = "cohere-5.9.1.tar.gz", hash = "sha256:de9a828b91481882bf554e94a61ccc52843a09fbc58f53b8e1fa940b17ce1dc9"}, ] [package.dependencies] From dc569b3b3d0a7137dd573ac3a15e2a46c5e01c8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 16:11:17 -0700 Subject: [PATCH 28/39] Bump cryptography from 43.0.0 to 43.0.1 (#1145) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Collin Dutter --- poetry.lock | 58 ++++++++++++++++++++++++++--------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/poetry.lock b/poetry.lock index ad6effafd..a5bcd2444 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1248,38 +1248,38 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "43.0.0" +version = "43.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf"}, - {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55"}, - {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431"}, - {file = "cryptography-43.0.0-cp37-abi3-win32.whl", hash = "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc"}, - {file = "cryptography-43.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778"}, - {file = "cryptography-43.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f"}, - {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0"}, - {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b"}, - {file = "cryptography-43.0.0-cp39-abi3-win32.whl", hash = "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf"}, - {file = "cryptography-43.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1"}, - {file = "cryptography-43.0.0.tar.gz", hash = "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e"}, + {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, + {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, + {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, + {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, + {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, + {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, + {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, ] [package.dependencies] @@ -1292,7 +1292,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.0)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] From 9735d88d18c9bc8507f31a74a743465414d0297d Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Fri, 6 Sep 2024 13:24:25 -0700 Subject: [PATCH 29/39] Don't rerank empty docs (#1153) Co-authored-by: matt --- CHANGELOG.md | 1 + .../drivers/rerank/cohere_rerank_driver.py | 24 +++++++++++-------- .../rerank/test_cohere_rerank_driver.py | 16 +++++++++++++ 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7d833612..6801ccf3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Missing `maxTokens` inference parameter in `AmazonBedrockPromptDriver`. - Incorrect model in `OpenAiDriverConfig`'s `text_to_speech_driver`. - Crash when using `CohereRerankDriver` with `CsvRowArtifact`s. +- Crash when passing "empty" Artifacts or no Artifacts to `CohereRerankDriver`. 
## [0.30.2] - 2024-08-26 diff --git a/griptape/drivers/rerank/cohere_rerank_driver.py b/griptape/drivers/rerank/cohere_rerank_driver.py index 5ca03cf63..b6c1d1477 100644 --- a/griptape/drivers/rerank/cohere_rerank_driver.py +++ b/griptape/drivers/rerank/cohere_rerank_driver.py @@ -24,13 +24,17 @@ class CohereRerankDriver(BaseRerankDriver): ) def run(self, query: str, artifacts: list[TextArtifact]) -> list[TextArtifact]: - artifacts_dict = {str(hash(a.to_text())): a for a in artifacts} - response = self.client.rerank( - model=self.model, - query=query, - documents=[a.to_text() for a in artifacts_dict.values()], - return_documents=True, - top_n=self.top_n, - ) - - return [artifacts_dict[str(hash(r.document.text))] for r in response.results] + # Cohere errors out if passed "empty" documents or no documents at all + artifacts_dict = {str(hash(a.to_text())): a for a in artifacts if a} + + if artifacts_dict: + response = self.client.rerank( + model=self.model, + query=query, + documents=[a.to_text() for a in artifacts_dict.values()], + return_documents=True, + top_n=self.top_n, + ) + return [artifacts_dict[str(hash(r.document.text))] for r in response.results] + else: + return [] diff --git a/tests/unit/drivers/rerank/test_cohere_rerank_driver.py b/tests/unit/drivers/rerank/test_cohere_rerank_driver.py index 87a727269..d6f77f552 100644 --- a/tests/unit/drivers/rerank/test_cohere_rerank_driver.py +++ b/tests/unit/drivers/rerank/test_cohere_rerank_driver.py @@ -20,8 +20,24 @@ def mock_client(self, mocker): return mock_client + @pytest.fixture() + def mock_empty_client(self, mocker): + mock_client = mocker.patch("cohere.Client").return_value + mock_client.rerank.side_effect = Exception("Client should not be called") + + return mock_client + def test_run(self, mock_client): driver = CohereRerankDriver(api_key="api-key") result = driver.run("hello", artifacts=[TextArtifact("foo"), TextArtifact("bar")]) assert len(result) == 2 + + def test_run_empty_artifacts(self, mock_empty_client): + driver = CohereRerankDriver(api_key="api-key") + result = driver.run("hello", artifacts=[TextArtifact(""), TextArtifact(" ")]) + + assert len(result) == 0 + + result = driver.run("hello", artifacts=[]) + assert len(result) == 0 From 5b56867660d2fe6f408fc39aa85c9f58f5d983c3 Mon Sep 17 00:00:00 2001 From: Matt Vallillo Date: Mon, 9 Sep 2024 13:18:04 -0400 Subject: [PATCH 30/39] Add `AzureOpenAiTextToSpeechDriver` (#1150) --- .github/workflows/docs-integration-tests.yml | 2 + CHANGELOG.md | 1 + .../drivers/src/text_to_speech_drivers_3.py | 20 ++++++++ .../drivers/text-to-speech-drivers.md | 8 +++ griptape/drivers/__init__.py | 2 + .../azure_openai_text_to_speech_driver.py | 51 +++++++++++++++++++ ...test_azure_openai_text_to_speech_driver.py | 33 ++++++++++++ 7 files changed, 117 insertions(+) create mode 100644 docs/griptape-framework/drivers/src/text_to_speech_drivers_3.py create mode 100644 griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py create mode 100644 tests/unit/drivers/text_to_speech/test_azure_openai_text_to_speech_driver.py diff --git a/.github/workflows/docs-integration-tests.yml b/.github/workflows/docs-integration-tests.yml index 61111b20b..77dab4a5b 100644 --- a/.github/workflows/docs-integration-tests.yml +++ b/.github/workflows/docs-integration-tests.yml @@ -85,6 +85,8 @@ jobs: AZURE_OPENAI_API_KEY_2: ${{ secrets.INTEG_AZURE_OPENAI_API_KEY_2 }} AZURE_OPENAI_ENDPOINT_3: ${{ secrets.INTEG_AZURE_OPENAI_ENDPOINT_3 }} AZURE_OPENAI_API_KEY_3: ${{ secrets.INTEG_AZURE_OPENAI_API_KEY_3 }} + 
AZURE_OPENAI_ENDPOINT_4: ${{ secrets.INTEG_AZURE_OPENAI_ENDPOINT_4 }} + AZURE_OPENAI_API_KEY_4: ${{ secrets.INTEG_AZURE_OPENAI_API_KEY_4 }} AZURE_OPENAI_35_TURBO_16K_DEPLOYMENT_ID: ${{ secrets.INTEG_OPENAI_35_TURBO_16K_DEPLOYMENT_ID }} AZURE_OPENAI_35_TURBO_DEPLOYMENT_ID: ${{ secrets.INTEG_OPENAI_35_TURBO_DEPLOYMENT_ID }} AZURE_OPENAI_DAVINCI_DEPLOYMENT_ID: ${{ secrets.INTEG_OPENAI_DAVINCI_DEPLOYMENT_ID }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 6801ccf3d..1c18e59b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Parameter `meta: dict` on `BaseEvent`. +- `AzureOpenAiTextToSpeechDriver`. ### Changed - **BREAKING**: Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifacts`. diff --git a/docs/griptape-framework/drivers/src/text_to_speech_drivers_3.py b/docs/griptape-framework/drivers/src/text_to_speech_drivers_3.py new file mode 100644 index 000000000..87add5498 --- /dev/null +++ b/docs/griptape-framework/drivers/src/text_to_speech_drivers_3.py @@ -0,0 +1,20 @@ +import os + +from griptape.drivers import AzureOpenAiTextToSpeechDriver +from griptape.engines import TextToSpeechEngine +from griptape.structures import Agent +from griptape.tools.text_to_speech.tool import TextToSpeechTool + +driver = AzureOpenAiTextToSpeechDriver( + api_key=os.environ["AZURE_OPENAI_API_KEY_4"], + model="tts", + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_4"], +) + +tool = TextToSpeechTool( + engine=TextToSpeechEngine( + text_to_speech_driver=driver, + ), +) + +Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") diff --git a/docs/griptape-framework/drivers/text-to-speech-drivers.md b/docs/griptape-framework/drivers/text-to-speech-drivers.md index c5455914e..a6fb955e6 100644 --- a/docs/griptape-framework/drivers/text-to-speech-drivers.md +++ b/docs/griptape-framework/drivers/text-to-speech-drivers.md @@ -29,3 +29,11 @@ The [OpenAI Text to Speech Driver](../../reference/griptape/drivers/text_to_spee ```python --8<-- "docs/griptape-framework/drivers/src/text_to_speech_drivers_2.py" ``` + +## Azure OpenAI + +The [Azure OpenAI Text to Speech Driver](../../reference/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.md) provides support for text-to-speech models hosted in your Azure OpenAI instance. This Driver supports configurations specific to OpenAI, like voice selection and output format. 
+ +```python +--8<-- "docs/griptape-framework/drivers/src/text_to_speech_drivers_3.py" +``` diff --git a/griptape/drivers/__init__.py b/griptape/drivers/__init__.py index f19ec7d10..7d2de3552 100644 --- a/griptape/drivers/__init__.py +++ b/griptape/drivers/__init__.py @@ -118,6 +118,7 @@ from .text_to_speech.dummy_text_to_speech_driver import DummyTextToSpeechDriver from .text_to_speech.elevenlabs_text_to_speech_driver import ElevenLabsTextToSpeechDriver from .text_to_speech.openai_text_to_speech_driver import OpenAiTextToSpeechDriver +from .text_to_speech.azure_openai_text_to_speech_driver import AzureOpenAiTextToSpeechDriver from .structure_run.base_structure_run_driver import BaseStructureRunDriver from .structure_run.griptape_cloud_structure_run_driver import GriptapeCloudStructureRunDriver @@ -227,6 +228,7 @@ "DummyTextToSpeechDriver", "ElevenLabsTextToSpeechDriver", "OpenAiTextToSpeechDriver", + "AzureOpenAiTextToSpeechDriver", "BaseStructureRunDriver", "GriptapeCloudStructureRunDriver", "LocalStructureRunDriver", diff --git a/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py b/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py new file mode 100644 index 000000000..562a1d637 --- /dev/null +++ b/griptape/drivers/text_to_speech/azure_openai_text_to_speech_driver.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from typing import Callable, Optional + +import openai +from attrs import Factory, define, field + +from griptape.drivers import OpenAiTextToSpeechDriver + + +@define +class AzureOpenAiTextToSpeechDriver(OpenAiTextToSpeechDriver): + """Azure OpenAi Text to Speech Driver. + + Attributes: + azure_deployment: An optional Azure OpenAi deployment id. Defaults to the model name. + azure_endpoint: An Azure OpenAi endpoint. + azure_ad_token: An optional Azure Active Directory token. + azure_ad_token_provider: An optional Azure Active Directory token provider. + api_version: An Azure OpenAi API version. + client: An `openai.AzureOpenAI` client. 
+ """ + + model: str = field(default="tts", kw_only=True, metadata={"serializable": True}) + azure_deployment: str = field( + kw_only=True, + default=Factory(lambda self: self.model, takes_self=True), + metadata={"serializable": True}, + ) + azure_endpoint: str = field(kw_only=True, metadata={"serializable": True}) + azure_ad_token: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": False}) + azure_ad_token_provider: Optional[Callable[[], str]] = field( + kw_only=True, + default=None, + metadata={"serializable": False}, + ) + api_version: str = field(default="2024-07-01-preview", kw_only=True, metadata={"serializable": True}) + client: openai.AzureOpenAI = field( + default=Factory( + lambda self: openai.AzureOpenAI( + organization=self.organization, + api_key=self.api_key, + api_version=self.api_version, + azure_endpoint=self.azure_endpoint, + azure_deployment=self.azure_deployment, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ), + takes_self=True, + ), + ) diff --git a/tests/unit/drivers/text_to_speech/test_azure_openai_text_to_speech_driver.py b/tests/unit/drivers/text_to_speech/test_azure_openai_text_to_speech_driver.py new file mode 100644 index 000000000..5bab87c9e --- /dev/null +++ b/tests/unit/drivers/text_to_speech/test_azure_openai_text_to_speech_driver.py @@ -0,0 +1,33 @@ +from unittest.mock import Mock + +import pytest + +from griptape.drivers import AzureOpenAiTextToSpeechDriver + + +class TestAzureOpenAiTextToSpeechDriver: + @pytest.fixture() + def mock_speech_create(self, mocker): + mock_speech_create = mocker.patch("openai.AzureOpenAI").return_value.audio.speech.create + mock_function = Mock(arguments='{"foo": "bar"}', id="mock-id") + mock_function.name = "MockTool_test" + mock_speech_create.return_value = Mock( + content=b"speech", + ) + + return mock_speech_create + + def test_init(self): + assert AzureOpenAiTextToSpeechDriver(azure_endpoint="foobar", azure_deployment="foobar") + assert AzureOpenAiTextToSpeechDriver(azure_endpoint="foobar").azure_deployment == "tts" + + def test_run_text_to_audio(self, mock_speech_create): + driver = AzureOpenAiTextToSpeechDriver(azure_endpoint="foobar") + output = driver.run_text_to_audio(["foo", "bar"]) + mock_speech_create.assert_called_once_with( + input="foo. 
bar", + model=driver.model, + response_format=driver.format, + voice=driver.voice, + ) + assert output.value == b"speech" From 46823ca2edd157bf09aeb59d699040d3edd4419e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 11:26:47 -0700 Subject: [PATCH 31/39] Bump the dependencies group with 4 updates (#1156) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/poetry.lock b/poetry.lock index a5bcd2444..23931faa1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = "accelerate" -version = "0.34.0" +version = "0.34.2" description = "Accelerate" optional = true python-versions = ">=3.8.0" files = [ - {file = "accelerate-0.34.0-py3-none-any.whl", hash = "sha256:0161fd3f975dd99b5cdb967bb6942bc986d9da466397742008a73290dcb73408"}, - {file = "accelerate-0.34.0.tar.gz", hash = "sha256:437a93f0cb15a7768483833975b5c781f61e31a203439948f1c6b0217e1f74d5"}, + {file = "accelerate-0.34.2-py3-none-any.whl", hash = "sha256:d69159e2c4e4a473d14443b27d2d732929254e826b3ab4813b3785b5ac616c7c"}, + {file = "accelerate-0.34.2.tar.gz", hash = "sha256:98c1ebe1f5a45c0a3af02dc60b5bb8b7d58d60c3326a326a06ce6d956b18ca5b"}, ] [package.dependencies] @@ -349,17 +349,17 @@ lxml = ["lxml"] [[package]] name = "boto3" -version = "1.35.12" +version = "1.35.14" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.35.12-py3-none-any.whl", hash = "sha256:acaa7c75cbf483605e3c46e9ac03043a4cf5e9866940122d68b06d1defe00774"}, - {file = "boto3-1.35.12.tar.gz", hash = "sha256:b32faab174f6f9b75fada27bcf054ab3e8846bd410ed9817d0b511109326b6b1"}, + {file = "boto3-1.35.14-py3-none-any.whl", hash = "sha256:c3e138e9041d59cd34cdc28a587dfdc899dba02ea26ebc3e10fb4bc88e5cf31b"}, + {file = "boto3-1.35.14.tar.gz", hash = "sha256:7bc78d7140c353b10a637927fe4bc4c4d95a464d1b8f515d5844def2ee52cbd5"}, ] [package.dependencies] -botocore = ">=1.35.12,<1.36.0" +botocore = ">=1.35.14,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -780,13 +780,13 @@ xray = ["mypy-boto3-xray (>=1.35.0,<1.36.0)"] [[package]] name = "botocore" -version = "1.35.12" +version = "1.35.14" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.12-py3-none-any.whl", hash = "sha256:cb787030415438ea6ff8381f8acd8b1107593d5ebea457fd843a5e36ba19e9a4"}, - {file = "botocore-1.35.12.tar.gz", hash = "sha256:a8f8230032d090225a93763675a73c208d121bb63ed99f41ee6ad3d51b74b80d"}, + {file = "botocore-1.35.14-py3-none-any.whl", hash = "sha256:24823135232f88266b66ae8e1d0f3d40872c14cd976781f7fe52b8f0d79035a0"}, + {file = "botocore-1.35.14.tar.gz", hash = "sha256:8515a2fc7ca5bcf0b10016ba05ccf2d642b7cb77d8773026ff2fa5aa3bf38d2e"}, ] [package.dependencies] @@ -3697,13 +3697,13 @@ files = [ [[package]] name = "ollama" -version = "0.3.2" +version = "0.3.3" description = "The official Python client for Ollama." 
optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "ollama-0.3.2-py3-none-any.whl", hash = "sha256:ed2a6f752bd91c49b477d84a259c5657785d7777689d4a27ffe0a4d5b5dd3cae"}, - {file = "ollama-0.3.2.tar.gz", hash = "sha256:7deb3287cdefa1c39cc046163096f8597b83f59ca31a1f8ae78e71eccb7af95f"}, + {file = "ollama-0.3.3-py3-none-any.whl", hash = "sha256:ca6242ce78ab34758082b7392df3f9f6c2cb1d070a9dede1a4c545c929e16dba"}, + {file = "ollama-0.3.3.tar.gz", hash = "sha256:f90a6d61803117f40b0e8ff17465cab5e1eb24758a473cfe8101aff38bc13b51"}, ] [package.dependencies] @@ -3711,13 +3711,13 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "openai" -version = "1.43.0" +version = "1.44.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.43.0-py3-none-any.whl", hash = "sha256:1a748c2728edd3a738a72a0212ba866f4fdbe39c9ae03813508b267d45104abe"}, - {file = "openai-1.43.0.tar.gz", hash = "sha256:e607aff9fc3e28eade107e5edd8ca95a910a4b12589336d3cbb6bfe2ac306b3c"}, + {file = "openai-1.44.0-py3-none-any.whl", hash = "sha256:99a12bbda15f9c632ee911851e101669a82ee34992fbfd658a9db27d90dc0a9c"}, + {file = "openai-1.44.0.tar.gz", hash = "sha256:acde74598976ec85bc477e9abb94eeb17f6efd998914d5685eeb46a69116894a"}, ] [package.dependencies] From e6a04c7b88cf9fa5d6bcf4c833ffebfab89a3258 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 12:55:49 -0700 Subject: [PATCH 32/39] Bump the group-dependencies group with 4 updates (#1157) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Collin Dutter --- poetry.lock | 64 ++++++++++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/poetry.lock b/poetry.lock index 23931faa1..647231351 100644 --- a/poetry.lock +++ b/poetry.lock @@ -368,13 +368,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.35.12" -description = "Type annotations for boto3 1.35.12 generated with mypy-boto3-builder 8.0.1" +version = "1.35.14" +description = "Type annotations for boto3 1.35.14 generated with mypy-boto3-builder 8.0.1" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs-1.35.12-py3-none-any.whl", hash = "sha256:4287130d0a64cd849f40a6a7a985e6ee46c99d1db3f7a07dc051e63ff91cbccb"}, - {file = "boto3_stubs-1.35.12.tar.gz", hash = "sha256:81699cf3ad36e14d75648a0d9130c28924f5096e23f2d897f8f14741b5909a1c"}, + {file = "boto3_stubs-1.35.14-py3-none-any.whl", hash = "sha256:c9b3c92b5b9b1278ca03bbb942075c5f9378f4bd26d7bce3ab1068246b088928"}, + {file = "boto3_stubs-1.35.14.tar.gz", hash = "sha256:cfa0d7189862cbd02c6cef1c6ce597728340056687547e8a2c50d2033bf979b6"}, ] [package.dependencies] @@ -430,7 +430,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.35.0,<1.36.0)"] -boto3 = ["boto3 (==1.35.12)", "botocore (==1.35.12)"] +boto3 = ["boto3 (==1.35.14)", "botocore (==1.35.14)"] braket = ["mypy-boto3-braket (>=1.35.0,<1.36.0)"] budgets = ["mypy-boto3-budgets (>=1.35.0,<1.36.0)"] ce = ["mypy-boto3-ce (>=1.35.0,<1.36.0)"] @@ -3135,13 +3135,13 @@ mkdocs = ">=1.2" [[package]] name = "mkdocstrings" -version = "0.26.0" +version = "0.26.1" description 
= "Automatic documentation from sources, for MkDocs." optional = false python-versions = ">=3.8" files = [ - {file = "mkdocstrings-0.26.0-py3-none-any.whl", hash = "sha256:1aa227fe94f88e80737d37514523aacd473fc4b50a7f6852ce41447ab23f2654"}, - {file = "mkdocstrings-0.26.0.tar.gz", hash = "sha256:ff9d0de28c8fa877ed9b29a42fe407cfe6736d70a1c48177aa84fcc3dc8518cd"}, + {file = "mkdocstrings-0.26.1-py3-none-any.whl", hash = "sha256:29738bfb72b4608e8e55cc50fb8a54f325dc7ebd2014e4e3881a49892d5983cf"}, + {file = "mkdocstrings-0.26.1.tar.gz", hash = "sha256:bb8b8854d6713d5348ad05b069a09f3b79edbc6a0f33a34c6821141adb03fe33"}, ] [package.dependencies] @@ -4882,21 +4882,21 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-env" -version = "1.1.3" +version = "1.1.4" description = "pytest plugin that allows you to add environment variables." optional = false python-versions = ">=3.8" files = [ - {file = "pytest_env-1.1.3-py3-none-any.whl", hash = "sha256:aada77e6d09fcfb04540a6e462c58533c37df35fa853da78707b17ec04d17dfc"}, - {file = "pytest_env-1.1.3.tar.gz", hash = "sha256:fcd7dc23bb71efd3d35632bde1bbe5ee8c8dc4489d6617fb010674880d96216b"}, + {file = "pytest_env-1.1.4-py3-none-any.whl", hash = "sha256:a4212056d4d440febef311a98fdca56c31256d58fb453d103cba4e8a532b721d"}, + {file = "pytest_env-1.1.4.tar.gz", hash = "sha256:86653658da8f11c6844975db955746c458a9c09f1e64957603161e2ff93f5133"}, ] [package.dependencies] -pytest = ">=7.4.3" +pytest = ">=8.3.2" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] -test = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "pytest-mock (>=3.12)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] [[package]] name = "pytest-mock" @@ -5324,29 +5324,29 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.6.3" +version = "0.6.4" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.3-py3-none-linux_armv6l.whl", hash = "sha256:97f58fda4e309382ad30ede7f30e2791d70dd29ea17f41970119f55bdb7a45c3"}, - {file = "ruff-0.6.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3b061e49b5cf3a297b4d1c27ac5587954ccb4ff601160d3d6b2f70b1622194dc"}, - {file = "ruff-0.6.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:34e2824a13bb8c668c71c1760a6ac7d795ccbd8d38ff4a0d8471fdb15de910b1"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bddfbb8d63c460f4b4128b6a506e7052bad4d6f3ff607ebbb41b0aa19c2770d1"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ced3eeb44df75353e08ab3b6a9e113b5f3f996bea48d4f7c027bc528ba87b672"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47021dff5445d549be954eb275156dfd7c37222acc1e8014311badcb9b4ec8c1"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7d7bd20dc07cebd68cc8bc7b3f5ada6d637f42d947c85264f94b0d1cd9d87384"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:500f166d03fc6d0e61c8e40a3ff853fa8a43d938f5d14c183c612df1b0d6c58a"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42844ff678f9b976366b262fa2d1d1a3fe76f6e145bd92c84e27d172e3c34500"}, - {file = "ruff-0.6.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70452a10eb2d66549de8e75f89ae82462159855e983ddff91bc0bce6511d0470"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:65a533235ed55f767d1fc62193a21cbf9e3329cf26d427b800fdeacfb77d296f"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2e2c23cef30dc3cbe9cc5d04f2899e7f5e478c40d2e0a633513ad081f7361b5"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d8a136aa7d228975a6aee3dd8bea9b28e2b43e9444aa678fb62aeb1956ff2351"}, - {file = "ruff-0.6.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f92fe93bc72e262b7b3f2bba9879897e2d58a989b4714ba6a5a7273e842ad2f8"}, - {file = "ruff-0.6.3-py3-none-win32.whl", hash = "sha256:7a62d3b5b0d7f9143d94893f8ba43aa5a5c51a0ffc4a401aa97a81ed76930521"}, - {file = "ruff-0.6.3-py3-none-win_amd64.whl", hash = "sha256:746af39356fee2b89aada06c7376e1aa274a23493d7016059c3a72e3b296befb"}, - {file = "ruff-0.6.3-py3-none-win_arm64.whl", hash = "sha256:14a9528a8b70ccc7a847637c29e56fd1f9183a9db743bbc5b8e0c4ad60592a82"}, - {file = "ruff-0.6.3.tar.gz", hash = "sha256:183b99e9edd1ef63be34a3b51fee0a9f4ab95add123dbf89a71f7b1f0c991983"}, + {file = "ruff-0.6.4-py3-none-linux_armv6l.whl", hash = "sha256:c4b153fc152af51855458e79e835fb6b933032921756cec9af7d0ba2aa01a258"}, + {file = "ruff-0.6.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:bedff9e4f004dad5f7f76a9d39c4ca98af526c9b1695068198b3bda8c085ef60"}, + {file = "ruff-0.6.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d02a4127a86de23002e694d7ff19f905c51e338c72d8e09b56bfb60e1681724f"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7862f42fc1a4aca1ea3ffe8a11f67819d183a5693b228f0bb3a531f5e40336fc"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eebe4ff1967c838a1a9618a5a59a3b0a00406f8d7eefee97c70411fefc353617"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:932063a03bac394866683e15710c25b8690ccdca1cf192b9a98260332ca93408"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:50e30b437cebef547bd5c3edf9ce81343e5dd7c737cb36ccb4fe83573f3d392e"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c44536df7b93a587de690e124b89bd47306fddd59398a0fb12afd6133c7b3818"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ea086601b22dc5e7693a78f3fcfc460cceabfdf3bdc36dc898792aba48fbad6"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b52387d3289ccd227b62102c24714ed75fbba0b16ecc69a923a37e3b5e0aaaa"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0308610470fcc82969082fc83c76c0d362f562e2f0cdab0586516f03a4e06ec6"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:803b96dea21795a6c9d5bfa9e96127cc9c31a1987802ca68f35e5c95aed3fc0d"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:66dbfea86b663baab8fcae56c59f190caba9398df1488164e2df53e216248baa"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:34d5efad480193c046c86608dbba2bccdc1c5fd11950fb271f8086e0c763a5d1"}, + {file = "ruff-0.6.4-py3-none-win32.whl", hash = "sha256:f0f8968feea5ce3777c0d8365653d5e91c40c31a81d95824ba61d871a11b8523"}, + {file = "ruff-0.6.4-py3-none-win_amd64.whl", hash = "sha256:549daccee5227282289390b0222d0fbee0275d1db6d514550d65420053021a58"}, + {file = "ruff-0.6.4-py3-none-win_arm64.whl", hash = "sha256:ac4b75e898ed189b3708c9ab3fc70b79a433219e1e87193b4f2b77251d058d14"}, + {file = "ruff-0.6.4.tar.gz", hash = "sha256:ac3b5bfbee99973f80aa1b7cbd1c9cbce200883bdd067300c22a6cc1c7fba212"}, ] [[package]] From 9d9b643976f30fdf0af16b75c9b92503b076888e Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Tue, 10 Sep 2024 16:09:27 -0700 Subject: [PATCH 33/39] Removed the `__all__` declaration from the `griptape.mixins` module. 
(#1164) --- CHANGELOG.md | 1 + griptape/artifacts/action_artifact.py | 2 +- griptape/artifacts/base_artifact.py | 2 +- griptape/common/actions/base_action.py | 2 +- .../contents/base_message_content.py | 2 +- .../prompt_stack/messages/base_message.py | 2 +- griptape/common/prompt_stack/prompt_stack.py | 2 +- griptape/common/reference.py | 2 +- .../configs/drivers/base_drivers_config.py | 2 +- .../base_audio_transcription_driver.py | 3 ++- .../embedding/base_embedding_driver.py | 3 ++- .../base_event_listener_driver.py | 2 +- .../base_image_generation_driver.py | 3 ++- .../base_image_generation_model_driver.py | 2 +- .../image_query/base_image_query_driver.py | 3 ++- .../base_image_query_model_driver.py | 2 +- .../base_conversation_memory_driver.py | 2 +- griptape/drivers/prompt/base_prompt_driver.py | 3 ++- .../base_text_to_speech_driver.py | 3 ++- .../vector/base_vector_store_driver.py | 3 ++- .../engines/rag/modules/base_rag_module.py | 2 +- .../response/prompt_response_rag_module.py | 2 +- griptape/engines/rag/rag_context.py | 2 +- griptape/engines/rag/stages/base_rag_stage.py | 2 +- griptape/events/base_event.py | 2 +- griptape/loaders/base_loader.py | 2 +- griptape/memory/meta/base_meta_entry.py | 2 +- .../structure/base_conversation_memory.py | 2 +- griptape/memory/structure/run.py | 2 +- griptape/memory/task/task_memory.py | 2 +- griptape/mixins/__init__.py | 19 ------------------- griptape/schemas/base_schema.py | 2 +- griptape/structures/workflow.py | 2 +- griptape/tasks/actions_subtask.py | 2 +- griptape/tasks/base_audio_generation_task.py | 3 ++- griptape/tasks/base_audio_input_task.py | 2 +- griptape/tasks/base_image_generation_task.py | 3 ++- griptape/tasks/base_task.py | 2 +- griptape/tasks/prompt_task.py | 2 +- griptape/tasks/tool_task.py | 2 +- griptape/tasks/toolkit_task.py | 2 +- griptape/tools/base_image_generation_tool.py | 2 +- griptape/tools/base_tool.py | 2 +- griptape/tools/extraction/tool.py | 2 +- griptape/tools/text_to_speech/tool.py | 2 +- tests/mocks/mock_futures_executor.py | 2 +- tests/mocks/mock_serializable.py | 2 +- .../test_image_artifact_file_output_mixin.py | 2 +- tests/unit/mixins/test_rule_mixin.py | 2 +- 49 files changed, 57 insertions(+), 66 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c18e59b0..cfb97f0f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **BREAKING**: `BaseConversationMemoryDriver.load` now returns `tuple[list[Run], dict]`. This represents the runs and metadata. - **BREAKING**: `BaseConversationMemoryDriver.store` now takes `runs: list[Run]` and `metadata: dict` as input. - **BREAKING**: Parameter `file_path` on `LocalConversationMemoryDriver` renamed to `persist_file` and is now type `Optional[str]`. +- **BREAKING**: Removed the `__all__` declaration from the `griptape.mixins` module. - `Defaults.drivers_config.conversation_memory_driver` now defaults to `LocalConversationMemoryDriver` instead of `None`. - `CsvRowArtifact.to_text()` now includes the header. 
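For downstream code that imported these mixins from the package root, the migration implied by this breaking change is a one-line import update per mixin. A minimal sketch follows; the module paths are the ones used throughout the hunks below, while `MyCustomComponent` is a hypothetical consumer class and not part of this patch:

```python
# Before (no longer works once griptape/mixins/__init__.py stops re-exporting names):
# from griptape.mixins import SerializableMixin

# After: import each mixin from its defining module, as this patch does internally.
# The same pattern applies to RuleMixin, ActivityMixin, FuturesExecutorMixin, and the rest.
from griptape.mixins.serializable_mixin import SerializableMixin


class MyCustomComponent(SerializableMixin):
    """Hypothetical downstream class; only its import line needs to change."""
```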
diff --git a/griptape/artifacts/action_artifact.py b/griptape/artifacts/action_artifact.py index a10653078..9772bbbab 100644 --- a/griptape/artifacts/action_artifact.py +++ b/griptape/artifacts/action_artifact.py @@ -5,7 +5,7 @@ from attrs import define, field from griptape.artifacts import BaseArtifact -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.common import ToolAction diff --git a/griptape/artifacts/base_artifact.py b/griptape/artifacts/base_artifact.py index d1e0d34f4..82a0bbd23 100644 --- a/griptape/artifacts/base_artifact.py +++ b/griptape/artifacts/base_artifact.py @@ -7,7 +7,7 @@ from attrs import Factory, define, field -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.common import Reference diff --git a/griptape/common/actions/base_action.py b/griptape/common/actions/base_action.py index abd9abcd4..99c443248 100644 --- a/griptape/common/actions/base_action.py +++ b/griptape/common/actions/base_action.py @@ -1,6 +1,6 @@ from abc import ABC -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin class BaseAction(SerializableMixin, ABC): ... diff --git a/griptape/common/prompt_stack/contents/base_message_content.py b/griptape/common/prompt_stack/contents/base_message_content.py index a0b10fd05..cbd16811b 100644 --- a/griptape/common/prompt_stack/contents/base_message_content.py +++ b/griptape/common/prompt_stack/contents/base_message_content.py @@ -5,7 +5,7 @@ from attrs import define, field -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from collections.abc import Sequence diff --git a/griptape/common/prompt_stack/messages/base_message.py b/griptape/common/prompt_stack/messages/base_message.py index 15bcd9c73..6a0d9522e 100644 --- a/griptape/common/prompt_stack/messages/base_message.py +++ b/griptape/common/prompt_stack/messages/base_message.py @@ -5,7 +5,7 @@ from attrs import Factory, define, field -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.common import BaseDeltaMessageContent, BaseMessageContent diff --git a/griptape/common/prompt_stack/prompt_stack.py b/griptape/common/prompt_stack/prompt_stack.py index c9f71aa20..6d8dfde75 100644 --- a/griptape/common/prompt_stack/prompt_stack.py +++ b/griptape/common/prompt_stack/prompt_stack.py @@ -22,7 +22,7 @@ Message, TextMessageContent, ) -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.tools import BaseTool diff --git a/griptape/common/reference.py b/griptape/common/reference.py index 66a62b83f..637c68318 100644 --- a/griptape/common/reference.py +++ b/griptape/common/reference.py @@ -5,7 +5,7 @@ from attrs import Factory, define, field -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin @define(kw_only=True) diff --git a/griptape/configs/drivers/base_drivers_config.py b/griptape/configs/drivers/base_drivers_config.py index 456249634..0d9f476ab 100644 --- a/griptape/configs/drivers/base_drivers_config.py +++ b/griptape/configs/drivers/base_drivers_config.py @@ -5,7 +5,7 @@ from attrs import define, field -from griptape.mixins import SerializableMixin 
+from griptape.mixins.serializable_mixin import SerializableMixin from griptape.utils.decorators import lazy_property if TYPE_CHECKING: diff --git a/griptape/drivers/audio_transcription/base_audio_transcription_driver.py b/griptape/drivers/audio_transcription/base_audio_transcription_driver.py index ae46c474c..a79d390d3 100644 --- a/griptape/drivers/audio_transcription/base_audio_transcription_driver.py +++ b/griptape/drivers/audio_transcription/base_audio_transcription_driver.py @@ -6,7 +6,8 @@ from attrs import define, field from griptape.events import EventBus, FinishAudioTranscriptionEvent, StartAudioTranscriptionEvent -from griptape.mixins import ExponentialBackoffMixin, SerializableMixin +from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import AudioArtifact, TextArtifact diff --git a/griptape/drivers/embedding/base_embedding_driver.py b/griptape/drivers/embedding/base_embedding_driver.py index 8998f00e5..2a3533728 100644 --- a/griptape/drivers/embedding/base_embedding_driver.py +++ b/griptape/drivers/embedding/base_embedding_driver.py @@ -7,7 +7,8 @@ from attrs import define, field from griptape.chunkers import BaseChunker, TextChunker -from griptape.mixins import ExponentialBackoffMixin, SerializableMixin +from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import TextArtifact diff --git a/griptape/drivers/event_listener/base_event_listener_driver.py b/griptape/drivers/event_listener/base_event_listener_driver.py index 75bdc9f75..f9cb55dc9 100644 --- a/griptape/drivers/event_listener/base_event_listener_driver.py +++ b/griptape/drivers/event_listener/base_event_listener_driver.py @@ -7,7 +7,7 @@ from attrs import Factory, define, field -from griptape.mixins import FuturesExecutorMixin +from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin if TYPE_CHECKING: from griptape.events import BaseEvent diff --git a/griptape/drivers/image_generation/base_image_generation_driver.py b/griptape/drivers/image_generation/base_image_generation_driver.py index 8dfca5945..8c10ce12d 100644 --- a/griptape/drivers/image_generation/base_image_generation_driver.py +++ b/griptape/drivers/image_generation/base_image_generation_driver.py @@ -6,7 +6,8 @@ from attrs import define, field from griptape.events import EventBus, FinishImageGenerationEvent, StartImageGenerationEvent -from griptape.mixins import ExponentialBackoffMixin, SerializableMixin +from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import ImageArtifact diff --git a/griptape/drivers/image_generation_model/base_image_generation_model_driver.py b/griptape/drivers/image_generation_model/base_image_generation_model_driver.py index 9acc62890..9dc5a9b6b 100644 --- a/griptape/drivers/image_generation_model/base_image_generation_model_driver.py +++ b/griptape/drivers/image_generation_model/base_image_generation_model_driver.py @@ -5,7 +5,7 @@ from attrs import define -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import ImageArtifact diff --git a/griptape/drivers/image_query/base_image_query_driver.py 
b/griptape/drivers/image_query/base_image_query_driver.py index 28c571328..ecfe0ca6e 100644 --- a/griptape/drivers/image_query/base_image_query_driver.py +++ b/griptape/drivers/image_query/base_image_query_driver.py @@ -6,7 +6,8 @@ from attrs import define, field from griptape.events import EventBus, FinishImageQueryEvent, StartImageQueryEvent -from griptape.mixins import ExponentialBackoffMixin, SerializableMixin +from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import ImageArtifact, TextArtifact diff --git a/griptape/drivers/image_query_model/base_image_query_model_driver.py b/griptape/drivers/image_query_model/base_image_query_model_driver.py index 5f60367d5..ac97ee3c1 100644 --- a/griptape/drivers/image_query_model/base_image_query_model_driver.py +++ b/griptape/drivers/image_query_model/base_image_query_model_driver.py @@ -5,7 +5,7 @@ from attrs import define -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import ImageArtifact, TextArtifact diff --git a/griptape/drivers/memory/conversation/base_conversation_memory_driver.py b/griptape/drivers/memory/conversation/base_conversation_memory_driver.py index ea0a171f2..c9963b1eb 100644 --- a/griptape/drivers/memory/conversation/base_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/base_conversation_memory_driver.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.memory.structure import Run diff --git a/griptape/drivers/prompt/base_prompt_driver.py b/griptape/drivers/prompt/base_prompt_driver.py index c07980c9e..778b6f474 100644 --- a/griptape/drivers/prompt/base_prompt_driver.py +++ b/griptape/drivers/prompt/base_prompt_driver.py @@ -17,7 +17,8 @@ observable, ) from griptape.events import CompletionChunkEvent, EventBus, FinishPromptEvent, StartPromptEvent -from griptape.mixins import ExponentialBackoffMixin, SerializableMixin +from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from collections.abc import Iterator diff --git a/griptape/drivers/text_to_speech/base_text_to_speech_driver.py b/griptape/drivers/text_to_speech/base_text_to_speech_driver.py index cb11cc498..b2ad8bc3e 100644 --- a/griptape/drivers/text_to_speech/base_text_to_speech_driver.py +++ b/griptape/drivers/text_to_speech/base_text_to_speech_driver.py @@ -8,7 +8,8 @@ from griptape.events import EventBus from griptape.events.finish_text_to_speech_event import FinishTextToSpeechEvent from griptape.events.start_text_to_speech_event import StartTextToSpeechEvent -from griptape.mixins import ExponentialBackoffMixin, SerializableMixin +from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts.audio_artifact import AudioArtifact diff --git a/griptape/drivers/vector/base_vector_store_driver.py b/griptape/drivers/vector/base_vector_store_driver.py index 2abb29c3f..50810752e 100644 --- a/griptape/drivers/vector/base_vector_store_driver.py +++ b/griptape/drivers/vector/base_vector_store_driver.py @@ -9,7 
+9,8 @@ from griptape import utils from griptape.artifacts import BaseArtifact, ListArtifact, TextArtifact -from griptape.mixins import FuturesExecutorMixin, SerializableMixin +from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.drivers import BaseEmbeddingDriver diff --git a/griptape/engines/rag/modules/base_rag_module.py b/griptape/engines/rag/modules/base_rag_module.py index 668b3aced..30c66f27c 100644 --- a/griptape/engines/rag/modules/base_rag_module.py +++ b/griptape/engines/rag/modules/base_rag_module.py @@ -7,7 +7,7 @@ from attrs import Factory, define, field from griptape.common import Message, PromptStack -from griptape.mixins import FuturesExecutorMixin +from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin if TYPE_CHECKING: from griptape.engines.rag import RagContext diff --git a/griptape/engines/rag/modules/response/prompt_response_rag_module.py b/griptape/engines/rag/modules/response/prompt_response_rag_module.py index 78dfba8f4..fbb8ed7e6 100644 --- a/griptape/engines/rag/modules/response/prompt_response_rag_module.py +++ b/griptape/engines/rag/modules/response/prompt_response_rag_module.py @@ -7,7 +7,7 @@ from griptape.artifacts.text_artifact import TextArtifact from griptape.configs import Defaults from griptape.engines.rag.modules import BaseResponseRagModule -from griptape.mixins import RuleMixin +from griptape.mixins.rule_mixin import RuleMixin from griptape.utils import J2 if TYPE_CHECKING: diff --git a/griptape/engines/rag/rag_context.py b/griptape/engines/rag/rag_context.py index 3dbfc6834..b48fb3acb 100644 --- a/griptape/engines/rag/rag_context.py +++ b/griptape/engines/rag/rag_context.py @@ -5,7 +5,7 @@ from attrs import define, field from griptape import utils -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import BaseArtifact, TextArtifact diff --git a/griptape/engines/rag/stages/base_rag_stage.py b/griptape/engines/rag/stages/base_rag_stage.py index 6a28551b4..dfa2e6002 100644 --- a/griptape/engines/rag/stages/base_rag_stage.py +++ b/griptape/engines/rag/stages/base_rag_stage.py @@ -5,7 +5,7 @@ from griptape.engines.rag import RagContext from griptape.engines.rag.modules import BaseRagModule -from griptape.mixins import FuturesExecutorMixin +from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin @define(kw_only=True) diff --git a/griptape/events/base_event.py b/griptape/events/base_event.py index 61443107e..f008c9cd8 100644 --- a/griptape/events/base_event.py +++ b/griptape/events/base_event.py @@ -7,7 +7,7 @@ from attrs import Factory, define, field -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin @define diff --git a/griptape/loaders/base_loader.py b/griptape/loaders/base_loader.py index 525b4df0a..14f9aa10f 100644 --- a/griptape/loaders/base_loader.py +++ b/griptape/loaders/base_loader.py @@ -5,7 +5,7 @@ from attrs import define, field -from griptape.mixins import FuturesExecutorMixin +from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin from griptape.utils.futures import execute_futures_dict from griptape.utils.hash import bytes_to_hash, str_to_hash diff --git a/griptape/memory/meta/base_meta_entry.py b/griptape/memory/meta/base_meta_entry.py index c1b253317..d27e79d35 100644 --- a/griptape/memory/meta/base_meta_entry.py 
+++ b/griptape/memory/meta/base_meta_entry.py @@ -4,7 +4,7 @@ from attrs import define -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin @define diff --git a/griptape/memory/structure/base_conversation_memory.py b/griptape/memory/structure/base_conversation_memory.py index 92f5bd942..e2095b460 100644 --- a/griptape/memory/structure/base_conversation_memory.py +++ b/griptape/memory/structure/base_conversation_memory.py @@ -7,7 +7,7 @@ from griptape.common import PromptStack from griptape.configs import Defaults -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin from griptape.utils import dict_merge if TYPE_CHECKING: diff --git a/griptape/memory/structure/run.py b/griptape/memory/structure/run.py index 5d2a182ad..4be0b587c 100644 --- a/griptape/memory/structure/run.py +++ b/griptape/memory/structure/run.py @@ -5,7 +5,7 @@ from attrs import Factory, define, field -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import BaseArtifact diff --git a/griptape/memory/task/task_memory.py b/griptape/memory/task/task_memory.py index c7f12b233..1aa60dba3 100644 --- a/griptape/memory/task/task_memory.py +++ b/griptape/memory/task/task_memory.py @@ -7,7 +7,7 @@ from griptape.artifacts import BaseArtifact, BlobArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact from griptape.memory.meta import ActionSubtaskMetaEntry from griptape.memory.task.storage import BlobArtifactStorage, TextArtifactStorage -from griptape.mixins import ActivityMixin +from griptape.mixins.activity_mixin import ActivityMixin if TYPE_CHECKING: from griptape.memory.task.storage import BaseArtifactStorage diff --git a/griptape/mixins/__init__.py b/griptape/mixins/__init__.py index 32e00dd8b..e69de29bb 100644 --- a/griptape/mixins/__init__.py +++ b/griptape/mixins/__init__.py @@ -1,19 +0,0 @@ -from .activity_mixin import ActivityMixin -from .exponential_backoff_mixin import ExponentialBackoffMixin -from .actions_subtask_origin_mixin import ActionsSubtaskOriginMixin -from .rule_mixin import RuleMixin -from .serializable_mixin import SerializableMixin -from .media_artifact_file_output_mixin import BlobArtifactFileOutputMixin -from .futures_executor_mixin import FuturesExecutorMixin -from .singleton_mixin import SingletonMixin - -__all__ = [ - "ActivityMixin", - "ExponentialBackoffMixin", - "ActionsSubtaskOriginMixin", - "RuleMixin", - "BlobArtifactFileOutputMixin", - "SerializableMixin", - "FuturesExecutorMixin", - "SingletonMixin", -] diff --git a/griptape/schemas/base_schema.py b/griptape/schemas/base_schema.py index f25e8870b..9290c6098 100644 --- a/griptape/schemas/base_schema.py +++ b/griptape/schemas/base_schema.py @@ -25,7 +25,7 @@ def from_attrs_cls(cls, attrs_cls: type) -> type: """ from marshmallow import post_load - from griptape.mixins import SerializableMixin + from griptape.mixins.serializable_mixin import SerializableMixin class SubSchema(cls): @post_load diff --git a/griptape/structures/workflow.py b/griptape/structures/workflow.py index f1e1ec86b..cd7bef07d 100644 --- a/griptape/structures/workflow.py +++ b/griptape/structures/workflow.py @@ -9,7 +9,7 @@ from griptape.artifacts import ErrorArtifact from griptape.common import observable from griptape.memory.structure import Run -from griptape.mixins import FuturesExecutorMixin +from griptape.mixins.futures_executor_mixin import 
FuturesExecutorMixin from griptape.structures import Structure if TYPE_CHECKING: diff --git a/griptape/tasks/actions_subtask.py b/griptape/tasks/actions_subtask.py index 7cdb5d4de..38a96a603 100644 --- a/griptape/tasks/actions_subtask.py +++ b/griptape/tasks/actions_subtask.py @@ -13,7 +13,7 @@ from griptape.common import ToolAction from griptape.configs import Defaults from griptape.events import EventBus, FinishActionsSubtaskEvent, StartActionsSubtaskEvent -from griptape.mixins import ActionsSubtaskOriginMixin +from griptape.mixins.actions_subtask_origin_mixin import ActionsSubtaskOriginMixin from griptape.tasks import BaseTask from griptape.utils import remove_null_values_in_dict_recursively diff --git a/griptape/tasks/base_audio_generation_task.py b/griptape/tasks/base_audio_generation_task.py index 519a1a59a..fae217d54 100644 --- a/griptape/tasks/base_audio_generation_task.py +++ b/griptape/tasks/base_audio_generation_task.py @@ -6,7 +6,8 @@ from attrs import define from griptape.configs import Defaults -from griptape.mixins import BlobArtifactFileOutputMixin, RuleMixin +from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin +from griptape.mixins.rule_mixin import RuleMixin from griptape.tasks import BaseTask logger = logging.getLogger(Defaults.logging_config.logger_name) diff --git a/griptape/tasks/base_audio_input_task.py b/griptape/tasks/base_audio_input_task.py index e39f70fcd..8a834db56 100644 --- a/griptape/tasks/base_audio_input_task.py +++ b/griptape/tasks/base_audio_input_task.py @@ -8,7 +8,7 @@ from griptape.artifacts.audio_artifact import AudioArtifact from griptape.configs import Defaults -from griptape.mixins import RuleMixin +from griptape.mixins.rule_mixin import RuleMixin from griptape.tasks import BaseTask logger = logging.getLogger(Defaults.logging_config.logger_name) diff --git a/griptape/tasks/base_image_generation_task.py b/griptape/tasks/base_image_generation_task.py index f0c1f0e7e..bd36d0080 100644 --- a/griptape/tasks/base_image_generation_task.py +++ b/griptape/tasks/base_image_generation_task.py @@ -10,7 +10,8 @@ from griptape.configs import Defaults from griptape.loaders import ImageLoader -from griptape.mixins import BlobArtifactFileOutputMixin, RuleMixin +from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin +from griptape.mixins.rule_mixin import RuleMixin from griptape.rules import Rule, Ruleset from griptape.tasks import BaseTask diff --git a/griptape/tasks/base_task.py b/griptape/tasks/base_task.py index 535b3a92d..2c6743035 100644 --- a/griptape/tasks/base_task.py +++ b/griptape/tasks/base_task.py @@ -11,7 +11,7 @@ from griptape.artifacts import ErrorArtifact from griptape.configs import Defaults from griptape.events import EventBus, FinishTaskEvent, StartTaskEvent -from griptape.mixins import FuturesExecutorMixin +from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin if TYPE_CHECKING: from griptape.artifacts import BaseArtifact diff --git a/griptape/tasks/prompt_task.py b/griptape/tasks/prompt_task.py index 9c0060039..d2dd20b36 100644 --- a/griptape/tasks/prompt_task.py +++ b/griptape/tasks/prompt_task.py @@ -8,7 +8,7 @@ from griptape.artifacts import BaseArtifact, ListArtifact, TextArtifact from griptape.common import PromptStack from griptape.configs import Defaults -from griptape.mixins import RuleMixin +from griptape.mixins.rule_mixin import RuleMixin from griptape.tasks import BaseTask from griptape.utils import J2 diff --git a/griptape/tasks/tool_task.py 
b/griptape/tasks/tool_task.py index 6dd5000b3..2dcb796d8 100644 --- a/griptape/tasks/tool_task.py +++ b/griptape/tasks/tool_task.py @@ -8,7 +8,7 @@ from griptape import utils from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact, ListArtifact -from griptape.mixins import ActionsSubtaskOriginMixin +from griptape.mixins.actions_subtask_origin_mixin import ActionsSubtaskOriginMixin from griptape.tasks import ActionsSubtask, PromptTask from griptape.utils import J2 diff --git a/griptape/tasks/toolkit_task.py b/griptape/tasks/toolkit_task.py index ff1194440..ed9860c66 100644 --- a/griptape/tasks/toolkit_task.py +++ b/griptape/tasks/toolkit_task.py @@ -8,7 +8,7 @@ from griptape import utils from griptape.artifacts import ActionArtifact, BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact from griptape.common import PromptStack, ToolAction -from griptape.mixins import ActionsSubtaskOriginMixin +from griptape.mixins.actions_subtask_origin_mixin import ActionsSubtaskOriginMixin from griptape.tasks import ActionsSubtask, PromptTask from griptape.utils import J2 diff --git a/griptape/tools/base_image_generation_tool.py b/griptape/tools/base_image_generation_tool.py index 487c6d1ba..ee1c37b6d 100644 --- a/griptape/tools/base_image_generation_tool.py +++ b/griptape/tools/base_image_generation_tool.py @@ -1,6 +1,6 @@ from attrs import define -from griptape.mixins import BlobArtifactFileOutputMixin +from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin from griptape.tools import BaseTool diff --git a/griptape/tools/base_tool.py b/griptape/tools/base_tool.py index 7c6785649..b846ec40b 100644 --- a/griptape/tools/base_tool.py +++ b/griptape/tools/base_tool.py @@ -16,7 +16,7 @@ from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact, TextArtifact from griptape.common import observable -from griptape.mixins import ActivityMixin +from griptape.mixins.activity_mixin import ActivityMixin if TYPE_CHECKING: from griptape.common import ToolAction diff --git a/griptape/tools/extraction/tool.py b/griptape/tools/extraction/tool.py index 1f6d06b80..3cb46a670 100644 --- a/griptape/tools/extraction/tool.py +++ b/griptape/tools/extraction/tool.py @@ -6,7 +6,7 @@ from schema import Literal, Or, Schema from griptape.artifacts import ErrorArtifact, ListArtifact, TextArtifact -from griptape.mixins import RuleMixin +from griptape.mixins.rule_mixin import RuleMixin from griptape.tools import BaseTool from griptape.utils.decorators import activity diff --git a/griptape/tools/text_to_speech/tool.py b/griptape/tools/text_to_speech/tool.py index 95a42d0ae..ea4982029 100644 --- a/griptape/tools/text_to_speech/tool.py +++ b/griptape/tools/text_to_speech/tool.py @@ -5,7 +5,7 @@ from attrs import define, field from schema import Literal, Schema -from griptape.mixins import BlobArtifactFileOutputMixin +from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin from griptape.tools import BaseTool from griptape.utils.decorators import activity diff --git a/tests/mocks/mock_futures_executor.py b/tests/mocks/mock_futures_executor.py index cbbf84560..30dcfc21e 100644 --- a/tests/mocks/mock_futures_executor.py +++ b/tests/mocks/mock_futures_executor.py @@ -1,4 +1,4 @@ -from griptape.mixins import FuturesExecutorMixin +from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin class MockFuturesExecutor(FuturesExecutorMixin): ... 
diff --git a/tests/mocks/mock_serializable.py b/tests/mocks/mock_serializable.py index b40ae25b4..9a838f9b5 100644 --- a/tests/mocks/mock_serializable.py +++ b/tests/mocks/mock_serializable.py @@ -4,7 +4,7 @@ from attrs import define, field -from griptape.mixins import SerializableMixin +from griptape.mixins.serializable_mixin import SerializableMixin @define diff --git a/tests/unit/mixins/test_image_artifact_file_output_mixin.py b/tests/unit/mixins/test_image_artifact_file_output_mixin.py index 03c44e081..cf124da39 100644 --- a/tests/unit/mixins/test_image_artifact_file_output_mixin.py +++ b/tests/unit/mixins/test_image_artifact_file_output_mixin.py @@ -4,7 +4,7 @@ import pytest from griptape.artifacts import ImageArtifact -from griptape.mixins import BlobArtifactFileOutputMixin +from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin class TestMediaArtifactFileOutputMixin: diff --git a/tests/unit/mixins/test_rule_mixin.py b/tests/unit/mixins/test_rule_mixin.py index e88014566..393d721a3 100644 --- a/tests/unit/mixins/test_rule_mixin.py +++ b/tests/unit/mixins/test_rule_mixin.py @@ -1,6 +1,6 @@ import pytest -from griptape.mixins import RuleMixin +from griptape.mixins.rule_mixin import RuleMixin from griptape.rules import Rule, Ruleset from griptape.structures import Agent from griptape.tasks import PromptTask From 4bf3d57bea94c36db6aeba60e1eed24d3a3a2b6d Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 11 Sep 2024 13:47:06 -0700 Subject: [PATCH 34/39] Add ability to use EventListener as Context Manager (#1163) --- CHANGELOG.md | 1 + docs/griptape-framework/misc/events.md | 9 +++++++++ .../misc/src/events_context.py | 13 +++++++++++++ griptape/events/event_bus.py | 17 +++++++++++------ griptape/events/event_listener.py | 16 ++++++++++++++++ tests/unit/events/test_event_listener.py | 18 ++++++++++++++++++ 6 files changed, 68 insertions(+), 6 deletions(-) create mode 100644 docs/griptape-framework/misc/src/events_context.py diff --git a/CHANGELOG.md b/CHANGELOG.md index cfb97f0f1..788dd2e23 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Parameter `meta: dict` on `BaseEvent`. - `AzureOpenAiTextToSpeechDriver`. +- Ability to use Event Listeners as Context Managers for temporarily setting the Event Bus listeners. ### Changed - **BREAKING**: Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifacts`. diff --git a/docs/griptape-framework/misc/events.md b/docs/griptape-framework/misc/events.md index 3c4181aee..beb02d66a 100644 --- a/docs/griptape-framework/misc/events.md +++ b/docs/griptape-framework/misc/events.md @@ -73,6 +73,15 @@ Handler 1 ``` +## Context Managers + +You can also use [EventListener](../../reference/griptape/events/event_listener.md)s as a Python Context Manager. +The `EventListener` will automatically be added and removed from the [EventBus](../../reference/griptape/events/event_bus.md) when entering and exiting the context. 
+ +```python +--8<-- "docs/griptape-framework/misc/src/events_context.py" +``` + ## Streaming diff --git a/docs/griptape-framework/misc/src/events_context.py b/docs/griptape-framework/misc/src/events_context.py new file mode 100644 index 000000000..f9597ec15 --- /dev/null +++ b/docs/griptape-framework/misc/src/events_context.py @@ -0,0 +1,13 @@ +from griptape.events import EventBus, EventListener, FinishStructureRunEvent, StartPromptEvent +from griptape.structures import Agent + +EventBus.add_event_listeners( + [EventListener(lambda e: print(f"Out of context: {e.type}"), event_types=[StartPromptEvent])] +) + +agent = Agent(input="Hello!") + +with EventListener(lambda e: print(f"In context: {e.type}"), event_types=[FinishStructureRunEvent]): + agent.run() + +agent.run() diff --git a/griptape/events/event_bus.py b/griptape/events/event_bus.py index 3ddc325ff..b7954480e 100644 --- a/griptape/events/event_bus.py +++ b/griptape/events/event_bus.py @@ -1,8 +1,9 @@ from __future__ import annotations +import threading from typing import TYPE_CHECKING -from attrs import define, field +from attrs import Factory, define, field from griptape.mixins.singleton_mixin import SingletonMixin @@ -13,6 +14,7 @@ @define class _EventBus(SingletonMixin): _event_listeners: list[EventListener] = field(factory=list, kw_only=True, alias="_event_listeners") + _thread_lock: threading.Lock = field(default=Factory(lambda: threading.Lock()), alias="_thread_lock") @property def event_listeners(self) -> list[EventListener]: @@ -26,21 +28,24 @@ def remove_event_listeners(self, event_listeners: list[EventListener]) -> None: self.remove_event_listener(event_listener) def add_event_listener(self, event_listener: EventListener) -> EventListener: - if event_listener not in self._event_listeners: - self._event_listeners.append(event_listener) + with self._thread_lock: + if event_listener not in self._event_listeners: + self._event_listeners.append(event_listener) return event_listener def remove_event_listener(self, event_listener: EventListener) -> None: - if event_listener in self._event_listeners: - self._event_listeners.remove(event_listener) + with self._thread_lock: + if event_listener in self._event_listeners: + self._event_listeners.remove(event_listener) def publish_event(self, event: BaseEvent, *, flush: bool = False) -> None: for event_listener in self._event_listeners: event_listener.publish_event(event, flush=flush) def clear_event_listeners(self) -> None: - self._event_listeners.clear() + with self._thread_lock: + self._event_listeners.clear() EventBus = _EventBus() diff --git a/griptape/events/event_listener.py b/griptape/events/event_listener.py index 74171d375..1fad4a1de 100644 --- a/griptape/events/event_listener.py +++ b/griptape/events/event_listener.py @@ -16,6 +16,22 @@ class EventListener: event_types: Optional[list[type[BaseEvent]]] = field(default=None, kw_only=True) driver: Optional[BaseEventListenerDriver] = field(default=None, kw_only=True) + _last_event_listeners: Optional[list[EventListener]] = field(default=None) + + def __enter__(self) -> EventListener: + from griptape.events import EventBus + + EventBus.add_event_listener(self) + + return self + + def __exit__(self, type, value, traceback) -> None: # noqa: ANN001, A002 + from griptape.events import EventBus + + EventBus.remove_event_listener(self) + + self._last_event_listeners = None + def publish_event(self, event: BaseEvent, *, flush: bool = False) -> None: event_types = self.event_types diff --git a/tests/unit/events/test_event_listener.py 
b/tests/unit/events/test_event_listener.py index a6d90d4fc..f35bc5416 100644 --- a/tests/unit/events/test_event_listener.py +++ b/tests/unit/events/test_event_listener.py @@ -135,3 +135,21 @@ def event_handler(event: BaseEvent): event_listener.publish_event(mock_event) mock_event_listener_driver.publish_event.assert_called_once_with({"event": mock_event.to_dict()}, flush=False) + + def test_context_manager(self): + e1 = EventListener() + EventBus.add_event_listeners([e1]) + + with EventListener() as e2: + assert EventBus.event_listeners == [e1, e2] + + assert EventBus.event_listeners == [e1] + + def test_context_manager_multiple(self): + e1 = EventListener() + EventBus.add_event_listener(e1) + + with EventListener() as e2, EventListener() as e3: + assert EventBus.event_listeners == [e1, e2, e3] + + assert EventBus.event_listeners == [e1] From 2ae50b3c826b2c41788c42bc581ba5b2fb96fd69 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 11 Sep 2024 14:15:45 -0700 Subject: [PATCH 35/39] Add JsonSchemaRule (#1165) --- CHANGELOG.md | 1 + .../griptape-framework/structures/rulesets.md | 62 ++++++++++++++++++- .../structures/src/basic_rule.py | 13 ++++ .../structures/src/json_schema_rule.py | 18 ++++++ .../src/json_schema_rule_pydantic.py | 22 +++++++ griptape/mixins/rule_mixin.py | 6 +- griptape/rules/__init__.py | 4 +- griptape/rules/base_rule.py | 17 +++++ griptape/rules/json_schema_rule.py | 17 +++++ griptape/rules/rule.py | 11 +++- griptape/rules/ruleset.py | 6 +- griptape/structures/structure.py | 4 +- griptape/templates/rules/json_schema.j2 | 1 + griptape/templates/rulesets/rulesets.j2 | 2 +- tests/unit/rules/test_json_schema_rule.py | 32 ++++++++++ tests/unit/rules/test_rule.py | 8 +++ 16 files changed, 209 insertions(+), 15 deletions(-) create mode 100644 docs/griptape-framework/structures/src/basic_rule.py create mode 100644 docs/griptape-framework/structures/src/json_schema_rule.py create mode 100644 docs/griptape-framework/structures/src/json_schema_rule_pydantic.py create mode 100644 griptape/rules/base_rule.py create mode 100644 griptape/rules/json_schema_rule.py create mode 100644 griptape/templates/rules/json_schema.j2 create mode 100644 tests/unit/rules/test_json_schema_rule.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 788dd2e23..25387aafd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Parameter `meta: dict` on `BaseEvent`. - `AzureOpenAiTextToSpeechDriver`. - Ability to use Event Listeners as Context Managers for temporarily setting the Event Bus listeners. +- `JsonSchemaRule` for instructing the LLM to output a JSON object that conforms to a schema. ### Changed - **BREAKING**: Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifacts`. diff --git a/docs/griptape-framework/structures/rulesets.md b/docs/griptape-framework/structures/rulesets.md index d69b085ac..a0773856f 100644 --- a/docs/griptape-framework/structures/rulesets.md +++ b/docs/griptape-framework/structures/rulesets.md @@ -5,8 +5,66 @@ search: ## Overview -A [Ruleset](../../reference/griptape/rules/ruleset.md) can be used to define rules for [Structures](../structures/agents.md) and [Tasks](../structures/tasks.md). -Rulesets can be used to shape personality, format output, restrict topics, and more. 
+A [Ruleset](../../reference/griptape/rules/ruleset.md) can be used to define [Rule](../../reference/griptape/rules/base_rule.md)s for [Structures](../structures/agents.md) and [Tasks](../structures/tasks.md). Griptape places Rules into the LLM's system prompt for strong control over the output.
+
+## Types of Rules
+
+### Rule
+
+[Rule](../../reference/griptape/rules/base_rule.md)s shape the LLM's behavior by defining specific guidelines or instructions for how it should interpret and respond to inputs. Rules can be used to modify language style, tone, or even behavior based on what you define.
+
+```python
+--8<-- "docs/griptape-framework/structures/src/basic_rule.py"
+```
+
+```
+[09/10/24 14:41:52] INFO PromptTask b7b23a88ea9e4cd0befb7e7a4ed596b0
+ Input: Hi there! How are you?
+ INFO PromptTask b7b23a88ea9e4cd0befb7e7a4ed596b0
+ Output: Ahoy, matey! I be doing just fine, thank ye fer askin'. How be the winds blowin' in yer sails today?
+```
+
+### Json Schema
+
+[JsonSchemaRule](../../reference/griptape/rules/json_schema_rule.md)s define a structured format for the LLM's output by providing a JSON schema.
+This is particularly useful when you need the LLM to return well-formed data, such as JSON objects, with specific fields and data types.
+
+!!! warning
+ `JsonSchemaRule` may break [ToolkitTask](../structures/tasks.md#toolkittask), which relies on a specific [output token](https://github.com/griptape-ai/griptape/blob/e6a04c7b88cf9fa5d6bcf4c833ffebfab89a3258/griptape/tasks/toolkit_task.py#L28).
+
+
+```python
+--8<-- "docs/griptape-framework/structures/src/json_schema_rule.py"
+```
+
+```
+[09/10/24 14:44:53] INFO PromptTask fb26dd41803443c0b51c3d861626e07a
+ Input: What is the sentiment of this message?: 'I am so happy!'
+[09/10/24 14:44:54] INFO PromptTask fb26dd41803443c0b51c3d861626e07a
+ Output: {
+ "answer": "The sentiment of the message is positive.",
+ "relevant_emojis": ["😊", "😃"]
+ }
+```
+
+Although Griptape leverages the `schema` library, you're free to use any JSON schema generation library to define your schema!
+
+For example, using `pydantic`:
+
+```python
+--8<-- "docs/griptape-framework/structures/src/json_schema_rule_pydantic.py"
+```
+
+```
+[09/11/24 09:45:58] INFO PromptTask eae43f52829c4289a6cca9ee7950e075
+ Input: What is the sentiment of this message?: 'I am so happy!'
+ INFO PromptTask eae43f52829c4289a6cca9ee7950e075
+ Output: {
+ "answer": "The sentiment of the message is positive.",
+ "relevant_emojis": ["😊", "😄"]
+ }
+answer='The sentiment of the message is positive.' relevant_emojis=['😊', '😄']
+```
 
 ## Structure
 
diff --git a/docs/griptape-framework/structures/src/basic_rule.py b/docs/griptape-framework/structures/src/basic_rule.py
new file mode 100644
index 000000000..75511f514
--- /dev/null
+++ b/docs/griptape-framework/structures/src/basic_rule.py
@@ -0,0 +1,13 @@
+from griptape.rules import Rule, Ruleset
+from griptape.structures import Agent
+
+pipeline = Agent(
+    rulesets=[
+        Ruleset(
+            name="Personality",
+            rules=[Rule("Talk like a pirate.")],
+        ),
+    ]
+)
+
+pipeline.run("Hi there! 
How are you?") diff --git a/docs/griptape-framework/structures/src/json_schema_rule.py b/docs/griptape-framework/structures/src/json_schema_rule.py new file mode 100644 index 000000000..1f78de928 --- /dev/null +++ b/docs/griptape-framework/structures/src/json_schema_rule.py @@ -0,0 +1,18 @@ +import json + +import schema + +from griptape.rules.json_schema_rule import JsonSchemaRule +from griptape.structures import Agent + +agent = Agent( + rules=[ + JsonSchemaRule( + schema.Schema({"answer": str, "relevant_emojis": schema.Schema(["str"])}).json_schema("Output Format") + ) + ] +) + +output = agent.run("What is the sentiment of this message?: 'I am so happy!'").output + +print(json.dumps(json.loads(output.value), indent=2)) diff --git a/docs/griptape-framework/structures/src/json_schema_rule_pydantic.py b/docs/griptape-framework/structures/src/json_schema_rule_pydantic.py new file mode 100644 index 000000000..bfbcf7cf3 --- /dev/null +++ b/docs/griptape-framework/structures/src/json_schema_rule_pydantic.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +import pydantic + +from griptape.rules.json_schema_rule import JsonSchemaRule +from griptape.structures import Agent + + +class SentimentModel(pydantic.BaseModel): + answer: str + relevant_emojis: list[str] + + +agent = Agent(rules=[JsonSchemaRule(SentimentModel.model_json_schema())]) + +output = agent.run("What is the sentiment of this message?: 'I am so happy!'").output + +sentiment_analysis = SentimentModel.model_validate_json(output.value) + +# Autocomplete via dot notation 🤩 +print(sentiment_analysis.answer) +print(sentiment_analysis.relevant_emojis) diff --git a/griptape/mixins/rule_mixin.py b/griptape/mixins/rule_mixin.py index ff4395270..7fe6a6346 100644 --- a/griptape/mixins/rule_mixin.py +++ b/griptape/mixins/rule_mixin.py @@ -4,7 +4,7 @@ from attrs import Attribute, define, field -from griptape.rules import Rule, Ruleset +from griptape.rules import BaseRule, Ruleset if TYPE_CHECKING: from griptape.structures import Structure @@ -16,7 +16,7 @@ class RuleMixin: ADDITIONAL_RULESET_NAME = "Additional Ruleset" rulesets: list[Ruleset] = field(factory=list, kw_only=True) - rules: list[Rule] = field(factory=list, kw_only=True) + rules: list[BaseRule] = field(factory=list, kw_only=True) structure: Optional[Structure] = field(default=None, kw_only=True) @rulesets.validator # pyright: ignore[reportAttributeAccessIssue] @@ -28,7 +28,7 @@ def validate_rulesets(self, _: Attribute, rulesets: list[Ruleset]) -> None: raise ValueError("Can't have both rulesets and rules specified.") @rules.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_rules(self, _: Attribute, rules: list[Rule]) -> None: + def validate_rules(self, _: Attribute, rules: list[BaseRule]) -> None: if not rules: return diff --git a/griptape/rules/__init__.py b/griptape/rules/__init__.py index 4becdc1e5..a2e8ae08b 100644 --- a/griptape/rules/__init__.py +++ b/griptape/rules/__init__.py @@ -1,5 +1,7 @@ +from griptape.rules.base_rule import BaseRule from griptape.rules.rule import Rule +from griptape.rules.json_schema_rule import JsonSchemaRule from griptape.rules.ruleset import Ruleset -__all__ = ["Rule", "Ruleset"] +__all__ = ["BaseRule", "Rule", "JsonSchemaRule", "Ruleset"] diff --git a/griptape/rules/base_rule.py b/griptape/rules/base_rule.py new file mode 100644 index 000000000..190fc71e4 --- /dev/null +++ b/griptape/rules/base_rule.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Any + 
+from attrs import define, field + + +@define(frozen=True) +class BaseRule(ABC): + value: Any = field() + + def __str__(self) -> str: + return self.to_text() + + @abstractmethod + def to_text(self) -> str: ... diff --git a/griptape/rules/json_schema_rule.py b/griptape/rules/json_schema_rule.py new file mode 100644 index 000000000..1bd418464 --- /dev/null +++ b/griptape/rules/json_schema_rule.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +import json + +from attrs import define, field + +from griptape.rules import BaseRule +from griptape.utils import J2 + + +@define(frozen=True) +class JsonSchemaRule(BaseRule): + value: dict = field() + template_generator: J2 = field(default=J2("rules/json_schema.j2")) + + def to_text(self) -> str: + return self.template_generator.render(json_schema=json.dumps(self.value)) diff --git a/griptape/rules/rule.py b/griptape/rules/rule.py index 1063d174e..952770adf 100644 --- a/griptape/rules/rule.py +++ b/griptape/rules/rule.py @@ -1,8 +1,13 @@ from __future__ import annotations -from attrs import define +from attrs import define, field + +from griptape.rules import BaseRule @define(frozen=True) -class Rule: - value: str +class Rule(BaseRule): + value: str = field() + + def to_text(self) -> str: + return self.value diff --git a/griptape/rules/ruleset.py b/griptape/rules/ruleset.py index 1f158411a..eec1203f9 100644 --- a/griptape/rules/ruleset.py +++ b/griptape/rules/ruleset.py @@ -1,14 +1,14 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Sequence from attrs import define, field if TYPE_CHECKING: - from griptape.rules import Rule + from griptape.rules import BaseRule @define class Ruleset: name: str = field() - rules: list[Rule] = field() + rules: Sequence[BaseRule] = field() diff --git a/griptape/structures/structure.py b/griptape/structures/structure.py index 63ba02373..b066c336e 100644 --- a/griptape/structures/structure.py +++ b/griptape/structures/structure.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: from griptape.artifacts import BaseArtifact from griptape.memory.structure import BaseConversationMemory - from griptape.rules import Rule, Ruleset + from griptape.rules import BaseRule, Rule, Ruleset from griptape.tasks import BaseTask @@ -23,7 +23,7 @@ class Structure(ABC): id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True) rulesets: list[Ruleset] = field(factory=list, kw_only=True) - rules: list[Rule] = field(factory=list, kw_only=True) + rules: list[BaseRule] = field(factory=list, kw_only=True) tasks: list[BaseTask] = field(factory=list, kw_only=True) conversation_memory: Optional[BaseConversationMemory] = field( default=Factory(lambda: ConversationMemory()), diff --git a/griptape/templates/rules/json_schema.j2 b/griptape/templates/rules/json_schema.j2 new file mode 100644 index 000000000..9a351c1cd --- /dev/null +++ b/griptape/templates/rules/json_schema.j2 @@ -0,0 +1 @@ +You must respond with a JSON object that successfully validates against the following schema: {{json_schema}} diff --git a/griptape/templates/rulesets/rulesets.j2 b/griptape/templates/rulesets/rulesets.j2 index 1f58aa811..5b149adbc 100644 --- a/griptape/templates/rulesets/rulesets.j2 +++ b/griptape/templates/rulesets/rulesets.j2 @@ -6,7 +6,7 @@ Ruleset name: {{ ruleset.name }} "{{ ruleset.name }}" rules: {% for rule in ruleset.rules %} Rule #{{loop.index}} -{{ rule.value }} +{{ rule.to_text() }} {% endfor %} {% endfor %} diff --git a/tests/unit/rules/test_json_schema_rule.py 
b/tests/unit/rules/test_json_schema_rule.py new file mode 100644 index 000000000..a1a4f2361 --- /dev/null +++ b/tests/unit/rules/test_json_schema_rule.py @@ -0,0 +1,32 @@ +import json + +import schema + +from griptape.rules import JsonSchemaRule + + +class TestJsonSchemaRule: + def test_init(self): + json_schema = schema.Schema({"type": "string"}).json_schema("test") + rule = JsonSchemaRule(json_schema) + assert rule.value == { + "type": "object", + "properties": {"type": {"const": "string"}}, + "required": ["type"], + "additionalProperties": False, + "$id": "test", + "$schema": "http://json-schema.org/draft-07/schema#", + } + + def test_to_text(self): + json_schema = schema.Schema({"type": "string"}).json_schema("test") + rule = JsonSchemaRule(json_schema) + assert ( + rule.to_text() + == f"You must respond with a JSON object that successfully validates against the following schema: {json.dumps(json_schema)}" + ) + + def test___str__(self): + json_schema = schema.Schema({"type": "string"}).json_schema("test") + rule = JsonSchemaRule(json_schema) + assert str(rule) == rule.to_text() diff --git a/tests/unit/rules/test_rule.py b/tests/unit/rules/test_rule.py index f3bbf4664..9afce8dd5 100644 --- a/tests/unit/rules/test_rule.py +++ b/tests/unit/rules/test_rule.py @@ -5,3 +5,11 @@ class TestRule: def test_init(self): rule = Rule("foobar") assert rule.value == "foobar" + + def test_to_text(self): + rule = Rule("foobar") + assert rule.to_text() == "foobar" + + def test___str__(self): + rule = Rule("foobar") + assert str(rule) == "foobar" From c6d2f9e1ad68144b9c6d0fe1f8658b6428d6b520 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 11 Sep 2024 15:33:26 -0700 Subject: [PATCH 36/39] Drivers Config Context Manager (#1162) --- CHANGELOG.md | 1 + docs/griptape-framework/structures/configs.md | 15 ++++++++- .../structures/src/config_defaults.py | 12 +++++++ .../structures/src/drivers_config_with.py | 31 +++++++++++++++++++ .../configs/drivers/base_drivers_config.py | 21 ++++++++++++- .../configs/drivers/test_drivers_config.py | 11 +++++++ 6 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 docs/griptape-framework/structures/src/config_defaults.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_with.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 25387aafd..1fed1b200 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `AzureOpenAiTextToSpeechDriver`. - Ability to use Event Listeners as Context Managers for temporarily setting the Event Bus listeners. - `JsonSchemaRule` for instructing the LLM to output a JSON object that conforms to a schema. +- Ability to use Drivers Configs as Context Managers for temporarily setting the default Drivers. ### Changed - **BREAKING**: Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifacts`. diff --git a/docs/griptape-framework/structures/configs.md b/docs/griptape-framework/structures/configs.md index 2a9b5c62d..e192af79d 100644 --- a/docs/griptape-framework/structures/configs.md +++ b/docs/griptape-framework/structures/configs.md @@ -5,7 +5,14 @@ search: ## Overview -Griptape exposes global configuration options to easily customize different parts of the framework. +Griptape exposes a global singleton, [Defaults](../../reference/griptape/configs/defaults_config.md), which can be used to access and modify the default configurations of the framework. 
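For example, the defaults that are currently active can be inspected directly off the singleton (a minimal sketch; the exact Drivers you see depend on which `DriversConfig` is in effect):

```python
from griptape.configs import Defaults

# The Drivers config consulted whenever new framework objects are constructed.
print(type(Defaults.drivers_config).__name__)  # e.g. OpenAiDriversConfig
print(type(Defaults.drivers_config.prompt_driver).__name__)  # e.g. OpenAiChatPromptDriver
```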
+ +To update the default configurations, simply update the fields on the `Defaults` object. +Framework objects will be created with the currently set default configurations, but you can always override at the individual class level. + +```python +--8<-- "docs/griptape-framework/structures/src/config_defaults.py" +``` ### Drivers Configs @@ -13,6 +20,12 @@ The [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) Griptape provides predefined [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md)'s for widely used services that provide APIs for most Driver types Griptape offers. +`DriversConfig`s can be used as a Python Context Manager using the `with` statement to temporarily change the default configurations for a block of code. + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_with.py" +``` + #### OpenAI The [OpenAI Driver config](../../reference/griptape/configs/drivers/openai_drivers_config.md) provides default Drivers for OpenAI's APIs. This is the default config for all Structures. diff --git a/docs/griptape-framework/structures/src/config_defaults.py b/docs/griptape-framework/structures/src/config_defaults.py new file mode 100644 index 000000000..01deec3a9 --- /dev/null +++ b/docs/griptape-framework/structures/src/config_defaults.py @@ -0,0 +1,12 @@ +from griptape.configs import Defaults +from griptape.configs.drivers import AnthropicDriversConfig, OpenAiDriversConfig +from griptape.drivers.prompt.anthropic_prompt_driver import AnthropicPromptDriver +from griptape.structures import Agent + +Defaults.drivers_config = OpenAiDriversConfig() # Default +openai_agent = Agent() + +Defaults.drivers_config = AnthropicDriversConfig() +anthropic_agent = Agent( + prompt_driver=AnthropicPromptDriver(model="claude-3-5-sonnet-20240620"), # Override the default prompt driver +) diff --git a/docs/griptape-framework/structures/src/drivers_config_with.py b/docs/griptape-framework/structures/src/drivers_config_with.py new file mode 100644 index 000000000..65fab3f38 --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_with.py @@ -0,0 +1,31 @@ +import schema + +from griptape.configs.drivers import AnthropicDriversConfig, OpenAiDriversConfig +from griptape.drivers import AnthropicPromptDriver, OpenAiChatPromptDriver +from griptape.engines import JsonExtractionEngine +from griptape.structures import Agent +from griptape.tasks import ToolTask +from griptape.tools import ExtractionTool + +with OpenAiDriversConfig(): # Agent will be created with OpenAi Drivers + openai_agent = Agent() + +with AnthropicDriversConfig(): # Agent will be created with Anthropic Drivers + anthropic_agent = Agent( + tasks=[ + ToolTask( + "Extract sentiment from this text: {{ args[0] }}", + prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"), # Override this particular Task's prompt driver + tool=ExtractionTool( + extraction_engine=JsonExtractionEngine( + prompt_driver=AnthropicPromptDriver( # Override this particular Engine's prompt driver + model="claude-3-opus-20240229" + ), + template_schema=schema.Schema({"sentiment": str}).json_schema("Output"), + ), + ), + ) + ] + ) + +anthropic_agent.run("Hello, I am happy!") diff --git a/griptape/configs/drivers/base_drivers_config.py b/griptape/configs/drivers/base_drivers_config.py index 0d9f476ab..ceead5c84 100644 --- a/griptape/configs/drivers/base_drivers_config.py +++ b/griptape/configs/drivers/base_drivers_config.py @@ -1,7 +1,7 @@ from __future__ import annotations from abc import ABC, 
abstractmethod -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional from attrs import define, field @@ -48,6 +48,25 @@ class BaseDriversConfig(ABC, SerializableMixin): default=None, kw_only=True, metadata={"serializable": True}, alias="audio_transcription_driver" ) + _last_drivers_config: Optional[BaseDriversConfig] = field(default=None) + + def __enter__(self) -> BaseDriversConfig: + from griptape.configs import Defaults + + self._last_drivers_config = Defaults.drivers_config + + Defaults.drivers_config = self + + return self + + def __exit__(self, type, value, traceback) -> None: # noqa: ANN001, A002 + from griptape.configs import Defaults + + if self._last_drivers_config is not None: + Defaults.drivers_config = self._last_drivers_config + + self._last_drivers_config = None + @lazy_property() @abstractmethod def prompt_driver(self) -> BasePromptDriver: ... diff --git a/tests/unit/configs/drivers/test_drivers_config.py b/tests/unit/configs/drivers/test_drivers_config.py index e2476c437..de7edc9ae 100644 --- a/tests/unit/configs/drivers/test_drivers_config.py +++ b/tests/unit/configs/drivers/test_drivers_config.py @@ -1,6 +1,7 @@ import pytest from griptape.configs.drivers import DriversConfig +from tests.mocks.mock_drivers_config import MockDriversConfig class TestDriversConfig: @@ -41,6 +42,16 @@ def test_dot_update(self, config): assert config.prompt_driver.max_tokens == 10 + def test_context_manager(self): + from griptape.configs import Defaults + + old_drivers_config = Defaults.drivers_config + + with MockDriversConfig() as config: + assert Defaults.drivers_config == config + + assert Defaults.drivers_config == old_drivers_config + @pytest.mark.skip_mock_config() def test_lazy_init(self): from griptape.configs import Defaults From 01c8e7ddeb0bf9d3ec28735cd30f0c85f2f5570f Mon Sep 17 00:00:00 2001 From: Matt Vallillo Date: Thu, 12 Sep 2024 14:48:59 -0400 Subject: [PATCH 37/39] Add AzureOpenAiTextToSpeech driver in config (#1169) --- .../configs/drivers/azure_openai_drivers_config.py | 11 +++++++++++ .../drivers/test_azure_openai_drivers_config.py | 12 +++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/griptape/configs/drivers/azure_openai_drivers_config.py b/griptape/configs/drivers/azure_openai_drivers_config.py index a29ba3c2f..3ced8f9cd 100644 --- a/griptape/configs/drivers/azure_openai_drivers_config.py +++ b/griptape/configs/drivers/azure_openai_drivers_config.py @@ -10,6 +10,7 @@ AzureOpenAiEmbeddingDriver, AzureOpenAiImageGenerationDriver, AzureOpenAiImageQueryDriver, + AzureOpenAiTextToSpeechDriver, LocalVectorStoreDriver, ) from griptape.utils.decorators import lazy_property @@ -92,3 +93,13 @@ def vector_store_driver(self) -> LocalVectorStoreDriver: azure_ad_token_provider=self.azure_ad_token_provider, ) ) + + @lazy_property() + def text_to_speech_driver(self) -> AzureOpenAiTextToSpeechDriver: + return AzureOpenAiTextToSpeechDriver( + model="tts", + azure_endpoint=self.azure_endpoint, + api_key=self.api_key, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) diff --git a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py index 01886962e..83b9dd77c 100644 --- a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py +++ b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py @@ -85,6 +85,16 @@ def test_to_dict(self, config): }, "type": "LocalVectorStoreDriver", }, - "text_to_speech_driver": 
{"type": "DummyTextToSpeechDriver"}, + "text_to_speech_driver": { + "base_url": None, + "format": "mp3", + "model": "tts", + "api_version": "2024-07-01-preview", + "azure_deployment": "tts", + "azure_endpoint": "http://localhost:8080", + "organization": None, + "type": "AzureOpenAiTextToSpeechDriver", + "voice": "alloy", + }, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, } From 86100db3c300df865042681328753d5894fb366e Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Fri, 13 Sep 2024 11:35:13 -0700 Subject: [PATCH 38/39] Show mixing and matching Drivers in custom example (#1168) --- docs/griptape-framework/structures/src/drivers_config_7.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/griptape-framework/structures/src/drivers_config_7.py b/docs/griptape-framework/structures/src/drivers_config_7.py index 3b1d396ce..efec4f3cf 100644 --- a/docs/griptape-framework/structures/src/drivers_config_7.py +++ b/docs/griptape-framework/structures/src/drivers_config_7.py @@ -2,14 +2,15 @@ from griptape.configs import Defaults from griptape.configs.drivers import DriversConfig -from griptape.drivers import AnthropicPromptDriver +from griptape.drivers import AnthropicPromptDriver, OpenAiEmbeddingDriver from griptape.structures import Agent Defaults.drivers_config = DriversConfig( prompt_driver=AnthropicPromptDriver( model="claude-3-sonnet-20240229", api_key=os.environ["ANTHROPIC_API_KEY"], - ) + ), + embedding_driver=OpenAiEmbeddingDriver(), ) From 37d558240c0774862974a8acb5bc2b1af1579c08 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Fri, 13 Sep 2024 15:31:59 -0700 Subject: [PATCH 39/39] Refactor Artifacts (#1114) --- CHANGELOG.md | 21 +++ MIGRATION.md | 136 ++++++++++++++++++ docs/griptape-framework/data/artifacts.md | 56 +++----- docs/griptape-framework/data/loaders.md | 6 +- griptape/artifacts/__init__.py | 4 - griptape/artifacts/action_artifact.py | 13 +- griptape/artifacts/audio_artifact.py | 24 +++- griptape/artifacts/base_artifact.py | 38 ++--- griptape/artifacts/blob_artifact.py | 29 ++-- griptape/artifacts/boolean_artifact.py | 19 ++- griptape/artifacts/csv_row_artifact.py | 34 ----- griptape/artifacts/error_artifact.py | 11 +- griptape/artifacts/generic_artifact.py | 10 +- griptape/artifacts/image_artifact.py | 27 ++-- griptape/artifacts/info_artifact.py | 12 +- griptape/artifacts/json_artifact.py | 20 ++- griptape/artifacts/list_artifact.py | 33 +++-- griptape/artifacts/media_artifact.py | 53 ------- griptape/artifacts/text_artifact.py | 24 ++-- griptape/common/prompt_stack/prompt_stack.py | 6 +- .../amazon_bedrock_image_generation_driver.py | 12 +- ...ngface_pipeline_image_generation_driver.py | 4 +- .../leonardo_image_generation_driver.py | 12 +- .../openai_image_generation_driver.py | 3 +- .../extraction/csv_extraction_engine.py | 16 ++- griptape/loaders/csv_loader.py | 15 +- griptape/loaders/dataframe_loader.py | 15 +- griptape/loaders/sql_loader.py | 15 +- ...mixin.py => artifact_file_output_mixin.py} | 8 +- griptape/schemas/base_schema.py | 6 +- griptape/tasks/base_audio_generation_task.py | 4 +- griptape/tasks/base_image_generation_task.py | 9 +- griptape/tasks/tool_task.py | 6 +- griptape/tools/base_image_generation_tool.py | 4 +- griptape/tools/query/tool.py | 4 +- griptape/tools/text_to_speech/tool.py | 4 +- tests/unit/artifacts/test_action_artifact.py | 4 - tests/unit/artifacts/test_audio_artifact.py | 14 +- tests/unit/artifacts/test_base_artifact.py | 8 +- .../artifacts/test_base_media_artifact.py | 30 ---- 
tests/unit/artifacts/test_blob_artifact.py | 27 ++-- tests/unit/artifacts/test_boolean_artifact.py | 11 ++ tests/unit/artifacts/test_csv_row_artifact.py | 30 ---- tests/unit/artifacts/test_image_artifact.py | 13 +- tests/unit/artifacts/test_json_artifact.py | 14 +- tests/unit/artifacts/test_list_artifact.py | 12 +- tests/unit/artifacts/test_text_artifact.py | 5 + ...table_diffusion_image_generation_driver.py | 4 +- ...st_azure_openai_image_generation_driver.py | 9 +- .../test_leonardo_image_generation_driver.py | 4 +- .../test_openai_image_generation_driver.py | 10 +- .../test_base_local_vector_store_driver.py | 15 -- .../extraction/test_csv_extraction_engine.py | 6 +- tests/unit/loaders/test_audio_loader.py | 6 +- tests/unit/loaders/test_csv_loader.py | 29 ++-- tests/unit/loaders/test_dataframe_loader.py | 52 ------- tests/unit/loaders/test_image_loader.py | 19 ++- tests/unit/loaders/test_sql_loader.py | 17 +-- tests/unit/memory/tool/test_task_memory.py | 6 +- .../test_image_artifact_file_output_mixin.py | 12 +- tests/unit/tasks/test_extraction_task.py | 2 +- tests/unit/tools/test_extraction_tool.py | 4 +- tests/unit/tools/test_file_manager.py | 25 +--- .../test_inpainting_image_generation_tool.py | 8 +- .../test_outpainting_image_variation_tool.py | 8 +- .../test_prompt_image_generation_tool.py | 5 +- tests/unit/tools/test_sql_tool.py | 2 +- tests/unit/tools/test_text_to_speech_tool.py | 3 +- .../test_variation_image_generation_tool.py | 4 +- 69 files changed, 574 insertions(+), 557 deletions(-) delete mode 100644 griptape/artifacts/csv_row_artifact.py delete mode 100644 griptape/artifacts/media_artifact.py rename griptape/mixins/{media_artifact_file_output_mixin.py => artifact_file_output_mixin.py} (87%) delete mode 100644 tests/unit/artifacts/test_base_media_artifact.py delete mode 100644 tests/unit/artifacts/test_csv_row_artifact.py delete mode 100644 tests/unit/loaders/test_dataframe_loader.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 1fed1b200..4516c59b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +### Added +- `BaseArtifact.to_bytes()` method to convert an Artifact's value to bytes. +- `BlobArtifact.base64` property for converting a `BlobArtifact`'s value to a base64 string. +- `CsvLoader`/`SqlLoader`/`DataframeLoader` `formatter_fn` field for customizing how SQL results are formatted into `TextArtifact`s. + +### Changed +- **BREAKING**: Removed `CsvRowArtifact`. Use `TextArtifact` instead. +- **BREAKING**: Removed `MediaArtifact`, use `ImageArtifact` or `AudioArtifact` instead. +- **BREAKING**: `CsvLoader`, `DataframeLoader`, and `SqlLoader` now return `list[TextArtifact]`. +- **BREAKING**: Removed `ImageArtifact.media_type`. +- **BREAKING**: Removed `AudioArtifact.media_type`. +- **BREAKING**: Removed `BlobArtifact.dir_name`. +- **BREAKING**: Moved `ImageArtifact.prompt` and `ImageArtifact.model` into `ImageArtifact.meta`. +- **BREAKING**: `ImageArtifact.format` is now required. +- Updated `JsonArtifact` value converter to properly handle more types. +- `AudioArtifact` now subclasses `BlobArtifact` instead of `MediaArtifact`. +- `ImageArtifact` now subclasses `BlobArtifact` instead of `MediaArtifact`. +- Removed `__add__` method from `BaseArtifact`, implemented it where necessary. +- Generic type support to `ListArtifact`. +- Iteration support to `ListArtifact`. + ## [0.31.0] - 2024-09-03 **Note**: This release includes breaking changes. 
Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. diff --git a/MIGRATION.md b/MIGRATION.md index af8835e5b..016a93f03 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,6 +1,142 @@ # Migration Guide This document provides instructions for migrating your codebase to accommodate breaking changes introduced in new versions of Griptape. +## 0.31.X to 0.32.X + +### Removed `MediaArtifact` + +`MediaArtifact` has been removed. Use `ImageArtifact` or `AudioArtifact` instead. + +#### Before + +```python +image_media = MediaArtifact( + b"image_data", + media_type="image", + format="jpeg" +) + +audio_media = MediaArtifact( + b"audio_data", + media_type="audio", + format="wav" +) +``` + +#### After +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg" +) + +audio_artifact = AudioArtifact( + b"audio_data", + format="wav" +) +``` + +### `ImageArtifact.format` is now required + +`ImageArtifact.format` is now a required parameter. Update any code that does not provide a `format` parameter. + +#### Before + +```python +image_artifact = ImageArtifact( + b"image_data" +) +``` + +#### After +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg" +) +``` + +### Removed `CsvRowArtifact` + +`CsvRowArtifact` has been removed. Use `TextArtifact` instead. + +#### Before + +```python +artifact = CsvRowArtifact({"name": "John", "age": 30}) +print(artifact.value) # {"name": "John", "age": 30} +print(type(artifact.value)) # <class 'dict'> +``` + +#### After +```python +artifact = TextArtifact("name: John\nage: 30") +print(artifact.value) # name: John\nage: 30 +print(type(artifact.value)) # <class 'str'> +``` + +If you require storing a dictionary as an Artifact, you can use `GenericArtifact` instead. + +### `CsvLoader`, `DataframeLoader`, and `SqlLoader` return types + +`CsvLoader`, `DataframeLoader`, and `SqlLoader` now return a `list[TextArtifact]` instead of `list[CsvRowArtifact]`. + +If you require a dictionary, set a custom `formatter_fn` and then parse the text to a dictionary. + +#### Before + +```python +results = CsvLoader().load(Path("people.csv").read_text()) + +print(results[0].value) # {"name": "John", "age": 30} +print(type(results[0].value)) # <class 'dict'> +``` + +#### After +```python +results = CsvLoader().load(Path("people.csv").read_text()) + +print(results[0].value) # name: John\nage: 30 +print(type(results[0].value)) # <class 'str'> + +# Customize formatter_fn +results = CsvLoader(formatter_fn=lambda x: json.dumps(x)).load(Path("people.csv").read_text()) +print(results[0].value) # {"name": "John", "age": 30} +print(type(results[0].value)) # <class 'str'> + +dict_results = [json.loads(result.value) for result in results] +print(dict_results[0]) # {"name": "John", "age": 30} +print(type(dict_results[0])) # <class 'dict'> +``` + +### Moved `ImageArtifact.prompt` and `ImageArtifact.model` to `ImageArtifact.meta` + +`ImageArtifact.prompt` and `ImageArtifact.model` have been moved to `ImageArtifact.meta`.
+ +#### Before + +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg", + prompt="Generate an image of a cat", + model="DALL-E" +) + +print(image_artifact.prompt, image_artifact.model) # Generate an image of a cat, DALL-E +``` + +#### After +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg", + meta={"prompt": "Generate an image of a cat", "model": "DALL-E"} +) + +print(image_artifact.meta["prompt"], image_artifact.meta["model"]) # Generate an image of a cat, DALL-E +``` + ## 0.30.X to 0.31.X diff --git a/docs/griptape-framework/data/artifacts.md b/docs/griptape-framework/data/artifacts.md index 8c4da02b3..2edd1ebec 100644 --- a/docs/griptape-framework/data/artifacts.md +++ b/docs/griptape-framework/data/artifacts.md @@ -5,60 +5,50 @@ search: ## Overview -**[Artifacts](../../reference/griptape/artifacts/base_artifact.md)** are used for passing different types of data between Griptape components. All tools return artifacts that are later consumed by tasks and task memory. -Artifacts make sure framework components enforce contracts when passing and consuming data. +**[Artifacts](../../reference/griptape/artifacts/base_artifact.md)** are the core data structure in Griptape. They are used to encapsulate data and enhance it with metadata. ## Text -A [TextArtifact](../../reference/griptape/artifacts/text_artifact.md) for passing text data of arbitrary size around the framework. It can be used to count tokens with [token_count()](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.token_count) with a tokenizer. -It can also be used to generate a text embedding with [generate_embedding()](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.generate_embedding) -and access it with [embedding](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.embedding). +[TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s store textual data. They offer methods such as [token_count()](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.token_count) for counting tokens with a tokenizer, and [generate_embedding()](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.generate_embedding) for creating text embeddings. You can also access the embedding via the [embedding](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.embedding) property. -[TaskMemory](../../reference/griptape/memory/task/task_memory.md) automatically stores [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s returned by tool activities and returns artifact IDs back to the LLM. +When `TextArtifact`s are returned from Tools, they will be stored in [Task Memory](../../griptape-framework/structures/task-memory.md) if the Tool has set `off_prompt=True`. -## Csv Row +## Blob -A [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md) for passing structured row data around the framework. It inherits from [TextArtifact](../../reference/griptape/artifacts/text_artifact.md) and overrides the -[to_text()](../../reference/griptape/artifacts/csv_row_artifact.md#griptape.artifacts.csv_row_artifact.CsvRowArtifact.to_text) method, which always returns a valid CSV row. +[BlobArtifact](../../reference/griptape/artifacts/blob_artifact.md)s store binary large objects (blobs). 
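For example, a brief sketch of working with a `BlobArtifact` (the byte value and name here are illustrative):

```python
from griptape.artifacts import BlobArtifact

blob = BlobArtifact(b"raw bytes", name="data.bin")

print(blob.mime_type)  # application/octet-stream
print(blob.base64)  # the value encoded as a base64 string
print(blob.to_text())  # the value decoded with the artifact's encoding (utf-8 by default)
```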
-## Info +When `BlobArtifact`s are returned from Tools, they will be stored in [Task Memory](../../griptape-framework/structures/task-memory.md) if the Tool has set `off_prompt=True`. -An [InfoArtifact](../../reference/griptape/artifacts/info_artifact.md) for passing short notifications back to the LLM without task memory storing them. +### Image -## Error +[ImageArtifact](../../reference/griptape/artifacts/image_artifact.md)s store image data. This includes binary image data along with metadata such as MIME type and dimensions. They are a subclass of [BlobArtifacts](#blob). -An [ErrorArtifact](../../reference/griptape/artifacts/error_artifact.md) is used for passing errors back to the LLM without task memory storing them. +### Audio -## Blob +[AudioArtifact](../../reference/griptape/artifacts/audio_artifact.md)s store audio content. This includes binary audio data and metadata such as format, and duration. They are a subclass of [BlobArtifacts](#blob). -A [BlobArtifact](../../reference/griptape/artifacts/blob_artifact.md) for passing binary large objects (blobs) back to the LLM. -Treat it as a way to return unstructured data, such as images, videos, audio, and other files back from tools. -Each blob has a [name](../../reference/griptape/artifacts/base_artifact.md#griptape.artifacts.base_artifact.BaseArtifact.name) and -[dir](../../reference/griptape/artifacts/blob_artifact.md#griptape.artifacts.blob_artifact.BlobArtifact.dir_name) to uniquely identify stored objects. +## List -[TaskMemory](../../reference/griptape/memory/task/task_memory.md) automatically stores [BlobArtifact](../../reference/griptape/artifacts/blob_artifact.md)s returned by tool activities that can be reused by other tools. +[ListArtifact](../../reference/griptape/artifacts/list_artifact.md)s store lists of Artifacts. -## Image +When `ListArtifact`s are returned from Tools, their elements will be stored in [Task Memory](../../griptape-framework/structures/task-memory.md) if the element is either a `TextArtifact` or a `BlobArtifact` and the Tool has set `off_prompt=True`. -An [ImageArtifact](../../reference/griptape/artifacts/image_artifact.md) is used for passing images back to the LLM. In addition to binary image data, an Image Artifact includes image metadata like MIME type, dimensions, and prompt and model information for images returned by [image generation Drivers](../drivers/image-generation-drivers.md). It inherits from [BlobArtifact](#blob). +## Info -## Audio +[InfoArtifact](../../reference/griptape/artifacts/info_artifact.md)s store small pieces of textual information. These are useful for conveying messages about the execution or results of an operation, such as "No results found" or "Operation completed successfully." -An [AudioArtifact](../../reference/griptape/artifacts/audio_artifact.md) allows the Framework to interact with audio content. An Audio Artifact includes binary audio content as well as metadata like format, duration, and prompt and model information for audio returned generative models. It inherits from [BlobArtifact](#blob). +## JSON -## Boolean +[JsonArtifact](../../reference/griptape/artifacts/json_artifact.md)s store JSON-serializable data. Any data assigned to the `value` property is processed using `json.dumps(json.loads(value))`. -A [BooleanArtifact](../../reference/griptape/artifacts/boolean_artifact.md) is used for passing boolean values around the framework. +## Error -!!! info - Any object passed on init to `BooleanArtifact` will be coerced into a `bool` type. 
This might lead to unintended behavior: `BooleanArtifact("False").value is True`. Use [BooleanArtifact.parse_bool](../../reference/griptape/artifacts/boolean_artifact.md#griptape.artifacts.boolean_artifact.BooleanArtifact.parse_bool) to convert case-insensitive string literal values `"True"` and `"False"` into a `BooleanArtifact`: `BooleanArtifact.parse_bool("False").value is False`. +[ErrorArtifact](../../reference/griptape/artifacts/error_artifact.md)s store exception information, providing a structured way to convey errors. -## Generic +## Action -A [GenericArtifact](../../reference/griptape/artifacts/generic_artifact.md) can be used as an escape hatch for passing any type of data around the framework. -It is generally not recommended to use this Artifact type, but it can be used in a handful of situations where no other Artifact type fits the data being passed. -See [talking to a video](../../examples/talk-to-a-video.md) for an example of using a `GenericArtifact` to pass a Gemini-specific video file. +[ActionArtifact](../../reference/griptape/artifacts/action_artifact.md)s represent actions taken by an LLM. Currently, the only supported action type is [ToolAction](../../reference/griptape/common/actions/tool_action.md), which is used to execute a [Tool](../../griptape-framework/tools/index.md). -## Json +## Generic -A [JsonArtifact](../../reference/griptape/artifacts/json_artifact.md) is used for passing JSON-serliazable data around the framework. Anything passed to `value` will be converted using `json.dumps(json.loads(value))`. +[GenericArtifact](../../reference/griptape/artifacts/generic_artifact.md)s provide a flexible way to pass data that does not fit into any other artifact category. While not generally recommended, they can be useful for specific use cases. For instance, see [talking to a video](../../examples/talk-to-a-video.md), which demonstrates using a `GenericArtifact` to pass a Gemini-specific video file. diff --git a/docs/griptape-framework/data/loaders.md b/docs/griptape-framework/data/loaders.md index 914fdee2a..0c0fc3ead 100644 --- a/docs/griptape-framework/data/loaders.md +++ b/docs/griptape-framework/data/loaders.md @@ -22,7 +22,7 @@ Inherits from the [TextLoader](../../reference/griptape/loaders/text_loader.md) ## SQL -Can be used to load data from a SQL database into [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md)s: +Can be used to load data from a SQL database into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s: ```python --8<-- "docs/griptape-framework/data/src/loaders_2.py" @@ -30,7 +30,7 @@ Can be used to load data from a SQL database into [CsvRowArtifact](../../referen ## CSV -Can be used to load CSV files into [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md)s: +Can be used to load CSV files into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s: ```python --8<-- "docs/griptape-framework/data/src/loaders_3.py" @@ -42,7 +42,7 @@ Can be used to load CSV files into [CsvRowArtifact](../../reference/griptape/art !!! info This driver requires the `loaders-dataframe` [extra](../index.md#extras). 
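Like `CsvLoader`, this Loader renders each row into a `TextArtifact` and accepts a `formatter_fn` for controlling that rendering; a small sketch (column names and data are illustrative, and `pandas` must be installed):

```python
import pandas as pd

from griptape.loaders import DataFrameLoader

df = pd.DataFrame({"name": ["John", "Jane"], "age": [30, 28]})

# Each row dict is passed through formatter_fn to produce the TextArtifact's value.
artifacts = DataFrameLoader(
    formatter_fn=lambda row: ", ".join(f"{k}={v}" for k, v in row.items()),
).load(df)

print(artifacts[0].value)  # name=John, age=30
```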
-Can be used to load [pandas](https://pandas.pydata.org/) [DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)s into [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md)s: +Can be used to load [pandas](https://pandas.pydata.org/) [DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)s into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s: ```python --8<-- "docs/griptape-framework/data/src/loaders_4.py" diff --git a/griptape/artifacts/__init__.py b/griptape/artifacts/__init__.py index f39bfea8d..0e58a8a76 100644 --- a/griptape/artifacts/__init__.py +++ b/griptape/artifacts/__init__.py @@ -5,9 +5,7 @@ from .json_artifact import JsonArtifact from .blob_artifact import BlobArtifact from .boolean_artifact import BooleanArtifact -from .csv_row_artifact import CsvRowArtifact from .list_artifact import ListArtifact -from .media_artifact import MediaArtifact from .image_artifact import ImageArtifact from .audio_artifact import AudioArtifact from .action_artifact import ActionArtifact @@ -22,9 +20,7 @@ "JsonArtifact", "BlobArtifact", "BooleanArtifact", - "CsvRowArtifact", "ListArtifact", - "MediaArtifact", "ImageArtifact", "AudioArtifact", "ActionArtifact", diff --git a/griptape/artifacts/action_artifact.py b/griptape/artifacts/action_artifact.py index 9772bbbab..d882d0638 100644 --- a/griptape/artifacts/action_artifact.py +++ b/griptape/artifacts/action_artifact.py @@ -5,15 +5,20 @@ from attrs import define, field from griptape.artifacts import BaseArtifact -from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.common import ToolAction @define() -class ActionArtifact(BaseArtifact, SerializableMixin): +class ActionArtifact(BaseArtifact): + """Represents the LLM taking an action to use a Tool. + + Attributes: + value: The Action to take. Currently only supports ToolAction. + """ + value: ToolAction = field(metadata={"serializable": True}) - def __add__(self, other: BaseArtifact) -> ActionArtifact: - raise NotImplementedError + def to_text(self) -> str: + return str(self.value) diff --git a/griptape/artifacts/audio_artifact.py b/griptape/artifacts/audio_artifact.py index 3dc67fa36..e9e38858a 100644 --- a/griptape/artifacts/audio_artifact.py +++ b/griptape/artifacts/audio_artifact.py @@ -1,12 +1,26 @@ from __future__ import annotations -from attrs import define +from attrs import define, field -from griptape.artifacts import MediaArtifact +from griptape.artifacts import BlobArtifact @define -class AudioArtifact(MediaArtifact): - """AudioArtifact is a type of MediaArtifact representing audio.""" +class AudioArtifact(BlobArtifact): + """Stores audio data. - media_type: str = "audio" + Attributes: + format: The audio format, e.g. "wav" or "mp3". 
+ """ + + format: str = field(kw_only=True, metadata={"serializable": True}) + + @property + def mime_type(self) -> str: + return f"audio/{self.format}" + + def to_bytes(self) -> bytes: + return self.value + + def to_text(self) -> str: + return f"Audio, format: {self.format}, size: {len(self.value)} bytes" diff --git a/griptape/artifacts/base_artifact.py b/griptape/artifacts/base_artifact.py index 82a0bbd23..61989ab54 100644 --- a/griptape/artifacts/base_artifact.py +++ b/griptape/artifacts/base_artifact.py @@ -1,6 +1,5 @@ from __future__ import annotations -import json import uuid from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any, Optional @@ -15,6 +14,20 @@ @define class BaseArtifact(SerializableMixin, ABC): + """Serves as the base class for all Artifacts. + + Artifacts are used to encapsulate data and enhance it with metadata. + + Attributes: + id: The unique identifier of the Artifact. Defaults to a random UUID. + reference: The optional Reference to the Artifact. + meta: The metadata associated with the Artifact. Defaults to an empty dictionary. + name: The name of the Artifact. Defaults to the id. + value: The value of the Artifact. + encoding: The encoding to use when encoding/decoding the value. + encoding_error_handler: The error handler to use when encoding/decoding the value. + """ + id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={"serializable": True}) reference: Optional[Reference] = field(default=None, kw_only=True, metadata={"serializable": True}) meta: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True}) @@ -24,22 +37,8 @@ class BaseArtifact(SerializableMixin, ABC): metadata={"serializable": True}, ) value: Any = field() - - @classmethod - def value_to_bytes(cls, value: Any) -> bytes: - if isinstance(value, bytes): - return value - else: - return str(value).encode() - - @classmethod - def value_to_dict(cls, value: Any) -> dict: - dict_value = value if isinstance(value, dict) else json.loads(value) - - return dict(dict_value.items()) - - def to_text(self) -> str: - return str(self.value) + encoding_error_handler: str = field(default="strict", kw_only=True) + encoding: str = field(default="utf-8", kw_only=True) def __str__(self) -> str: return self.to_text() @@ -50,5 +49,8 @@ def __bool__(self) -> bool: def __len__(self) -> int: return len(self.value) + def to_bytes(self) -> bytes: + return self.to_text().encode(encoding=self.encoding, errors=self.encoding_error_handler) + @abstractmethod - def __add__(self, other: BaseArtifact) -> BaseArtifact: ... + def to_text(self) -> str: ... diff --git a/griptape/artifacts/blob_artifact.py b/griptape/artifacts/blob_artifact.py index 0c0dcc122..7c814a052 100644 --- a/griptape/artifacts/blob_artifact.py +++ b/griptape/artifacts/blob_artifact.py @@ -1,7 +1,6 @@ from __future__ import annotations -import os.path -from typing import Optional +import base64 from attrs import define, field @@ -10,17 +9,27 @@ @define class BlobArtifact(BaseArtifact): - value: bytes = field(converter=BaseArtifact.value_to_bytes, metadata={"serializable": True}) - dir_name: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - encoding: str = field(default="utf-8", kw_only=True) - encoding_error_handler: str = field(default="strict", kw_only=True) + """Stores arbitrary binary data. - def __add__(self, other: BaseArtifact) -> BlobArtifact: - return BlobArtifact(self.value + other.value, name=self.name) + Attributes: + value: The binary data. 
+ """ + + value: bytes = field( + converter=lambda value: value if isinstance(value, bytes) else str(value).encode(), + metadata={"serializable": True}, + ) + + @property + def base64(self) -> str: + return base64.b64encode(self.value).decode(self.encoding) @property - def full_path(self) -> str: - return os.path.join(self.dir_name, self.name) if self.dir_name else self.name + def mime_type(self) -> str: + return "application/octet-stream" + + def to_bytes(self) -> bytes: + return self.value def to_text(self) -> str: return self.value.decode(encoding=self.encoding, errors=self.encoding_error_handler) diff --git a/griptape/artifacts/boolean_artifact.py b/griptape/artifacts/boolean_artifact.py index 5bcdfac9b..eb135824d 100644 --- a/griptape/artifacts/boolean_artifact.py +++ b/griptape/artifacts/boolean_artifact.py @@ -9,17 +9,23 @@ @define class BooleanArtifact(BaseArtifact): + """Stores a boolean value. + + Attributes: + value: The boolean value. + """ + value: bool = field(converter=bool, metadata={"serializable": True}) @classmethod - def parse_bool(cls, value: Union[str, bool]) -> BooleanArtifact: # noqa: FBT001 - """Convert a string literal or bool to a BooleanArtifact. The string must be either "true" or "false" with any casing.""" + def parse_bool(cls, value: Union[str, bool]) -> BooleanArtifact: + """Convert a string literal or bool to a BooleanArtifact. The string must be either "true" or "false".""" if value is not None: if isinstance(value, str): if value.lower() == "true": - return BooleanArtifact(True) # noqa: FBT003 + return BooleanArtifact(value=True) elif value.lower() == "false": - return BooleanArtifact(False) # noqa: FBT003 + return BooleanArtifact(value=False) elif isinstance(value, bool): return BooleanArtifact(value) raise ValueError(f"Cannot convert '{value}' to BooleanArtifact") @@ -28,4 +34,7 @@ def __add__(self, other: BaseArtifact) -> BooleanArtifact: raise ValueError("Cannot add BooleanArtifact with other artifacts") def __eq__(self, value: object) -> bool: - return self.value is value + return self.value == value + + def to_text(self) -> str: + return str(self.value).lower() diff --git a/griptape/artifacts/csv_row_artifact.py b/griptape/artifacts/csv_row_artifact.py deleted file mode 100644 index 00f1047fc..000000000 --- a/griptape/artifacts/csv_row_artifact.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import annotations - -import csv -import io - -from attrs import define, field - -from griptape.artifacts import BaseArtifact, TextArtifact - - -@define -class CsvRowArtifact(TextArtifact): - value: dict[str, str] = field(converter=BaseArtifact.value_to_dict, metadata={"serializable": True}) - delimiter: str = field(default=",", kw_only=True, metadata={"serializable": True}) - - def __add__(self, other: BaseArtifact) -> CsvRowArtifact: - return CsvRowArtifact(self.value | other.value) - - def __bool__(self) -> bool: - return len(self) > 0 - - def to_text(self) -> str: - with io.StringIO() as csvfile: - writer = csv.DictWriter( - csvfile, - fieldnames=self.value.keys(), - quoting=csv.QUOTE_MINIMAL, - delimiter=self.delimiter, - ) - - writer.writeheader() - writer.writerow(self.value) - - return csvfile.getvalue().strip() diff --git a/griptape/artifacts/error_artifact.py b/griptape/artifacts/error_artifact.py index d065d754b..27e6a37ab 100644 --- a/griptape/artifacts/error_artifact.py +++ b/griptape/artifacts/error_artifact.py @@ -9,8 +9,15 @@ @define class ErrorArtifact(BaseArtifact): + """Represents an error that may want to be conveyed to the LLM. 
+ + Attributes: + value: The error message. + exception: The exception that caused the error. Defaults to None. + """ + value: str = field(converter=str, metadata={"serializable": True}) exception: Optional[Exception] = field(default=None, kw_only=True, metadata={"serializable": False}) - def __add__(self, other: BaseArtifact) -> ErrorArtifact: - return ErrorArtifact(self.value + other.value) + def to_text(self) -> str: + return self.value diff --git a/griptape/artifacts/generic_artifact.py b/griptape/artifacts/generic_artifact.py index 8e0b7e38c..e90f40ef0 100644 --- a/griptape/artifacts/generic_artifact.py +++ b/griptape/artifacts/generic_artifact.py @@ -9,7 +9,13 @@ @define class GenericArtifact(BaseArtifact): + """Serves as an escape hatch for artifacts that don't fit into any other category. + + Attributes: + value: The value of the Artifact. + """ + value: Any = field(metadata={"serializable": True}) - def __add__(self, other: BaseArtifact) -> BaseArtifact: - raise NotImplementedError + def to_text(self) -> str: + return str(self.value) diff --git a/griptape/artifacts/image_artifact.py b/griptape/artifacts/image_artifact.py index e963b3881..36170ee0d 100644 --- a/griptape/artifacts/image_artifact.py +++ b/griptape/artifacts/image_artifact.py @@ -2,22 +2,29 @@ from attrs import define, field -from griptape.artifacts import MediaArtifact +from griptape.artifacts import BlobArtifact @define -class ImageArtifact(MediaArtifact): - """ImageArtifact is a type of MediaArtifact representing an image. +class ImageArtifact(BlobArtifact): + """Stores image data. Attributes: - value: Raw bytes representing media data. - media_type: The type of media, defaults to "image". - format: The format of the media, like png, jpeg, or gif. - name: Artifact name, generated using creation time and a random string. - model: Optionally specify the model used to generate the media. - prompt: Optionally specify the prompt used to generate the media. + format: The format of the image data. Used when building the MIME type. + width: The width of the image. + height: The height of the image """ - media_type: str = "image" + format: str = field(kw_only=True, metadata={"serializable": True}) width: int = field(kw_only=True, metadata={"serializable": True}) height: int = field(kw_only=True, metadata={"serializable": True}) + + @property + def mime_type(self) -> str: + return f"image/{self.format}" + + def to_bytes(self) -> bytes: + return self.value + + def to_text(self) -> str: + return f"Image, format: {self.format}, size: {len(self.value)} bytes" diff --git a/griptape/artifacts/info_artifact.py b/griptape/artifacts/info_artifact.py index 26fe6366b..3391554e9 100644 --- a/griptape/artifacts/info_artifact.py +++ b/griptape/artifacts/info_artifact.py @@ -7,7 +7,15 @@ @define class InfoArtifact(BaseArtifact): + """Represents helpful info that can be conveyed to the LLM. + + For example, "No results found" or "Please try again.". + + Attributes: + value: The info to convey. 
+ """ + value: str = field(converter=str, metadata={"serializable": True}) - def __add__(self, other: BaseArtifact) -> InfoArtifact: - return InfoArtifact(self.value + other.value) + def to_text(self) -> str: + return self.value diff --git a/griptape/artifacts/json_artifact.py b/griptape/artifacts/json_artifact.py index b292879a9..57700afd5 100644 --- a/griptape/artifacts/json_artifact.py +++ b/griptape/artifacts/json_artifact.py @@ -1,7 +1,7 @@ from __future__ import annotations import json -from typing import Union +from typing import Any, Union from attrs import define, field @@ -12,10 +12,20 @@ @define class JsonArtifact(BaseArtifact): - value: Json = field(converter=lambda v: json.loads(json.dumps(v)), metadata={"serializable": True}) + """Stores JSON data. + + Attributes: + value: The JSON data. Values will automatically be converted to a JSON-compatible format. + """ + + value: Json = field(converter=lambda value: JsonArtifact.value_to_json(value), metadata={"serializable": True}) + + @classmethod + def value_to_json(cls, value: Any) -> Json: + if isinstance(value, str): + return json.loads(value) + else: + return json.loads(json.dumps(value)) def to_text(self) -> str: return json.dumps(self.value) - - def __add__(self, other: BaseArtifact) -> JsonArtifact: - raise NotImplementedError diff --git a/griptape/artifacts/list_artifact.py b/griptape/artifacts/list_artifact.py index 298f29c6a..0e6f81ca5 100644 --- a/griptape/artifacts/list_artifact.py +++ b/griptape/artifacts/list_artifact.py @@ -1,23 +1,37 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Generic, Optional, TypeVar from attrs import Attribute, define, field from griptape.artifacts import BaseArtifact if TYPE_CHECKING: - from collections.abc import Sequence + from collections.abc import Iterator, Sequence + +T = TypeVar("T", bound=BaseArtifact, covariant=True) @define -class ListArtifact(BaseArtifact): - value: Sequence[BaseArtifact] = field(factory=list, metadata={"serializable": True}) +class ListArtifact(BaseArtifact, Generic[T]): + value: Sequence[T] = field(factory=list, metadata={"serializable": True}) item_separator: str = field(default="\n\n", kw_only=True, metadata={"serializable": True}) validate_uniform_types: bool = field(default=False, kw_only=True, metadata={"serializable": True}) + def __getitem__(self, key: int) -> T: + return self.value[key] + + def __bool__(self) -> bool: + return len(self) > 0 + + def __add__(self, other: BaseArtifact) -> ListArtifact[T]: + return ListArtifact(self.value + other.value) + + def __iter__(self) -> Iterator[T]: + return iter(self.value) + @value.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_value(self, _: Attribute, value: list[BaseArtifact]) -> None: + def validate_value(self, _: Attribute, value: list[T]) -> None: if self.validate_uniform_types and len(value) > 0: first_type = type(value[0]) @@ -31,18 +45,9 @@ def child_type(self) -> Optional[type]: else: return None - def __getitem__(self, key: int) -> BaseArtifact: - return self.value[key] - - def __bool__(self) -> bool: - return len(self) > 0 - def to_text(self) -> str: return self.item_separator.join([v.to_text() for v in self.value]) - def __add__(self, other: BaseArtifact) -> BaseArtifact: - return ListArtifact(self.value + other.value) - def is_type(self, target_type: type) -> bool: if self.value: return isinstance(self.value[0], target_type) diff --git a/griptape/artifacts/media_artifact.py 
b/griptape/artifacts/media_artifact.py deleted file mode 100644 index a57217fc7..000000000 --- a/griptape/artifacts/media_artifact.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import annotations - -import base64 -import random -import string -import time -from typing import Optional - -from attrs import define, field - -from griptape.artifacts import BlobArtifact - - -@define -class MediaArtifact(BlobArtifact): - """MediaArtifact is a type of BlobArtifact that represents media (image, audio, video, etc.) and can be extended to support a specific media type. - - Attributes: - value: Raw bytes representing media data. - media_type: The type of media, like image, audio, or video. - format: The format of the media, like png, wav, or mp4. - name: Artifact name, generated using creation time and a random string. - model: Optionally specify the model used to generate the media. - prompt: Optionally specify the prompt used to generate the media. - """ - - media_type: str = field(default="media", kw_only=True, metadata={"serializable": True}) - format: str = field(kw_only=True, metadata={"serializable": True}) - model: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - prompt: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - - def __attrs_post_init__(self) -> None: - # Generating the name string requires attributes set by child classes. - # This waits until all attributes are available before generating a name. - if self.name == self.id: - self.name = self.make_name() - - @property - def mime_type(self) -> str: - return f"{self.media_type}/{self.format}" - - @property - def base64(self) -> str: - return base64.b64encode(self.value).decode("utf-8") - - def to_text(self) -> str: - return f"Media, type: {self.mime_type}, size: {len(self.value)} bytes" - - def make_name(self) -> str: - entropy = "".join(random.choices(string.ascii_lowercase + string.digits, k=4)) - fmt_time = time.strftime("%y%m%d%H%M%S", time.localtime()) - - return f"{self.media_type}_artifact_{fmt_time}_{entropy}.{self.format}" diff --git a/griptape/artifacts/text_artifact.py b/griptape/artifacts/text_artifact.py index 752f66615..9623c8096 100644 --- a/griptape/artifacts/text_artifact.py +++ b/griptape/artifacts/text_artifact.py @@ -14,13 +14,7 @@ @define class TextArtifact(BaseArtifact): value: str = field(converter=str, metadata={"serializable": True}) - encoding: str = field(default="utf-8", kw_only=True) - encoding_error_handler: str = field(default="strict", kw_only=True) - _embedding: list[float] = field(factory=list, kw_only=True) - - @property - def embedding(self) -> Optional[list[float]]: - return None if len(self._embedding) == 0 else self._embedding + embedding: Optional[list[float]] = field(default=None, kw_only=True) def __add__(self, other: BaseArtifact) -> TextArtifact: return TextArtifact(self.value + other.value) @@ -28,14 +22,18 @@ def __add__(self, other: BaseArtifact) -> TextArtifact: def __bool__(self) -> bool: return bool(self.value.strip()) - def generate_embedding(self, driver: BaseEmbeddingDriver) -> Optional[list[float]]: - self._embedding.clear() - self._embedding.extend(driver.embed_string(str(self.value))) + def to_text(self) -> str: + return self.value + + def generate_embedding(self, driver: BaseEmbeddingDriver) -> list[float]: + embedding = driver.embed_string(str(self.value)) + + if self.embedding is None: + self.embedding = [] + self.embedding.clear() + self.embedding.extend(embedding) return self.embedding def 
token_count(self, tokenizer: BaseTokenizer) -> int: return tokenizer.count_tokens(str(self.value)) - - def to_bytes(self) -> bytes: - return str(self.value).encode(encoding=self.encoding, errors=self.encoding_error_handler) diff --git a/griptape/common/prompt_stack/prompt_stack.py b/griptape/common/prompt_stack/prompt_stack.py index 6d8dfde75..77ce4ba9b 100644 --- a/griptape/common/prompt_stack/prompt_stack.py +++ b/griptape/common/prompt_stack/prompt_stack.py @@ -7,7 +7,6 @@ from griptape.artifacts import ( ActionArtifact, BaseArtifact, - ErrorArtifact, GenericArtifact, ImageArtifact, ListArtifact, @@ -70,8 +69,6 @@ def __to_message_content(self, artifact: str | BaseArtifact) -> list[BaseMessage return [ImageMessageContent(artifact)] elif isinstance(artifact, GenericArtifact): return [GenericMessageContent(artifact)] - elif isinstance(artifact, ErrorArtifact): - return [TextMessageContent(TextArtifact(artifact.to_text()))] elif isinstance(artifact, ActionArtifact): action = artifact.value output = action.output @@ -81,6 +78,7 @@ def __to_message_content(self, artifact: str | BaseArtifact) -> list[BaseMessage return [ActionResultMessageContent(output, action=action)] elif isinstance(artifact, ListArtifact): processed_contents = [self.__to_message_content(artifact) for artifact in artifact.value] + return [sub_content for processed_content in processed_contents for sub_content in processed_content] else: - raise ValueError(f"Unsupported artifact type: {type(artifact)}") + return [TextMessageContent(TextArtifact(artifact.to_text()))] diff --git a/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py b/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py index 7106c8192..4db302f6f 100644 --- a/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py +++ b/griptape/drivers/image_generation/amazon_bedrock_image_generation_driver.py @@ -46,12 +46,11 @@ def try_text_to_image(self, prompts: list[str], negative_prompts: Optional[list[ image_bytes = self._make_request(request) return ImageArtifact( - prompt=", ".join(prompts), value=image_bytes, format="png", width=self.image_width, height=self.image_height, - model=self.model, + meta={"prompt": ", ".join(prompts), "model": self.model}, ) def try_image_variation( @@ -70,12 +69,11 @@ def try_image_variation( image_bytes = self._make_request(request) return ImageArtifact( - prompt=", ".join(prompts), value=image_bytes, format="png", width=image.width, height=image.height, - model=self.model, + meta={"prompt": ", ".join(prompts), "model": self.model}, ) def try_image_inpainting( @@ -96,12 +94,11 @@ def try_image_inpainting( image_bytes = self._make_request(request) return ImageArtifact( - prompt=", ".join(prompts), value=image_bytes, format="png", width=image.width, height=image.height, - model=self.model, + meta={"prompt": ", ".join(prompts), "model": self.model}, ) def try_image_outpainting( @@ -122,12 +119,11 @@ def try_image_outpainting( image_bytes = self._make_request(request) return ImageArtifact( - prompt=", ".join(prompts), value=image_bytes, format="png", width=image.width, height=image.height, - model=self.model, + meta={"prompt": ", ".join(prompts), "model": self.model}, ) def _make_request(self, request: dict) -> bytes: diff --git a/griptape/drivers/image_generation/huggingface_pipeline_image_generation_driver.py b/griptape/drivers/image_generation/huggingface_pipeline_image_generation_driver.py index 46dbcd331..b89df1c4b 100644 --- 
a/griptape/drivers/image_generation/huggingface_pipeline_image_generation_driver.py +++ b/griptape/drivers/image_generation/huggingface_pipeline_image_generation_driver.py @@ -44,7 +44,7 @@ def try_text_to_image(self, prompts: list[str], negative_prompts: Optional[list[ format=self.output_format.lower(), height=output_image.height, width=output_image.width, - prompt=prompt, + meta={"prompt": prompt}, ) def try_image_variation( @@ -76,7 +76,7 @@ def try_image_variation( format=self.output_format.lower(), height=output_image.height, width=output_image.width, - prompt=prompt, + meta={"prompt": prompt}, ) def try_image_inpainting( diff --git a/griptape/drivers/image_generation/leonardo_image_generation_driver.py b/griptape/drivers/image_generation/leonardo_image_generation_driver.py index e32dbb4c7..db89244bf 100644 --- a/griptape/drivers/image_generation/leonardo_image_generation_driver.py +++ b/griptape/drivers/image_generation/leonardo_image_generation_driver.py @@ -60,8 +60,10 @@ def try_text_to_image(self, prompts: list[str], negative_prompts: Optional[list[ format="png", width=self.image_width, height=self.image_height, - model=self.model, - prompt=", ".join(prompts), + meta={ + "model": self.model, + "prompt": ", ".join(prompts), + }, ) def try_image_variation( @@ -87,8 +89,10 @@ def try_image_variation( format="png", width=self.image_width, height=self.image_height, - model=self.model, - prompt=", ".join(prompts), + meta={ + "model": self.model, + "prompt": ", ".join(prompts), + }, ) def try_image_outpainting( diff --git a/griptape/drivers/image_generation/openai_image_generation_driver.py b/griptape/drivers/image_generation/openai_image_generation_driver.py index 0ee50a1e2..bf77ac300 100644 --- a/griptape/drivers/image_generation/openai_image_generation_driver.py +++ b/griptape/drivers/image_generation/openai_image_generation_driver.py @@ -151,6 +151,5 @@ def _parse_image_response(self, response: ImagesResponse, prompt: str) -> ImageA format="png", width=image_dimensions[0], height=image_dimensions[1], - model=self.model, - prompt=prompt, + meta={"model": self.model, "prompt": prompt}, ) diff --git a/griptape/engines/extraction/csv_extraction_engine.py b/griptape/engines/extraction/csv_extraction_engine.py index b45bdf7f5..e7be73c73 100644 --- a/griptape/engines/extraction/csv_extraction_engine.py +++ b/griptape/engines/extraction/csv_extraction_engine.py @@ -2,11 +2,11 @@ import csv import io -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, Callable, Optional, cast from attrs import Factory, define, field -from griptape.artifacts import CsvRowArtifact, ListArtifact, TextArtifact +from griptape.artifacts import ListArtifact, TextArtifact from griptape.common import Message, PromptStack from griptape.engines import BaseExtractionEngine from griptape.utils import J2 @@ -20,6 +20,9 @@ class CsvExtractionEngine(BaseExtractionEngine): column_names: list[str] = field(default=Factory(list), kw_only=True) system_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/system.j2")), kw_only=True) user_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/user.j2")), kw_only=True) + formatter_fn: Callable[[dict], str] = field( + default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True + ) def extract( self, @@ -32,26 +35,27 @@ def extract( self._extract_rec( cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], [], + 
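The image generation drivers above all switch from dedicated model/prompt kwargs to the generic meta dict; a construction sketch mirroring the updated artifact tests (the byte value and model name are placeholders):

from griptape.artifacts import ImageArtifact

image = ImageArtifact(
    value=b"some binary png image data",
    format="png",
    width=512,
    height=512,
    meta={"model": "example/model", "prompt": "a cute cat"},  # formerly model=/prompt= kwargs
)
assert image.meta["prompt"] == "a cute cat"
assert image.to_text() == "Image, format: png, size: 26 bytes"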
rulesets=rulesets, ), item_separator="\n", ) - def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[CsvRowArtifact]: + def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[TextArtifact]: rows = [] with io.StringIO(text) as f: for row in csv.reader(f): - rows.append(CsvRowArtifact(dict(zip(column_names, [x.strip() for x in row])))) + rows.append(TextArtifact(self.formatter_fn(dict(zip(column_names, [x.strip() for x in row]))))) return rows def _extract_rec( self, artifacts: list[TextArtifact], - rows: list[CsvRowArtifact], + rows: list[TextArtifact], *, rulesets: Optional[list[Ruleset]] = None, - ) -> list[CsvRowArtifact]: + ) -> list[TextArtifact]: artifacts_text = self.chunk_joiner.join([a.value for a in artifacts]) system_prompt = self.system_template_generator.render( column_names=self.column_names, diff --git a/griptape/loaders/csv_loader.py b/griptape/loaders/csv_loader.py index 14dfe3e4a..bcf7029d4 100644 --- a/griptape/loaders/csv_loader.py +++ b/griptape/loaders/csv_loader.py @@ -2,11 +2,11 @@ import csv from io import StringIO -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, Callable, Optional, cast from attrs import define, field -from griptape.artifacts import CsvRowArtifact +from griptape.artifacts import TextArtifact from griptape.loaders import BaseLoader if TYPE_CHECKING: @@ -18,8 +18,11 @@ class CsvLoader(BaseLoader): embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) delimiter: str = field(default=",", kw_only=True) encoding: str = field(default="utf-8", kw_only=True) + formatter_fn: Callable[[dict], str] = field( + default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True + ) - def load(self, source: bytes | str, *args, **kwargs) -> list[CsvRowArtifact]: + def load(self, source: bytes | str, *args, **kwargs) -> list[TextArtifact]: artifacts = [] if isinstance(source, bytes): @@ -28,7 +31,7 @@ def load(self, source: bytes | str, *args, **kwargs) -> list[CsvRowArtifact]: raise ValueError(f"Unsupported source type: {type(source)}") reader = csv.DictReader(StringIO(source), delimiter=self.delimiter) - chunks = [CsvRowArtifact(row) for row in reader] + chunks = [TextArtifact(self.formatter_fn(row)) for row in reader] if self.embedding_driver: for chunk in chunks: @@ -44,8 +47,8 @@ def load_collection( sources: list[bytes | str], *args, **kwargs, - ) -> dict[str, list[CsvRowArtifact]]: + ) -> dict[str, list[TextArtifact]]: return cast( - dict[str, list[CsvRowArtifact]], + dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs), ) diff --git a/griptape/loaders/dataframe_loader.py b/griptape/loaders/dataframe_loader.py index 0b1ae1448..30d705676 100644 --- a/griptape/loaders/dataframe_loader.py +++ b/griptape/loaders/dataframe_loader.py @@ -1,10 +1,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, Callable, Optional, cast from attrs import define, field -from griptape.artifacts import CsvRowArtifact +from griptape.artifacts import TextArtifact from griptape.loaders import BaseLoader from griptape.utils import import_optional_dependency from griptape.utils.hash import str_to_hash @@ -18,11 +18,14 @@ @define class DataFrameLoader(BaseLoader): embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) + formatter_fn: Callable[[dict], str] = field( + default=lambda value: "\n".join(f"{key}: {val}" for key, val in 
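The formatter_fn hook introduced here (and reused by the loaders below) defaults to rendering a row dict as newline-separated "key: value" pairs; a standalone restatement of that default, using a local helper name of my own:

def default_formatter(value: dict) -> str:
    # Same callable the patch installs as the default formatter_fn.
    return "\n".join(f"{key}: {val}" for key, val in value.items())

assert default_formatter({"test1": "foo", "test2": "bar"}) == "test1: foo\ntest2: bar"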
value.items()), kw_only=True + ) - def load(self, source: DataFrame, *args, **kwargs) -> list[CsvRowArtifact]: + def load(self, source: DataFrame, *args, **kwargs) -> list[TextArtifact]: artifacts = [] - chunks = [CsvRowArtifact(row) for row in source.to_dict(orient="records")] + chunks = [TextArtifact(self.formatter_fn(row)) for row in source.to_dict(orient="records")] if self.embedding_driver: for chunk in chunks: @@ -33,8 +36,8 @@ def load(self, source: DataFrame, *args, **kwargs) -> list[CsvRowArtifact]: return artifacts - def load_collection(self, sources: list[DataFrame], *args, **kwargs) -> dict[str, list[CsvRowArtifact]]: - return cast(dict[str, list[CsvRowArtifact]], super().load_collection(sources, *args, **kwargs)) + def load_collection(self, sources: list[DataFrame], *args, **kwargs) -> dict[str, list[TextArtifact]]: + return cast(dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs)) def to_key(self, source: DataFrame, *args, **kwargs) -> str: hash_pandas_object = import_optional_dependency("pandas.core.util.hashing").hash_pandas_object diff --git a/griptape/loaders/sql_loader.py b/griptape/loaders/sql_loader.py index e4522796f..105f585cb 100644 --- a/griptape/loaders/sql_loader.py +++ b/griptape/loaders/sql_loader.py @@ -1,10 +1,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional, cast +from typing import TYPE_CHECKING, Callable, Optional, cast from attrs import define, field -from griptape.artifacts import CsvRowArtifact +from griptape.artifacts import TextArtifact from griptape.loaders import BaseLoader if TYPE_CHECKING: @@ -15,12 +15,15 @@ class SqlLoader(BaseLoader): sql_driver: BaseSqlDriver = field(kw_only=True) embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) + formatter_fn: Callable[[dict], str] = field( + default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True + ) - def load(self, source: str, *args, **kwargs) -> list[CsvRowArtifact]: + def load(self, source: str, *args, **kwargs) -> list[TextArtifact]: rows = self.sql_driver.execute_query(source) artifacts = [] - chunks = [CsvRowArtifact(row.cells) for row in rows] if rows else [] + chunks = [TextArtifact(self.formatter_fn(row.cells)) for row in rows] if rows else [] if self.embedding_driver: for chunk in chunks: @@ -31,5 +34,5 @@ def load(self, source: str, *args, **kwargs) -> list[CsvRowArtifact]: return artifacts - def load_collection(self, sources: list[str], *args, **kwargs) -> dict[str, list[CsvRowArtifact]]: - return cast(dict[str, list[CsvRowArtifact]], super().load_collection(sources, *args, **kwargs)) + def load_collection(self, sources: list[str], *args, **kwargs) -> dict[str, list[TextArtifact]]: + return cast(dict[str, list[TextArtifact]], super().load_collection(sources, *args, **kwargs)) diff --git a/griptape/mixins/media_artifact_file_output_mixin.py b/griptape/mixins/artifact_file_output_mixin.py similarity index 87% rename from griptape/mixins/media_artifact_file_output_mixin.py rename to griptape/mixins/artifact_file_output_mixin.py index 9b9f34911..25ed8718d 100644 --- a/griptape/mixins/media_artifact_file_output_mixin.py +++ b/griptape/mixins/artifact_file_output_mixin.py @@ -7,11 +7,11 @@ from attrs import Attribute, define, field if TYPE_CHECKING: - from griptape.artifacts import BlobArtifact + from griptape.artifacts import BaseArtifact @define(slots=False) -class BlobArtifactFileOutputMixin: +class ArtifactFileOutputMixin: output_dir: Optional[str] = 
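A rough usage sketch of the loader side of this change, based on the updated CsvLoader tests: rows now come back as TextArtifacts, and formatter_fn can swap in any dict-to-string rendering (the inline CSV bytes are illustrative only):

import json

from griptape.loaders import CsvLoader

rows = CsvLoader().load(b"Foo,Bar\nfoo1,bar1")
assert rows[0].value == "Foo: foo1\nBar: bar1"  # default key: value formatting

json_rows = CsvLoader(formatter_fn=json.dumps).load(b"Foo,Bar\nfoo1,bar1")
assert json_rows[0].value == '{"Foo": "foo1", "Bar": "bar1"}'  # same override as in the tests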
field(default=None, kw_only=True) output_file: Optional[str] = field(default=None, kw_only=True) @@ -31,7 +31,7 @@ def validate_output_file(self, _: Attribute, output_file: str) -> None: if self.output_dir: raise ValueError("Can't have both output_dir and output_file specified.") - def _write_to_file(self, artifact: BlobArtifact) -> None: + def _write_to_file(self, artifact: BaseArtifact) -> None: if self.output_file: outfile = self.output_file elif self.output_dir: @@ -42,4 +42,4 @@ def _write_to_file(self, artifact: BlobArtifact) -> None: if os.path.dirname(outfile): os.makedirs(os.path.dirname(outfile), exist_ok=True) - Path(outfile).write_bytes(artifact.value) + Path(outfile).write_bytes(artifact.to_bytes()) diff --git a/griptape/schemas/base_schema.py b/griptape/schemas/base_schema.py index 9290c6098..dde3ae49a 100644 --- a/griptape/schemas/base_schema.py +++ b/griptape/schemas/base_schema.py @@ -2,7 +2,7 @@ from abc import ABC from collections.abc import Sequence -from typing import Any, Literal, Union, _SpecialForm, get_args, get_origin +from typing import Any, Literal, TypeVar, Union, _SpecialForm, get_args, get_origin import attrs from marshmallow import INCLUDE, Schema, fields @@ -56,6 +56,10 @@ def _get_field_for_type(cls, field_type: type) -> fields.Field | fields.Nested: field_class, args, optional = cls._get_field_type_info(field_type) + # Resolve TypeVars to their bound type + if isinstance(field_class, TypeVar): + field_class = field_class.__bound__ + if attrs.has(field_class): if ABC in field_class.__bases__: return fields.Nested(PolymorphicSchema(inner_class=field_class), allow_none=optional) diff --git a/griptape/tasks/base_audio_generation_task.py b/griptape/tasks/base_audio_generation_task.py index fae217d54..91f7b7501 100644 --- a/griptape/tasks/base_audio_generation_task.py +++ b/griptape/tasks/base_audio_generation_task.py @@ -6,7 +6,7 @@ from attrs import define from griptape.configs import Defaults -from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin +from griptape.mixins.artifact_file_output_mixin import ArtifactFileOutputMixin from griptape.mixins.rule_mixin import RuleMixin from griptape.tasks import BaseTask @@ -14,7 +14,7 @@ @define -class BaseAudioGenerationTask(BlobArtifactFileOutputMixin, RuleMixin, BaseTask, ABC): +class BaseAudioGenerationTask(ArtifactFileOutputMixin, RuleMixin, BaseTask, ABC): def before_run(self) -> None: super().before_run() diff --git a/griptape/tasks/base_image_generation_task.py b/griptape/tasks/base_image_generation_task.py index bd36d0080..326b2a551 100644 --- a/griptape/tasks/base_image_generation_task.py +++ b/griptape/tasks/base_image_generation_task.py @@ -10,20 +10,19 @@ from griptape.configs import Defaults from griptape.loaders import ImageLoader -from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin +from griptape.mixins.artifact_file_output_mixin import ArtifactFileOutputMixin from griptape.mixins.rule_mixin import RuleMixin from griptape.rules import Rule, Ruleset from griptape.tasks import BaseTask if TYPE_CHECKING: - from griptape.artifacts import MediaArtifact - + from griptape.artifacts import ImageArtifact logger = logging.getLogger(Defaults.logging_config.logger_name) @define -class BaseImageGenerationTask(BlobArtifactFileOutputMixin, RuleMixin, BaseTask, ABC): +class BaseImageGenerationTask(ArtifactFileOutputMixin, RuleMixin, BaseTask, ABC): """Provides a base class for image generation-related tasks. 
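Because the renamed mixin now writes artifact.to_bytes(), any BaseArtifact can be persisted, not just BlobArtifact subclasses; a minimal consumer sketch patterned after the mixin's unit tests, with SaveTextTool as a purely illustrative name:

from griptape.artifacts import TextArtifact
from griptape.mixins.artifact_file_output_mixin import ArtifactFileOutputMixin


class SaveTextTool(ArtifactFileOutputMixin):
    def run(self) -> None:
        # TextArtifact works here now because the mixin calls to_bytes() on the artifact.
        self._write_to_file(TextArtifact("hello"))


SaveTextTool(output_file="hello.txt").run()  # writes b"hello" to hello.txt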
Attributes: @@ -65,6 +64,6 @@ def all_negative_rulesets(self) -> list[Ruleset]: return task_rulesets - def _read_from_file(self, path: str) -> MediaArtifact: + def _read_from_file(self, path: str) -> ImageArtifact: logger.info("Reading image from %s", os.path.abspath(path)) return ImageLoader().load(Path(path).read_bytes()) diff --git a/griptape/tasks/tool_task.py b/griptape/tasks/tool_task.py index 2dcb796d8..7ae63b902 100644 --- a/griptape/tasks/tool_task.py +++ b/griptape/tasks/tool_task.py @@ -84,7 +84,11 @@ def run(self) -> BaseArtifact: subtask.after_run() if isinstance(subtask.output, ListArtifact): - self.output = subtask.output[0] + first_artifact = subtask.output[0] + if isinstance(first_artifact, BaseArtifact): + self.output = first_artifact + else: + raise ValueError(f"Output is not an Artifact: {type(first_artifact)}") else: self.output = InfoArtifact("No tool output") except Exception as e: diff --git a/griptape/tools/base_image_generation_tool.py b/griptape/tools/base_image_generation_tool.py index ee1c37b6d..2df5d9747 100644 --- a/griptape/tools/base_image_generation_tool.py +++ b/griptape/tools/base_image_generation_tool.py @@ -1,11 +1,11 @@ from attrs import define -from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin +from griptape.mixins.artifact_file_output_mixin import ArtifactFileOutputMixin from griptape.tools import BaseTool @define -class BaseImageGenerationTool(BlobArtifactFileOutputMixin, BaseTool): +class BaseImageGenerationTool(ArtifactFileOutputMixin, BaseTool): """A base class for tools that generate images from text prompts.""" PROMPT_DESCRIPTION = "Features and qualities to include in the generated image, descriptive and succinct." diff --git a/griptape/tools/query/tool.py b/griptape/tools/query/tool.py index 0089970e9..0274e7940 100644 --- a/griptape/tools/query/tool.py +++ b/griptape/tools/query/tool.py @@ -5,7 +5,7 @@ from attrs import Factory, define, field from schema import Literal, Or, Schema -from griptape.artifacts import BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact +from griptape.artifacts import ErrorArtifact, ListArtifact, TextArtifact from griptape.configs import Defaults from griptape.engines.rag import RagEngine from griptape.engines.rag.modules import ( @@ -60,7 +60,7 @@ class QueryTool(BaseTool, RuleMixin): ), }, ) - def query(self, params: dict) -> BaseArtifact: + def query(self, params: dict) -> ListArtifact | ErrorArtifact: query = params["values"]["query"] content = params["values"]["content"] diff --git a/griptape/tools/text_to_speech/tool.py b/griptape/tools/text_to_speech/tool.py index ea4982029..aca259698 100644 --- a/griptape/tools/text_to_speech/tool.py +++ b/griptape/tools/text_to_speech/tool.py @@ -5,7 +5,7 @@ from attrs import define, field from schema import Literal, Schema -from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin +from griptape.mixins.artifact_file_output_mixin import ArtifactFileOutputMixin from griptape.tools import BaseTool from griptape.utils.decorators import activity @@ -15,7 +15,7 @@ @define -class TextToSpeechTool(BlobArtifactFileOutputMixin, BaseTool): +class TextToSpeechTool(ArtifactFileOutputMixin, BaseTool): """A tool that can be used to generate speech from input text. 
Attributes: diff --git a/tests/unit/artifacts/test_action_artifact.py b/tests/unit/artifacts/test_action_artifact.py index 2530ed8c3..b7180b1c3 100644 --- a/tests/unit/artifacts/test_action_artifact.py +++ b/tests/unit/artifacts/test_action_artifact.py @@ -11,10 +11,6 @@ class TestActionArtifact: def action(self) -> ToolAction: return ToolAction(tag="TestTag", name="TestName", path="TestPath", input={"foo": "bar"}) - def test___add__(self, action): - with pytest.raises(NotImplementedError): - ActionArtifact(action) + ActionArtifact(action) - def test_to_text(self, action): assert ActionArtifact(action).to_text() == json.dumps(action.to_dict()) diff --git a/tests/unit/artifacts/test_audio_artifact.py b/tests/unit/artifacts/test_audio_artifact.py index 6d44c05b3..aab6af630 100644 --- a/tests/unit/artifacts/test_audio_artifact.py +++ b/tests/unit/artifacts/test_audio_artifact.py @@ -6,20 +6,22 @@ class TestAudioArtifact: @pytest.fixture() def audio_artifact(self): - return AudioArtifact(value=b"some binary audio data", format="pcm", model="provider/model", prompt="two words") + return AudioArtifact( + value=b"some binary audio data", format="pcm", meta={"model": "provider/model", "prompt": "two words"} + ) def test_mime_type(self, audio_artifact: AudioArtifact): assert audio_artifact.mime_type == "audio/pcm" def test_to_text(self, audio_artifact: AudioArtifact): - assert audio_artifact.to_text() == "Media, type: audio/pcm, size: 22 bytes" + assert audio_artifact.to_text() == "Audio, format: pcm, size: 22 bytes" def test_to_dict(self, audio_artifact: AudioArtifact): audio_dict = audio_artifact.to_dict() assert audio_dict["format"] == "pcm" - assert audio_dict["model"] == "provider/model" - assert audio_dict["prompt"] == "two words" + assert audio_dict["meta"]["model"] == "provider/model" + assert audio_dict["meta"]["prompt"] == "two words" assert audio_dict["value"] == "c29tZSBiaW5hcnkgYXVkaW8gZGF0YQ==" def test_deserialization(self, audio_artifact): @@ -31,5 +33,5 @@ def test_deserialization(self, audio_artifact): assert deserialized_artifact.value == b"some binary audio data" assert deserialized_artifact.mime_type == "audio/pcm" assert deserialized_artifact.format == "pcm" - assert deserialized_artifact.model == "provider/model" - assert deserialized_artifact.prompt == "two words" + assert deserialized_artifact.meta["model"] == "provider/model" + assert deserialized_artifact.meta["prompt"] == "two words" diff --git a/tests/unit/artifacts/test_base_artifact.py b/tests/unit/artifacts/test_base_artifact.py index 6cf8f4466..28e2761a8 100644 --- a/tests/unit/artifacts/test_base_artifact.py +++ b/tests/unit/artifacts/test_base_artifact.py @@ -41,7 +41,7 @@ def test_list_artifact_from_dict(self): assert artifact.to_text() == "foobar" def test_blob_artifact_from_dict(self): - dict_value = {"type": "BlobArtifact", "value": b"Zm9vYmFy", "dir_name": "foo", "name": "bar"} + dict_value = {"type": "BlobArtifact", "value": b"Zm9vYmFy", "name": "bar"} artifact = BaseArtifact.from_dict(dict_value) assert isinstance(artifact, BlobArtifact) @@ -51,17 +51,15 @@ def test_image_artifact_from_dict(self): dict_value = { "type": "ImageArtifact", "value": b"aW1hZ2UgZGF0YQ==", - "dir_name": "foo", "format": "png", "width": 256, "height": 256, - "model": "test-model", - "prompt": "some prompt", + "meta": {"model": "test-model", "prompt": "some prompt"}, } artifact = BaseArtifact.from_dict(dict_value) assert isinstance(artifact, ImageArtifact) - assert artifact.to_text() == "Media, type: image/png, size: 10 bytes" + 
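AudioArtifact follows the same meta pattern; a sketch taken almost verbatim from the updated test fixture:

from griptape.artifacts import AudioArtifact

audio = AudioArtifact(
    value=b"some binary audio data",
    format="pcm",
    meta={"model": "provider/model", "prompt": "two words"},
)
assert audio.mime_type == "audio/pcm"
assert audio.to_text() == "Audio, format: pcm, size: 22 bytes"  # was "Media, type: audio/pcm, ..."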
assert artifact.to_text() == "Image, format: png, size: 10 bytes" assert artifact.value == b"image data" def test_unsupported_from_dict(self): diff --git a/tests/unit/artifacts/test_base_media_artifact.py b/tests/unit/artifacts/test_base_media_artifact.py deleted file mode 100644 index c85d070fe..000000000 --- a/tests/unit/artifacts/test_base_media_artifact.py +++ /dev/null @@ -1,30 +0,0 @@ -import pytest -from attrs import define - -from griptape.artifacts import MediaArtifact - - -class TestMediaArtifact: - @define - class ImaginaryMediaArtifact(MediaArtifact): - media_type: str = "imagination" - - @pytest.fixture() - def media_artifact(self): - return self.ImaginaryMediaArtifact(value=b"some binary dream data", format="dream") - - def test_to_dict(self, media_artifact): - image_dict = media_artifact.to_dict() - - assert image_dict["format"] == "dream" - assert image_dict["value"] == "c29tZSBiaW5hcnkgZHJlYW0gZGF0YQ==" - - def test_name(self, media_artifact): - assert media_artifact.name.startswith("imagination_artifact") - assert media_artifact.name.endswith(".dream") - - def test_mime_type(self, media_artifact): - assert media_artifact.mime_type == "imagination/dream" - - def test_to_text(self, media_artifact): - assert media_artifact.to_text() == "Media, type: imagination/dream, size: 22 bytes" diff --git a/tests/unit/artifacts/test_blob_artifact.py b/tests/unit/artifacts/test_blob_artifact.py index 3d88d5793..9db5e21f5 100644 --- a/tests/unit/artifacts/test_blob_artifact.py +++ b/tests/unit/artifacts/test_blob_artifact.py @@ -1,5 +1,4 @@ import base64 -import os import pytest @@ -13,6 +12,9 @@ def test_value_type_conversion(self): def test_to_text(self): assert BlobArtifact(b"foobar", name="foobar.txt").to_text() == "foobar" + def test_to_bytes(self): + assert BlobArtifact(b"foo").to_bytes() == b"foo" + def test_to_text_encoding(self): assert ( BlobArtifact("ß".encode("ascii", errors="backslashreplace"), name="foobar.txt", encoding="ascii").to_text() @@ -30,37 +32,34 @@ def test_to_text_encoding_error_handler(self): ) def test_to_dict(self): - assert BlobArtifact(b"foobar", name="foobar.txt", dir_name="foo").to_dict()["name"] == "foobar.txt" - - def test_full_path_with_path(self): - assert BlobArtifact(b"foobar", name="foobar.txt", dir_name="foo").full_path == os.path.normpath( - "foo/foobar.txt" - ) - - def test_full_path_without_path(self): - assert BlobArtifact(b"foobar", name="foobar.txt").full_path == "foobar.txt" + assert BlobArtifact(b"foobar", name="foobar.txt").to_dict()["name"] == "foobar.txt" def test_serialization(self): - artifact = BlobArtifact(b"foobar", name="foobar.txt", dir_name="foo") + artifact = BlobArtifact(b"foobar", name="foobar.txt") artifact_dict = artifact.to_dict() assert artifact_dict["name"] == "foobar.txt" - assert artifact_dict["dir_name"] == "foo" assert base64.b64decode(artifact_dict["value"]) == b"foobar" def test_deserialization(self): - artifact = BlobArtifact(b"foobar", name="foobar.txt", dir_name="foo") + artifact = BlobArtifact(b"foobar", name="foobar.txt") artifact_dict = artifact.to_dict() deserialized_artifact = BaseArtifact.from_dict(artifact_dict) assert isinstance(deserialized_artifact, BlobArtifact) assert deserialized_artifact.name == "foobar.txt" - assert deserialized_artifact.dir_name == "foo" assert deserialized_artifact.value == b"foobar" def test_name(self): assert BlobArtifact(b"foo", name="bar").name == "bar" + def test_mime_type(self): + assert BlobArtifact(b"foo").mime_type == "application/octet-stream" + + def 
test___add__(self): + with pytest.raises(TypeError): + BlobArtifact(b"foo") + BlobArtifact(b"bar") + def test___bool__(self): assert not bool(BlobArtifact(b"")) assert bool(BlobArtifact(b"foo")) diff --git a/tests/unit/artifacts/test_boolean_artifact.py b/tests/unit/artifacts/test_boolean_artifact.py index 57bbf1662..6ed21608d 100644 --- a/tests/unit/artifacts/test_boolean_artifact.py +++ b/tests/unit/artifacts/test_boolean_artifact.py @@ -10,6 +10,7 @@ def test_parse_bool(self): assert BooleanArtifact.parse_bool("false").value is False assert BooleanArtifact.parse_bool("True").value is True assert BooleanArtifact.parse_bool("False").value is False + assert BooleanArtifact.parse_bool(True).value is True with pytest.raises(ValueError): BooleanArtifact.parse_bool("foo") @@ -35,3 +36,13 @@ def test_value_type_conversion(self): assert BooleanArtifact([]).value is False assert BooleanArtifact(False).value is False assert BooleanArtifact(True).value is True + + def test_to_text(self): + assert BooleanArtifact(True).to_text() == "true" + assert BooleanArtifact(False).to_text() == "false" + + def test__eq__(self): + assert BooleanArtifact(True) == BooleanArtifact(True) + assert BooleanArtifact(False) == BooleanArtifact(False) + assert BooleanArtifact(True) != BooleanArtifact(False) + assert BooleanArtifact(False) != BooleanArtifact(True) diff --git a/tests/unit/artifacts/test_csv_row_artifact.py b/tests/unit/artifacts/test_csv_row_artifact.py deleted file mode 100644 index fe0b8cd64..000000000 --- a/tests/unit/artifacts/test_csv_row_artifact.py +++ /dev/null @@ -1,30 +0,0 @@ -from griptape.artifacts import CsvRowArtifact - - -class TestCsvRowArtifact: - def test_value_type_conversion(self): - assert CsvRowArtifact({"foo": "bar"}).value == {"foo": "bar"} - assert CsvRowArtifact({"foo": {"bar": "baz"}}).value == {"foo": {"bar": "baz"}} - assert CsvRowArtifact('{"foo": "bar"}').value == {"foo": "bar"} - - def test___add__(self): - assert (CsvRowArtifact({"test1": "foo"}) + CsvRowArtifact({"test2": "bar"})).value == { - "test1": "foo", - "test2": "bar", - } - - def test_to_text(self): - assert CsvRowArtifact({"test1": "foo|bar", "test2": 1}, delimiter="|").to_text() == 'test1|test2\r\n"foo|bar"|1' - - def test_to_dict(self): - assert CsvRowArtifact({"test1": "foo"}).to_dict()["value"] == {"test1": "foo"} - - def test_name(self): - artifact = CsvRowArtifact({}) - - assert artifact.name == artifact.id - assert CsvRowArtifact({}, name="bar").name == "bar" - - def test___bool__(self): - assert not bool(CsvRowArtifact({})) - assert bool(CsvRowArtifact({"foo": "bar"})) diff --git a/tests/unit/artifacts/test_image_artifact.py b/tests/unit/artifacts/test_image_artifact.py index a722ebd91..a632953ae 100644 --- a/tests/unit/artifacts/test_image_artifact.py +++ b/tests/unit/artifacts/test_image_artifact.py @@ -11,12 +11,11 @@ def image_artifact(self): format="png", width=512, height=512, - model="openai/dalle2", - prompt="a cute cat", + meta={"model": "openai/dalle2", "prompt": "a cute cat"}, ) def test_to_text(self, image_artifact: ImageArtifact): - assert image_artifact.to_text() == "Media, type: image/png, size: 26 bytes" + assert image_artifact.to_text() == "Image, format: png, size: 26 bytes" def test_to_dict(self, image_artifact: ImageArtifact): image_dict = image_artifact.to_dict() @@ -24,8 +23,8 @@ def test_to_dict(self, image_artifact: ImageArtifact): assert image_dict["format"] == "png" assert image_dict["width"] == 512 assert image_dict["height"] == 512 - assert image_dict["model"] == "openai/dalle2" 
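The smaller artifact behaviours pinned down by the new tests in this area, collected into one runnable sketch:

from griptape.artifacts import BlobArtifact, BooleanArtifact

blob = BlobArtifact(b"foo")  # dir_name/full_path are gone from BlobArtifact
assert blob.to_bytes() == b"foo"
assert blob.mime_type == "application/octet-stream"

assert BooleanArtifact.parse_bool("True").value is True
assert BooleanArtifact(True).to_text() == "true"
assert BooleanArtifact(True) == BooleanArtifact(True)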
- assert image_dict["prompt"] == "a cute cat" + assert image_dict["meta"]["model"] == "openai/dalle2" + assert image_dict["meta"]["prompt"] == "a cute cat" assert image_dict["value"] == "c29tZSBiaW5hcnkgcG5nIGltYWdlIGRhdGE=" def test_deserialization(self, image_artifact): @@ -39,5 +38,5 @@ def test_deserialization(self, image_artifact): assert deserialized_artifact.format == "png" assert deserialized_artifact.width == 512 assert deserialized_artifact.height == 512 - assert deserialized_artifact.model == "openai/dalle2" - assert deserialized_artifact.prompt == "a cute cat" + assert deserialized_artifact.meta["model"] == "openai/dalle2" + assert deserialized_artifact.meta["prompt"] == "a cute cat" diff --git a/tests/unit/artifacts/test_json_artifact.py b/tests/unit/artifacts/test_json_artifact.py index 06f5d6297..be61e3edf 100644 --- a/tests/unit/artifacts/test_json_artifact.py +++ b/tests/unit/artifacts/test_json_artifact.py @@ -1,8 +1,6 @@ import json -import pytest - -from griptape.artifacts import JsonArtifact, TextArtifact +from griptape.artifacts import JsonArtifact class TestJsonArtifact: @@ -14,11 +12,11 @@ def test_value_type_conversion(self): assert JsonArtifact({"foo": None}).value == json.loads(json.dumps({"foo": None})) assert JsonArtifact([{"foo": {"bar": "baz"}}]).value == json.loads(json.dumps([{"foo": {"bar": "baz"}}])) assert JsonArtifact(None).value == json.loads(json.dumps(None)) - assert JsonArtifact("foo").value == json.loads(json.dumps("foo")) - - def test___add__(self): - with pytest.raises(NotImplementedError): - JsonArtifact({"foo": "bar"}) + TextArtifact("invalid json") + assert JsonArtifact('"foo"').value == "foo" + assert JsonArtifact("true").value is True + assert JsonArtifact("false").value is False + assert JsonArtifact("123").value == 123 + assert JsonArtifact("123.4").value == 123.4 def test_to_text(self): assert JsonArtifact({"foo": "bar"}).to_text() == json.dumps({"foo": "bar"}) diff --git a/tests/unit/artifacts/test_list_artifact.py b/tests/unit/artifacts/test_list_artifact.py index 06d234645..0d6faaa7b 100644 --- a/tests/unit/artifacts/test_list_artifact.py +++ b/tests/unit/artifacts/test_list_artifact.py @@ -1,6 +1,7 @@ import pytest -from griptape.artifacts import BlobArtifact, CsvRowArtifact, ListArtifact, TextArtifact +from griptape.artifacts import BlobArtifact, ListArtifact, TextArtifact +from griptape.artifacts.image_artifact import ImageArtifact class TestListArtifact: @@ -23,6 +24,12 @@ def test___add__(self): assert artifact.value[0].value == "foo" assert artifact.value[1].value == "bar" + def test___iter__(self): + assert [a.value for a in ListArtifact([TextArtifact("foo"), TextArtifact("bar")])] == ["foo", "bar"] + + def test_type_var(self): + assert ListArtifact[TextArtifact]([TextArtifact("foo")]).value[0].value == "foo" + def test_validate_value(self): with pytest.raises(ValueError): ListArtifact([TextArtifact("foo"), BlobArtifact(b"bar")], validate_uniform_types=True) @@ -32,8 +39,7 @@ def test_child_type(self): def test_is_type(self): assert ListArtifact([TextArtifact("foo")]).is_type(TextArtifact) - assert ListArtifact([CsvRowArtifact({"foo": "bar"})]).is_type(TextArtifact) - assert ListArtifact([CsvRowArtifact({"foo": "bar"})]).is_type(CsvRowArtifact) + assert ListArtifact([ImageArtifact(b"", width=1234, height=1234, format="png")]).is_type(ImageArtifact) def test_has_items(self): assert not ListArtifact().has_items() diff --git a/tests/unit/artifacts/test_text_artifact.py b/tests/unit/artifacts/test_text_artifact.py index 
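Two more behaviours the updated tests rely on, shown together: JsonArtifact now parses string input as JSON, and ListArtifact supports iteration plus a generic type parameter:

from griptape.artifacts import JsonArtifact, ListArtifact, TextArtifact

assert JsonArtifact('"foo"').value == "foo"
assert JsonArtifact("123").value == 123

texts = ListArtifact[TextArtifact]([TextArtifact("foo"), TextArtifact("bar")])
assert [t.value for t in texts] == ["foo", "bar"]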
9e00e2d2e..dda256d27 100644 --- a/tests/unit/artifacts/test_text_artifact.py +++ b/tests/unit/artifacts/test_text_artifact.py @@ -18,6 +18,11 @@ def test___add__(self): def test_to_dict(self): assert TextArtifact("foobar").to_dict()["value"] == "foobar" + def test_to_bytes(self): + artifact = TextArtifact("foobar") + + assert artifact.to_bytes() == b"foobar" + def test_from_dict(self): assert BaseArtifact.from_dict(TextArtifact("foobar").to_dict()).value == "foobar" diff --git a/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py b/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py index 9aa4d3f4f..05e669b66 100644 --- a/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py +++ b/tests/unit/drivers/image_generation/test_amazon_bedrock_stable_diffusion_image_generation_driver.py @@ -60,5 +60,5 @@ def test_try_text_to_image(self, driver): assert image_artifact.mime_type == "image/png" assert image_artifact.width == 512 assert image_artifact.height == 512 - assert image_artifact.model == "stability.stable-diffusion-xl-v1" - assert image_artifact.prompt == "test prompt" + assert image_artifact.meta["model"] == "stability.stable-diffusion-xl-v1" + assert image_artifact.meta["prompt"] == "test prompt" diff --git a/tests/unit/drivers/image_generation/test_azure_openai_image_generation_driver.py b/tests/unit/drivers/image_generation/test_azure_openai_image_generation_driver.py index 268708b2b..a72764211 100644 --- a/tests/unit/drivers/image_generation/test_azure_openai_image_generation_driver.py +++ b/tests/unit/drivers/image_generation/test_azure_openai_image_generation_driver.py @@ -28,7 +28,10 @@ def test_init(self, driver): def test_init_requires_endpoint(self): with pytest.raises(TypeError): AzureOpenAiImageGenerationDriver( - model="dall-e-3", client=Mock(), azure_deployment="dalle-deployment", image_size="512x512" + model="dall-e-3", + client=Mock(), + azure_deployment="dalle-deployment", + image_size="512x512", ) # pyright: ignore[reportCallIssues] def test_try_text_to_image(self, driver): @@ -40,5 +43,5 @@ def test_try_text_to_image(self, driver): assert image_artifact.mime_type == "image/png" assert image_artifact.width == 512 assert image_artifact.height == 512 - assert image_artifact.model == "dall-e-3" - assert image_artifact.prompt == "test prompt" + assert image_artifact.meta["model"] == "dall-e-3" + assert image_artifact.meta["prompt"] == "test prompt" diff --git a/tests/unit/drivers/image_generation/test_leonardo_image_generation_driver.py b/tests/unit/drivers/image_generation/test_leonardo_image_generation_driver.py index 48805cde6..ec70e2dd2 100644 --- a/tests/unit/drivers/image_generation/test_leonardo_image_generation_driver.py +++ b/tests/unit/drivers/image_generation/test_leonardo_image_generation_driver.py @@ -76,5 +76,5 @@ def test_try_text_to_image(self, driver): assert image_artifact.mime_type == "image/png" assert image_artifact.width == 512 assert image_artifact.height == 512 - assert image_artifact.model == "test_model_id" - assert image_artifact.prompt == "test_prompt" + assert image_artifact.meta["model"] == "test_model_id" + assert image_artifact.meta["prompt"] == "test_prompt" diff --git a/tests/unit/drivers/image_generation/test_openai_image_generation_driver.py b/tests/unit/drivers/image_generation/test_openai_image_generation_driver.py index 16bcd2870..ff5528fb6 100644 --- 
a/tests/unit/drivers/image_generation/test_openai_image_generation_driver.py +++ b/tests/unit/drivers/image_generation/test_openai_image_generation_driver.py @@ -22,8 +22,8 @@ def test_try_text_to_image(self, driver): assert image_artifact.mime_type == "image/png" assert image_artifact.width == 512 assert image_artifact.height == 512 - assert image_artifact.model == "dall-e-2" - assert image_artifact.prompt == "test prompt" + assert image_artifact.meta["model"] == "dall-e-2" + assert image_artifact.meta["prompt"] == "test prompt" def test_try_image_variation(self, driver): driver.client.images.create_variation.return_value = Mock(data=[Mock(b64_json=b"aW1hZ2UgZGF0YQ==")]) @@ -34,7 +34,7 @@ def test_try_image_variation(self, driver): assert image_artifact.mime_type == "image/png" assert image_artifact.width == 512 assert image_artifact.height == 512 - assert image_artifact.model == "dall-e-2" + assert image_artifact.meta["model"] == "dall-e-2" def test_try_image_variation_invalid_size(self, driver): driver.image_size = "1024x1792" @@ -59,8 +59,8 @@ def test_try_image_inpainting(self, driver): assert image_artifact.mime_type == "image/png" assert image_artifact.width == 512 assert image_artifact.height == 512 - assert image_artifact.model == "dall-e-2" - assert image_artifact.prompt == "test prompt" + assert image_artifact.meta["model"] == "dall-e-2" + assert image_artifact.meta["prompt"] == "test prompt" def test_try_image_inpainting_invalid_size(self, driver): driver.image_size = "1024x1792" diff --git a/tests/unit/drivers/vector/test_base_local_vector_store_driver.py b/tests/unit/drivers/vector/test_base_local_vector_store_driver.py index ac4ff8043..20a3e2b50 100644 --- a/tests/unit/drivers/vector/test_base_local_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_base_local_vector_store_driver.py @@ -4,7 +4,6 @@ import pytest from griptape.artifacts import TextArtifact -from griptape.artifacts.csv_row_artifact import CsvRowArtifact class BaseLocalVectorStoreDriver(ABC): @@ -26,20 +25,6 @@ def test_upsert(self, driver): assert len(driver.entries) == 2 - def test_upsert_csv_row(self, driver): - namespace = driver.upsert_text_artifact(CsvRowArtifact(id="foo1", value={"col": "value"})) - - assert len(driver.entries) == 1 - assert list(driver.entries.keys())[0] == namespace - - driver.upsert_text_artifact(CsvRowArtifact(id="foo1", value={"col": "value"})) - - assert len(driver.entries) == 1 - - driver.upsert_text_artifact(CsvRowArtifact(id="foo2", value={"col": "value2"})) - - assert len(driver.entries) == 2 - def test_upsert_multiple(self, driver): driver.upsert_text_artifacts({"foo": [TextArtifact("foo")], "bar": [TextArtifact("bar")]}) diff --git a/tests/unit/engines/extraction/test_csv_extraction_engine.py b/tests/unit/engines/extraction/test_csv_extraction_engine.py index 893c21d60..056df2d5a 100644 --- a/tests/unit/engines/extraction/test_csv_extraction_engine.py +++ b/tests/unit/engines/extraction/test_csv_extraction_engine.py @@ -12,11 +12,11 @@ def test_extract(self, engine): result = engine.extract("foo") assert len(result.value) == 1 - assert result.value[0].value == {"test1": "mock output"} + assert result.value[0].value == "test1: mock output" def test_text_to_csv_rows(self, engine): result = engine.text_to_csv_rows("foo,bar\nbaz,maz", ["test1", "test2"]) assert len(result) == 2 - assert result[0].value == {"test1": "foo", "test2": "bar"} - assert result[1].value == {"test1": "baz", "test2": "maz"} + assert result[0].value == "test1: foo\ntest2: bar" + assert 
result[1].value == "test1: baz\ntest2: maz" diff --git a/tests/unit/loaders/test_audio_loader.py b/tests/unit/loaders/test_audio_loader.py index 473fd0d9e..b7ebdd912 100644 --- a/tests/unit/loaders/test_audio_loader.py +++ b/tests/unit/loaders/test_audio_loader.py @@ -13,14 +13,13 @@ def loader(self): def create_source(self, bytes_from_resource_path): return bytes_from_resource_path - @pytest.mark.parametrize(("resource_path", "suffix", "mime_type"), [("sentences.wav", ".wav", "audio/wav")]) - def test_load(self, resource_path, suffix, mime_type, loader, create_source): + @pytest.mark.parametrize(("resource_path", "mime_type"), [("sentences.wav", "audio/wav")]) + def test_load(self, resource_path, mime_type, loader, create_source): source = create_source(resource_path) artifact = loader.load(source) assert isinstance(artifact, AudioArtifact) - assert artifact.name.endswith(suffix) assert artifact.mime_type == mime_type assert len(artifact.value) > 0 @@ -35,6 +34,5 @@ def test_load_collection(self, create_source, loader): for key in collection: artifact = collection[key] assert isinstance(artifact, AudioArtifact) - assert artifact.name.endswith(".wav") assert artifact.mime_type == "audio/wav" assert len(artifact.value) > 0 diff --git a/tests/unit/loaders/test_csv_loader.py b/tests/unit/loaders/test_csv_loader.py index a747afff7..7af409152 100644 --- a/tests/unit/loaders/test_csv_loader.py +++ b/tests/unit/loaders/test_csv_loader.py @@ -1,3 +1,5 @@ +import json + import pytest from griptape.loaders.csv_loader import CsvLoader @@ -28,8 +30,7 @@ def test_load(self, loader, create_source): assert len(artifacts) == 10 first_artifact = artifacts[0] - assert first_artifact.value["Foo"] == "foo1" - assert first_artifact.value["Bar"] == "bar1" + assert first_artifact.value == "Foo: foo1\nBar: bar1" assert first_artifact.embedding == [0, 1] def test_load_delimiter(self, loader_with_pipe_delimiter, create_source): @@ -39,8 +40,7 @@ def test_load_delimiter(self, loader_with_pipe_delimiter, create_source): assert len(artifacts) == 10 first_artifact = artifacts[0] - assert first_artifact.value["Foo"] == "bar1" - assert first_artifact.value["Bar"] == "foo1" + assert first_artifact.value == "Bar: foo1\nFoo: bar1" assert first_artifact.embedding == [0, 1] def test_load_collection(self, loader, create_source): @@ -52,10 +52,17 @@ def test_load_collection(self, loader, create_source): keys = {loader.to_key(source) for source in sources} assert collection.keys() == keys - for key in keys: - artifacts = collection[key] - assert len(artifacts) == 10 - first_artifact = artifacts[0] - assert first_artifact.value["Foo"] == "foo1" - assert first_artifact.value["Bar"] == "bar1" - assert first_artifact.embedding == [0, 1] + assert collection[loader.to_key(sources[0])][0].value == "Foo: foo1\nBar: bar1" + assert collection[loader.to_key(sources[0])][0].embedding == [0, 1] + + assert collection[loader.to_key(sources[1])][0].value == "Bar: bar1\nFoo: foo1" + assert collection[loader.to_key(sources[1])][0].embedding == [0, 1] + + def test_formatter_fn(self, loader, create_source): + loader.formatter_fn = lambda value: json.dumps(value) + source = create_source("test-1.csv") + + artifacts = loader.load(source) + + assert len(artifacts) == 10 + assert artifacts[0].value == '{"Foo": "foo1", "Bar": "bar1"}' diff --git a/tests/unit/loaders/test_dataframe_loader.py b/tests/unit/loaders/test_dataframe_loader.py deleted file mode 100644 index 5c2a57ed6..000000000 --- a/tests/unit/loaders/test_dataframe_loader.py +++ /dev/null @@ 
-1,52 +0,0 @@ -import os - -import pandas as pd -import pytest - -from griptape.loaders.dataframe_loader import DataFrameLoader -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver - - -class TestDataFrameLoader: - @pytest.fixture() - def loader(self): - return DataFrameLoader(embedding_driver=MockEmbeddingDriver()) - - def test_load_with_path(self, loader): - # test loading a file delimited by comma - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../resources/test-1.csv") - - artifacts = loader.load(pd.read_csv(path)) - - assert len(artifacts) == 10 - first_artifact = artifacts[0].value - assert first_artifact["Foo"] == "foo1" - assert first_artifact["Bar"] == "bar1" - - assert artifacts[0].embedding == [0, 1] - - def test_load_collection_with_path(self, loader): - path1 = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../resources/test-1.csv") - path2 = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../resources/test-2.csv") - df1 = pd.read_csv(path1) - df2 = pd.read_csv(path2) - collection = loader.load_collection([df1, df2]) - - key1 = loader.to_key(df1) - key2 = loader.to_key(df2) - - assert list(collection.keys()) == [key1, key2] - - artifacts = collection[key1] - assert len(artifacts) == 10 - first_artifact = artifacts[0].value - assert first_artifact["Foo"] == "foo1" - assert first_artifact["Bar"] == "bar1" - - artifacts = collection[key2] - assert len(artifacts) == 10 - first_artifact = artifacts[0].value - assert first_artifact["Bar"] == "bar1" - assert first_artifact["Foo"] == "foo1" - - assert artifacts[0].embedding == [0, 1] diff --git a/tests/unit/loaders/test_image_loader.py b/tests/unit/loaders/test_image_loader.py index eca4cbccc..7093894b0 100644 --- a/tests/unit/loaders/test_image_loader.py +++ b/tests/unit/loaders/test_image_loader.py @@ -18,23 +18,22 @@ def create_source(self, bytes_from_resource_path): return bytes_from_resource_path @pytest.mark.parametrize( - ("resource_path", "suffix", "mime_type"), + ("resource_path", "mime_type"), [ - ("small.png", ".png", "image/png"), - ("small.jpg", ".jpeg", "image/jpeg"), - ("small.webp", ".webp", "image/webp"), - ("small.bmp", ".bmp", "image/bmp"), - ("small.gif", ".gif", "image/gif"), - ("small.tiff", ".tiff", "image/tiff"), + ("small.png", "image/png"), + ("small.jpg", "image/jpeg"), + ("small.webp", "image/webp"), + ("small.bmp", "image/bmp"), + ("small.gif", "image/gif"), + ("small.tiff", "image/tiff"), ], ) - def test_load(self, resource_path, suffix, mime_type, loader, create_source): + def test_load(self, resource_path, mime_type, loader, create_source): source = create_source(resource_path) artifact = loader.load(source) assert isinstance(artifact, ImageArtifact) - assert artifact.name.endswith(suffix) assert artifact.height == 32 assert artifact.width == 32 assert artifact.mime_type == mime_type @@ -49,7 +48,6 @@ def test_load_normalize(self, resource_path, png_loader, create_source): artifact = png_loader.load(source) assert isinstance(artifact, ImageArtifact) - assert artifact.name.endswith(".png") assert artifact.height == 32 assert artifact.width == 32 assert artifact.mime_type == "image/png" @@ -68,7 +66,6 @@ def test_load_collection(self, create_source, png_loader): for key in keys: artifact = collection[key] assert isinstance(artifact, ImageArtifact) - assert artifact.name.endswith(".png") assert artifact.height == 32 assert artifact.width == 32 assert artifact.mime_type == "image/png" diff --git a/tests/unit/loaders/test_sql_loader.py 
b/tests/unit/loaders/test_sql_loader.py index fbfa6d4fa..2ff6c7faf 100644 --- a/tests/unit/loaders/test_sql_loader.py +++ b/tests/unit/loaders/test_sql_loader.py @@ -38,24 +38,21 @@ def test_load(self, loader): artifacts = loader.load("SELECT * FROM test_table;") assert len(artifacts) == 3 - assert artifacts[0].value == {"id": 1, "name": "Alice", "age": 25, "city": "New York"} - assert artifacts[1].value == {"id": 2, "name": "Bob", "age": 30, "city": "Los Angeles"} - assert artifacts[2].value == {"id": 3, "name": "Charlie", "age": 22, "city": "Chicago"} + assert artifacts[0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" + assert artifacts[1].value == "id: 2\nname: Bob\nage: 30\ncity: Los Angeles" + assert artifacts[2].value == "id: 3\nname: Charlie\nage: 22\ncity: Chicago" assert artifacts[0].embedding == [0, 1] def test_load_collection(self, loader): - artifacts = loader.load_collection(["SELECT * FROM test_table LIMIT 1;", "SELECT * FROM test_table LIMIT 2;"]) + sources = ["SELECT * FROM test_table LIMIT 1;", "SELECT * FROM test_table LIMIT 2;"] + artifacts = loader.load_collection(sources) assert list(artifacts.keys()) == [ loader.to_key("SELECT * FROM test_table LIMIT 1;"), loader.to_key("SELECT * FROM test_table LIMIT 2;"), ] - assert [a.value for artifact_list in artifacts.values() for a in artifact_list] == [ - {"age": 25, "city": "New York", "id": 1, "name": "Alice"}, - {"age": 25, "city": "New York", "id": 1, "name": "Alice"}, - {"age": 30, "city": "Los Angeles", "id": 2, "name": "Bob"}, - ] - + assert artifacts[loader.to_key(sources[0])][0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" + assert artifacts[loader.to_key(sources[1])][0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" assert list(artifacts.values())[0][0].embedding == [0, 1] diff --git a/tests/unit/memory/tool/test_task_memory.py b/tests/unit/memory/tool/test_task_memory.py index 2f6ffe1c9..d2575959a 100644 --- a/tests/unit/memory/tool/test_task_memory.py +++ b/tests/unit/memory/tool/test_task_memory.py @@ -1,6 +1,6 @@ import pytest -from griptape.artifacts import BlobArtifact, CsvRowArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact +from griptape.artifacts import BlobArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact from griptape.memory import TaskMemory from griptape.memory.task.storage import BlobArtifactStorage, TextArtifactStorage from griptape.structures import Agent @@ -10,10 +10,6 @@ class TestTaskMemory: - @pytest.fixture(autouse=True) - def _mock_griptape(self, mocker): - mocker.patch("griptape.engines.CsvExtractionEngine.extract", return_value=[CsvRowArtifact({"foo": "bar"})]) - @pytest.fixture() def memory(self): return defaults.text_task_memory("MyMemory") diff --git a/tests/unit/mixins/test_image_artifact_file_output_mixin.py b/tests/unit/mixins/test_image_artifact_file_output_mixin.py index cf124da39..7e2926e09 100644 --- a/tests/unit/mixins/test_image_artifact_file_output_mixin.py +++ b/tests/unit/mixins/test_image_artifact_file_output_mixin.py @@ -4,12 +4,12 @@ import pytest from griptape.artifacts import ImageArtifact -from griptape.mixins.media_artifact_file_output_mixin import BlobArtifactFileOutputMixin +from griptape.mixins.artifact_file_output_mixin import ArtifactFileOutputMixin -class TestMediaArtifactFileOutputMixin: +class TestArtifactFileOutputMixin: def test_no_output(self): - class Test(BlobArtifactFileOutputMixin): + class Test(ArtifactFileOutputMixin): pass assert Test().output_file is None @@ -18,7 +18,7 @@ class 
Test(BlobArtifactFileOutputMixin): def test_output_file(self): artifact = ImageArtifact(name="test.png", value=b"test", height=1, width=1, format="png") - class Test(BlobArtifactFileOutputMixin): + class Test(ArtifactFileOutputMixin): def run(self) -> None: self._write_to_file(artifact) @@ -33,7 +33,7 @@ def run(self) -> None: def test_output_dir(self): artifact = ImageArtifact(name="test.png", value=b"test", height=1, width=1, format="png") - class Test(BlobArtifactFileOutputMixin): + class Test(ArtifactFileOutputMixin): def run(self) -> None: self._write_to_file(artifact) @@ -46,7 +46,7 @@ def run(self) -> None: assert os.path.exists(os.path.join(outdir, artifact.name)) def test_output_file_and_dir(self): - class Test(BlobArtifactFileOutputMixin): + class Test(ArtifactFileOutputMixin): pass outfile = "test.txt" diff --git a/tests/unit/tasks/test_extraction_task.py b/tests/unit/tasks/test_extraction_task.py index 2d7ab442c..06d444f9b 100644 --- a/tests/unit/tasks/test_extraction_task.py +++ b/tests/unit/tasks/test_extraction_task.py @@ -18,4 +18,4 @@ def test_run(self, task): result = task.run() assert len(result.value) == 1 - assert result.value[0].value == {"test1": "mock output"} + assert result.value[0].value == "test1: mock output" diff --git a/tests/unit/tools/test_extraction_tool.py b/tests/unit/tools/test_extraction_tool.py index 1219da373..3f783e8a4 100644 --- a/tests/unit/tools/test_extraction_tool.py +++ b/tests/unit/tools/test_extraction_tool.py @@ -58,10 +58,10 @@ def test_csv_extract_artifacts(self, csv_tool): ) assert len(result.value) == 1 - assert result.value[0].value == {"test1": "mock output"} + assert result.value[0].value == "test1: mock output" def test_csv_extract_content(self, csv_tool): result = csv_tool.extract({"values": {"data": "foo"}}) assert len(result.value) == 1 - assert result.value[0].value == {"test1": "mock output"} + assert result.value[0].value == "test1: mock output" diff --git a/tests/unit/tools/test_file_manager.py b/tests/unit/tools/test_file_manager.py index 569c0a280..469918a02 100644 --- a/tests/unit/tools/test_file_manager.py +++ b/tests/unit/tools/test_file_manager.py @@ -5,7 +5,7 @@ import pytest -from griptape.artifacts import CsvRowArtifact, ListArtifact, TextArtifact +from griptape.artifacts import ListArtifact, TextArtifact from griptape.drivers.file_manager.local_file_manager_driver import LocalFileManagerDriver from griptape.loaders.text_loader import TextLoader from griptape.tools import FileManagerTool @@ -106,29 +106,6 @@ def test_save_memory_artifacts_to_disk_for_multiple_artifacts(self, temp_dir): assert Path(os.path.join(temp_dir, "test", f"{artifacts[1].name}-{file_name}")).read_text() == "baz" assert result.value == "Successfully saved memory artifacts to disk" - def test_save_memory_artifacts_to_disk_for_non_string_artifact(self, temp_dir): - memory = defaults.text_task_memory("Memory1") - artifact = CsvRowArtifact({"foo": "bar"}) - - memory.store_artifact("foobar", artifact) - - file_manager = FileManagerTool( - input_memory=[memory], file_manager_driver=LocalFileManagerDriver(workdir=temp_dir) - ) - result = file_manager.save_memory_artifacts_to_disk( - { - "values": { - "dir_name": "test", - "file_name": "foobar.txt", - "memory_name": memory.name, - "artifact_namespace": "foobar", - } - } - ) - - assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foo\nbar" - assert result.value == "Successfully saved memory artifacts to disk" - def test_save_content_to_file(self, temp_dir): file_manager = 
FileManagerTool(file_manager_driver=LocalFileManagerDriver(workdir=temp_dir)) result = file_manager.save_content_to_file( diff --git a/tests/unit/tools/test_inpainting_image_generation_tool.py b/tests/unit/tools/test_inpainting_image_generation_tool.py index 45afcbc63..a558921a9 100644 --- a/tests/unit/tools/test_inpainting_image_generation_tool.py +++ b/tests/unit/tools/test_inpainting_image_generation_tool.py @@ -59,8 +59,8 @@ def test_image_inpainting_with_outfile( engine=image_generation_engine, output_file=outfile, image_loader=image_loader ) - image_generator.engine.run.return_value = Mock( # pyright: ignore[reportFunctionMemberAccess] - value=b"image data", format="png", width=512, height=512, model="test model", prompt="test prompt" + image_generator.engine.run.return_value = ImageArtifact( # pyright: ignore[reportFunctionMemberAccess] + value=b"image data", format="png", width=512, height=512 ) image_artifact = image_generator.image_inpainting_from_file( @@ -83,8 +83,8 @@ def test_image_inpainting_from_memory(self, image_generation_engine, image_artif memory.load_artifacts = Mock(return_value=[image_artifact]) image_generator.find_input_memory = Mock(return_value=memory) - image_generator.engine.run.return_value = Mock( # pyright: ignore[reportFunctionMemberAccess] - value=b"image data", format="png", width=512, height=512, model="test model", prompt="test prompt" + image_generator.engine.run.return_value = ImageArtifact( # pyright: ignore[reportFunctionMemberAccess] + value=b"image data", format="png", width=512, height=512 ) image_artifact = image_generator.image_inpainting_from_memory( diff --git a/tests/unit/tools/test_outpainting_image_variation_tool.py b/tests/unit/tools/test_outpainting_image_variation_tool.py index 4fbcbe8d4..e3f0de847 100644 --- a/tests/unit/tools/test_outpainting_image_variation_tool.py +++ b/tests/unit/tools/test_outpainting_image_variation_tool.py @@ -34,8 +34,8 @@ def test_validate_output_configs(self, image_generation_engine) -> None: OutpaintingImageGenerationTool(engine=image_generation_engine, output_dir="test", output_file="test") def test_image_outpainting(self, image_generator, path_from_resource_path) -> None: - image_generator.engine.run.return_value = Mock( - value=b"image data", format="png", width=512, height=512, model="test model", prompt="test prompt" + image_generator.engine.run.return_value = ImageArtifact( + value=b"image data", format="png", width=512, height=512 ) image_artifact = image_generator.image_outpainting_from_file( @@ -59,8 +59,8 @@ def test_image_outpainting_with_outfile( engine=image_generation_engine, output_file=outfile, image_loader=image_loader ) - image_generator.engine.run.return_value = Mock( # pyright: ignore[reportFunctionMemberAccess] - value=b"image data", format="png", width=512, height=512, model="test model", prompt="test prompt" + image_generator.engine.run.return_value = ImageArtifact( # pyright: ignore[reportFunctionMemberAccess] + value=b"image data", format="png", width=512, height=512 ) image_artifact = image_generator.image_outpainting_from_file( diff --git a/tests/unit/tools/test_prompt_image_generation_tool.py b/tests/unit/tools/test_prompt_image_generation_tool.py index a0c5c7037..4252d887e 100644 --- a/tests/unit/tools/test_prompt_image_generation_tool.py +++ b/tests/unit/tools/test_prompt_image_generation_tool.py @@ -5,6 +5,7 @@ import pytest +from griptape.artifacts.image_artifact import ImageArtifact from griptape.tools import PromptImageGenerationTool @@ -36,8 +37,8 @@ def 
test_generate_image_with_outfile(self, image_generation_engine) -> None: outfile = f"{tempfile.gettempdir()}/{str(uuid.uuid4())}.png" image_generator = PromptImageGenerationTool(engine=image_generation_engine, output_file=outfile) - image_generator.engine.run.return_value = Mock( # pyright: ignore[reportFunctionMemberAccess] - value=b"image data", format="png", width=512, height=512, model="test model", prompt="test prompt" + image_generator.engine.run.return_value = ImageArtifact( # pyright: ignore[reportFunctionMemberAccess] + value=b"image data", format="png", width=512, height=512 ) image_artifact = image_generator.generate_image( diff --git a/tests/unit/tools/test_sql_tool.py b/tests/unit/tools/test_sql_tool.py index 2ef50ff54..061b31f4d 100644 --- a/tests/unit/tools/test_sql_tool.py +++ b/tests/unit/tools/test_sql_tool.py @@ -26,7 +26,7 @@ def test_execute_query(self, driver): result = client.execute_query({"values": {"sql_query": "SELECT * from test_table;"}}) assert len(result.value) == 1 - assert result.value[0].value == {"id": 1, "name": "Alice", "age": 25, "city": "New York"} + assert result.value[0].value == "id: 1\nname: Alice\nage: 25\ncity: New York" def test_execute_query_description(self, driver): client = SqlTool( diff --git a/tests/unit/tools/test_text_to_speech_tool.py b/tests/unit/tools/test_text_to_speech_tool.py index 8821d48fc..6f2c43bd3 100644 --- a/tests/unit/tools/test_text_to_speech_tool.py +++ b/tests/unit/tools/test_text_to_speech_tool.py @@ -5,6 +5,7 @@ import pytest +from griptape.artifacts.audio_artifact import AudioArtifact from griptape.tools.text_to_speech.tool import TextToSpeechTool @@ -32,7 +33,7 @@ def test_text_to_speech_with_outfile(self, text_to_speech_engine) -> None: outfile = f"{tempfile.gettempdir()}/{str(uuid.uuid4())}.mp3" text_to_speech_client = TextToSpeechTool(engine=text_to_speech_engine, output_file=outfile) - text_to_speech_client.engine.run.return_value = Mock(value=b"audio data", format="mp3") # pyright: ignore[reportFunctionMemberAccess] + text_to_speech_client.engine.run.return_value = AudioArtifact(value=b"audio data", format="mp3") # pyright: ignore[reportFunctionMemberAccess] audio_artifact = text_to_speech_client.text_to_speech(params={"values": {"text": "say this!"}}) diff --git a/tests/unit/tools/test_variation_image_generation_tool.py b/tests/unit/tools/test_variation_image_generation_tool.py index c4528a044..5fd3513c1 100644 --- a/tests/unit/tools/test_variation_image_generation_tool.py +++ b/tests/unit/tools/test_variation_image_generation_tool.py @@ -58,8 +58,8 @@ def test_image_variation_with_outfile(self, image_generation_engine, image_loade engine=image_generation_engine, output_file=outfile, image_loader=image_loader ) - image_generator.engine.run.return_value = Mock( # pyright: ignore[reportFunctionMemberAccess] - value=b"image data", format="png", width=512, height=512, model="test model", prompt="test prompt" + image_generator.engine.run.return_value = ImageArtifact( # pyright: ignore[reportFunctionMemberAccess] + value=b"image data", format="png", width=512, height=512 ) image_artifact = image_generator.image_variation_from_file(