diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 9813b6aba..044c53f3b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -4,10 +4,9 @@ about: Create a report to help us improve title: '' labels: bug assignees: '' - --- - - [ ] I have read and agree to the [contributing guidelines](https://github.com/griptape-ai/griptape#contributing). +- [ ] I have read and agree to the [contributing guidelines](https://github.com/griptape-ai/griptape#contributing). **Describe the bug** A clear and concise description of what the bug is. @@ -22,8 +21,9 @@ A clear and concise description of what you expected to happen. If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - - OS: [e.g. iOS] - - Version [e.g. 0.5.1] + +- OS: \[e.g. iOS\] +- Version \[e.g. 0.5.1\] **Additional context** Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index a4643237f..c132bfd35 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -4,13 +4,12 @@ about: Suggest an idea for this project title: '' labels: enhancement assignees: '' - --- - - [ ] I have read and agree to the [contributing guidelines](https://github.com/griptape-ai/griptape#contributing). +- [ ] I have read and agree to the [contributing guidelines](https://github.com/griptape-ai/griptape#contributing). **Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] +A clear and concise description of what the problem is. Ex. I'm always frustrated when \[...\] **Describe the solution you'd like** A clear and concise description of what you want to happen. 
diff --git a/.github/actions/init-bare-environment/action.yml b/.github/actions/init-bare-environment/action.yml index 00a588497..ba9cdc0e2 100644 --- a/.github/actions/init-bare-environment/action.yml +++ b/.github/actions/init-bare-environment/action.yml @@ -4,11 +4,11 @@ runs: using: "composite" steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: setup-python name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -21,7 +21,7 @@ runs: - name: Load cached venv id: cached-poetry-dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: .venv key: venv-bare-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} diff --git a/.github/actions/init-environment/action.yml b/.github/actions/init-environment/action.yml index 34a1fc926..338ea3661 100644 --- a/.github/actions/init-environment/action.yml +++ b/.github/actions/init-environment/action.yml @@ -4,11 +4,11 @@ runs: using: "composite" steps: - name: Checkout actions - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: setup-python name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -21,7 +21,7 @@ runs: - name: Load cached venv id: cached-poetry-dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: .venv key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} @@ -30,13 +30,26 @@ runs: if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' run: poetry install --no-interaction --with test --with dev --with docs --all-extras shell: bash - - name: Activate venv run: | source $VENV echo PATH=$PATH >> $GITHUB_ENV shell: bash + - name: Get installed Playwright version + id: playwright-version + run: | + version=$(poetry run playwright -V | awk '{print $2}' | tr -d '\n') + echo "version=$version" >> $GITHUB_OUTPUT + shell: bash + + - uses: actions/cache@v4 + id: playwright-cache + with: + path: ${{ matrix.os == 'windows-latest' && '~\\AppData\\Local\\ms-playwright' || '~/.cache/ms-playwright' }} + key: '${{ runner.os }}-playwright-${{ steps.playwright-version.outputs.version }}' + - name: Install playwright + if: steps.playwright-cache.outputs.cache-hit != 'true' run: playwright install --with-deps shell: bash diff --git a/.github/workflows/docs-integration-tests.yml b/.github/workflows/docs-integration-tests.yml index d8e2162ed..81807be59 100644 --- a/.github/workflows/docs-integration-tests.yml +++ b/.github/workflows/docs-integration-tests.yml @@ -125,6 +125,11 @@ jobs: ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.INTEG_ASTRA_DB_APPLICATION_TOKEN }} TAVILY_API_KEY: ${{ secrets.INTEG_TAVILY_API_KEY }} EXA_API_KEY: ${{ secrets.INTEG_EXA_API_KEY }} + AMAZON_S3_BUCKET: ${{ secrets.INTEG_AMAZON_S3_BUCKET }} + AMAZON_S3_KEY: ${{ secrets.INTEG_AMAZON_S3_KEY }} + GT_CLOUD_BUCKET_ID: ${{ secrets.INTEG_GT_CLOUD_BUCKET_ID }} + GT_CLOUD_ASSET_NAME: ${{ secrets.INTEG_GT_CLOUD_ASSET_NAME }} + services: postgres: image: ankane/pgvector:v0.5.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 11ee648e5..9d1a34cbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ # Changelog + All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), @@ -6,16 +7,103 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased -## [0.33.1] - 2024-10-11 +### Added + +- `griptape.configs.logging.JsonFormatter` for formatting logs as JSON. +- Request/response debug logging to all Prompt Drivers. +- `griptape.schemas.UnionField` for serializing union fields. +- `BaseEventListenerDriver.flush_events()` to flush events from an Event Listener Driver. +- Exponential backoff to `BaseEventListenerDriver` for retrying failed event publishing. +- `BaseTask.task_outputs` to get a dictionary of all task outputs. This has been added to `Workflow.context` and `Pipeline.context`. +- `Chat.input_fn` for customizing the input to the Chat utility. +- `GriptapeCloudFileManagerDriver` for managing files on Griptape Cloud. +- `BaseFileManagerDriver.load_artifact()` & `BaseFileManagerDriver.save_artifact()` for loading & saving artifacts as files. +- Events `BaseChunkEvent`, `TextChunkEvent`, `ActionChunkEvent`. +- `wrapt` dependency for more robust decorators. +- `BasePromptDriver.extra_params` for passing extra parameters not explicitly declared by the Driver. +- `RunnableMixin` which adds `on_before_run` and `on_after_run` hooks. +- `griptape.utils.with_contextvars` utility for running functions with the current `contextvars` context. + +### Changed + +- **BREAKING**: Removed `BaseEventListenerDriver.publish_event` `flush` argument. Use `BaseEventListenerDriver.flush_events()` instead. +- **BREAKING**: Renamed parameter `driver` on `EventListener` to `event_listener_driver`. +- **BREAKING**: Updated `EventListener.handler` return value behavior. + - If `EventListener.handler` returns `None`, the event will not be published to the `event_listener_driver`. + - If `EventListener.handler` is None, the event will be published to the `event_listener_driver` as-is. +- **BREAKING**: Removed `CompletionChunkEvent`. +- **BREAKING**: Moved `griptape.common.observable.observable` to `griptape.common.decorators.observable`. +- **BREAKING**: `AnthropicDriversConfig` no longer bundles `VoyageAiEmbeddingDriver`. +- **BREAKING**: Removed `HuggingFaceHubPromptDriver.params`, use `HuggingFaceHubPromptDriver.extra_params` instead. +- **BREAKING**: Removed `HuggingFacePipelinePromptDriver.params`, use `HuggingFacePipelinePromptDriver.extra_params` instead. +- **BREAKING**: Renamed `BaseTask.run` to `BaseTask.try_run`. +- **BREAKING**: Renamed `BaseTask.execute` to `BaseTask.run`. +- **BREAKING**: Renamed `BaseTask.can_execute` to `BaseTask.can_run`. +- **BREAKING**: Renamed `BaseTool.run` to `BaseTool.try_run`. +- **BREAKING**: Renamed `BaseTool.execute` to `BaseTool.run`. +- **BREAKING**: Renamed callables throughout the framework for consistency: + - Renamed `LocalStructureRunDriver.structure_factory_fn` to `LocalStructureRunDriver.create_structure`. + - Renamed `SnowflakeSqlDriver.connection_func` to `SnowflakeSqlDriver.get_connection`. + - Renamed `CsvLoader.formatter_fn` to `CsvLoader.format_row`. + - Renamed `SqlLoader.formatter_fn` to `SqlLoader.format_row`. + - Renamed `CsvExtractionEngine.system_template_generator` to `CsvExtractionEngine.generate_system_template`. + - Renamed `CsvExtractionEngine.user_template_generator` to `CsvExtractionEngine.generate_user_template`. + - Renamed `JsonExtractionEngine.system_template_generator` to `JsonExtractionEngine.generate_system_template`.
+ - Renamed `JsonExtractionEngine.user_template_generator` to `JsonExtractionEngine.generate_user_template`. + - Renamed `PromptResponseRagModule.generate_system_template` to `PromptResponseRagModule.generate_system_template`. + - Renamed `PromptTask.generate_system_template` to `PromptTask.generate_system_template`. + - Renamed `ToolkitTask.generate_assistant_subtask_template` to `ToolkitTask.generate_assistant_subtask_template`. + - Renamed `JsonSchemaRule.template_generator` to `JsonSchemaRule.generate_template`. + - Renamed `ToolkitTask.generate_user_subtask_template` to `ToolkitTask.generate_user_subtask_template`. + - Renamed `TextLoaderRetrievalRagModule.process_query_output_fn` to `TextLoaderRetrievalRagModule.process_query_output`. + - Renamed `FuturesExecutorMixin.futures_executor_fn` to `FuturesExecutorMixin.create_futures_executor`. + - Renamed `VectorStoreTool.process_query_output_fn` to `VectorStoreTool.process_query_output`. + - Renamed `CodeExecutionTask.run_fn` to `CodeExecutionTask.on_run`. + - Renamed `Chat.input_fn` to `Chat.handle_input`. + - Renamed `Chat.output_fn` to `Chat.handle_output`. + - Renamed `EventListener.handler` to `EventListener.on_event`. +- Updated `EventListener.handler` return type to `Optional[BaseEvent | dict]`. +- `BaseTask.parent_outputs` type has changed from `dict[str, str | None]` to `dict[str, BaseArtifact]`. +- `Workflow.context["parent_outputs"]` type has changed from `dict[str, str | None]` to `dict[str, BaseArtifact]`. +- `Pipeline.context["parent_output"]` has changed type from `str | None` to `BaseArtifact | None`. +- `_DefaultsConfig.logging_config` and `Defaults.drivers_config` are now lazily instantiated. +- `griptape.schemas.BaseSchema` now uses `griptape.schemas.UnionField` for `Union` fields. +- `BaseTask.add_parent`/`BaseTask.add_child` now only add the parent/child task to the structure if it is not already present. +- `BaseEventListenerDriver.flush_events()` to flush events from an Event Listener Driver. +- `BaseEventListenerDriver` no longer requires a thread lock for batching events. +- Updated `ToolkitTask` system prompt to retry/fix actions when using native tool calling. +- `Chat` input now uses a slightly customized version of `Rich.prompt.Prompt` by default. +- `Chat` output now uses `Rich.print` by default. +- `Chat.output_fn` now takes an optional kwarg parameter, `stream`. +- Implemented `SerializableMixin` in `Structure`, `BaseTask`, `BaseTool`, and `TaskMemory`. +- `@activity` decorated functions can now accept kwargs that are defined in the activity schema. +- Updated `ToolkitTask` system prompt to no longer mention `memory_name` and `artifact_namespace`. +- Models in `ToolkitTask` with native tool calling no longer need to provide their final answer as `Answer:`. +- `EventListener.event_types` will now listen on child types of any provided type. +- Only install Tool dependencies if the Tool provides a `requirements.txt` and the dependencies are not already met. +- Implemented `RunnableMixin` in `Structure`, `BaseTask`, and `BaseTool`. +- `EventBus`'s Event Listeners are now thread/coroutine-local. Event Listeners from the spawning thread will be automatically copied when using concurrent griptape features like Workflows. + +### Fixed + +- Structures not flushing events when not listening for `FinishStructureRunEvent`. +- `EventListener.event_types` and the argument to `BaseEventListenerDriver.handler` being out of sync.
+- Models occasionally hallucinating `memory_name` and `artifact_namespace` into Tool schemas when using `ToolkitTask`. +- Models occasionally providing overly succinct final answers when using `ToolkitTask`. +- Exception getting raised in `FuturesExecutorMixin.__del__`. +- Issues when using `EventListener` as a context manager in a multi-threaded environment. + +## \[0.33.1\] - 2024-10-11 ### Fixed - Pinned `cohere` at `~5.11.0` to resolve slow dependency resolution. - Missing `exa-py` from `all` extra. -## [0.33.0] - 2024-10-09 +## \[0.33.0\] - 2024-10-09 ## Added + - `Workflow.input_tasks` and `Workflow.output_tasks` to access the input and output tasks of a Workflow. - Ability to pass nested list of `Tasks` to `Structure.tasks` allowing for more complex declarative Structure definitions. - `TavilyWebSearchDriver` to integrate Tavily's web search capabilities. @@ -32,10 +120,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `LocalRulesetDriver` for loading a `Ruleset` from a local `.json` file. - `GriptapeCloudRulesetDriver` for loading a `Ruleset` resource from Griptape Cloud. - Parameter `alias` on `GriptapeCloudConversationMemoryDriver` for fetching a Thread by alias. -- Basic support for OpenAi Structured Output via `OpenAiChatPromptDriver.response_format` parameter. +- Basic support for OpenAi Structured Output via `OpenAiChatPromptDriver.response_format` parameter. - Ability to pass callable to `activity.schema` for dynamic schema generation. ### Changed + - **BREAKING**: Renamed parameters on several classes to `client`: - `bedrock_client` on `AmazonBedrockCohereEmbeddingDriver`. - `bedrock_client` on `AmazonBedrockCohereEmbeddingDriver`. @@ -85,16 +174,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Remove `manifest.yml` requirements for custom tool creation. ### Fixed + - Anthropic native Tool calling. - Empty `ActionsSubtask.thought` being logged. - `RuleMixin` no longer prevents setting `rulesets` _and_ `rules` at the same time. - `PromptTask` will merge in its Structure's Rulesets and Rules. - `PromptTask` not checking whether Structure was set before building Prompt Stack. - `BaseTask.full_context` context being empty when not connected to a Structure. +- Tool calling when using `OpenAiChatPromptDriver` with Groq. -## [0.32.0] - 2024-09-17 +## \[0.32.0\] - 2024-09-17 ### Added + - `BaseArtifact.to_bytes()` method to convert an Artifact's value to bytes. - `BlobArtifact.base64` property for converting a `BlobArtifact`'s value to a base64 string. - `CsvLoader`/`SqlLoader`/`DataframeLoader` `formatter_fn` field for customizing how SQL results are formatted into `TextArtifact`s. @@ -105,8 +197,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Generic type support to `ListArtifact`. - Iteration support to `ListArtifact`. - ### Changed + - **BREAKING**: Removed `CsvRowArtifact`. Use `TextArtifact` instead. - **BREAKING**: Removed `DataframeLoader`. - **BREAKING**: Removed `MediaArtifact`, use `ImageArtifact` or `AudioArtifact` instead. @@ -117,19 +209,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **BREAKING**: Moved `ImageArtifact.prompt` and `ImageArtifact.model` into `ImageArtifact.meta`. - **BREAKING**: `ImageArtifact.format` is now required. - **BREAKING**: Removed the `__all__` declaration from the `griptape.mixins` module. -- Updated `JsonArtifact` value converter to properly handle more types. 
+- Updated `JsonArtifact` value converter to properly handle more types. - `AudioArtifact` now subclasses `BlobArtifact` instead of `MediaArtifact`. - `ImageArtifact` now subclasses `BlobArtifact` instead of `MediaArtifact`. - Removed `__add__` method from `BaseArtifact`, implemented it where necessary. ### Fixed + - Crash when passing "empty" Artifacts or no Artifacts to `CohereRerankDriver`. -## [0.31.0] - 2024-09-03 +## \[0.31.0\] - 2024-09-03 **Note**: This release includes breaking changes. Please refer to the [Migration Guide](./MIGRATION.md#030x-to-031x) for details. ### Added + - Parameter `meta: dict` on `BaseEvent`. - `AzureOpenAiTextToSpeechDriver`. - Ability to use Event Listeners as Context Managers for temporarily setting the Event Bus listeners. @@ -137,17 +231,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Ability to use Drivers Configs as Context Managers for temporarily setting the default Drivers. ### Changed + - **BREAKING**: Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorArtifacts`. - **BREAKING**: Parameter `driver` on `BaseConversationMemory` renamed to `conversation_memory_driver`. - **BREAKING**: `BaseConversationMemory.add_to_prompt_stack` now takes a `prompt_driver` parameter. - **BREAKING**: `BaseConversationMemoryDriver.load` now returns `tuple[list[Run], dict]`. This represents the runs and metadata. - **BREAKING**: `BaseConversationMemoryDriver.store` now takes `runs: list[Run]` and `metadata: dict` as input. - **BREAKING**: Parameter `file_path` on `LocalConversationMemoryDriver` renamed to `persist_file` and is now type `Optional[str]`. -- **BREAKING**: Removed the `__all__` declaration from the `griptape.mixins` module. +- **BREAKING**: Removed the `__all__` declaration from the `griptape.mixins` module. - `Defaults.drivers_config.conversation_memory_driver` now defaults to `LocalConversationMemoryDriver` instead of `None`. - `CsvRowArtifact.to_text()` now includes the header. ### Fixed + - Parsing streaming response with some OpenAI compatible services. - Issue in `PromptSummaryEngine` if there are no artifacts during recursive summarization. - Issue in `GooglePromptDriver` using Tools with no schema. @@ -156,22 +252,24 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Crash when using `CohereRerankDriver` with `CsvRowArtifact`s. - Crash when passing "empty" Artifacts or no Artifacts to `CohereRerankDriver`. - -## [0.30.2] - 2024-08-26 +## \[0.30.2\] - 2024-08-26 ### Fixed -- Ensure thread safety when publishing events by adding a thread lock to batch operations in `BaseEventListenerDriver`. + +- Ensure thread safety when publishing events by adding a thread lock to batch operations in `BaseEventListenerDriver`. - `FileManagerTool` failing to save Artifacts created by `ExtractionTool` with a `CsvExtractionEngine`. -## [0.30.1] - 2024-08-21 +## \[0.30.1\] - 2024-08-21 ### Fixed + - `CsvExtractionEngine` not using provided `Ruleset`s. - Docs examples for Extraction Engines not properly passing in schemas. -## [0.30.0] - 2024-08-20 +## \[0.30.0\] - 2024-08-20 ### Added + - `AstraDbVectorStoreDriver` to support DataStax Astra DB as a vector store. - Ability to set custom schema properties on Tool Activities via `extra_schema_properties`. - Parameter `structure` to `BaseTask`. @@ -185,13 +283,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `QueryTool` for having the LLM query text. 
- Support for bitshift composition in `BaseTask` for adding parent/child tasks. - `JsonArtifact` for handling de/serialization of values. -- `Chat.logger_level` for setting what the `Chat` utility sets the logger level to. +- `Chat.logger_level` for setting what the `Chat` utility sets the logger level to. - `FuturesExecutorMixin` to DRY up and optimize concurrent code across multiple classes. - `utils.execute_futures_list_dict` for executing a dict of lists of futures. - `GriptapeCloudConversationMemoryDriver` to store conversation history in Griptape Cloud. - `griptape.utils.decorators.lazy_property` for creating lazy properties. ### Changed + - **BREAKING**: Removed all uses of `EventPublisherMixin` in favor of `EventBus`. - **BREAKING**: Removed `EventPublisherMixin`. - **BREAKING**: Removed `Pipeline.prompt_driver` and `Workflow.prompt_driver`. Set this via `griptape.configs.Defaults.drivers.prompt_driver` instead. `Agent.prompt_driver` has not been removed. @@ -203,18 +302,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **BREAKING**: `RagContext.output` was changed to `RagContext.outputs` to support multiple outputs. All relevant RAG modules were adjusted accordingly. - **BREAKING**: Removed before and after response modules from `ResponseRagStage`. - **BREAKING**: Moved ruleset and metadata ingestion from standalone modules to `PromptResponseRagModule`. -- **BREAKING**: Dropped `Client` from all Tool names for better naming consistency. -- **BREAKING**: Dropped `_client` suffix from all Tool packages. -- **BREAKING**: Added `Tool` suffix to all Tool names for better naming consistency. -- **BREAKING**: Removed `TextArtifactStorage.query` and `TextArtifactStorage.summarize`. +- **BREAKING**: Dropped `Client` from all Tool names for better naming consistency. +- **BREAKING**: Dropped `_client` suffix from all Tool packages. +- **BREAKING**: Added `Tool` suffix to all Tool names for better naming consistency. +- **BREAKING**: Removed `TextArtifactStorage.query` and `TextArtifactStorage.summarize`. - **BREAKING**: Removed `TextArtifactStorage.rag_engine`, and `TextArtifactStorage.retrieval_rag_module_name`. - **BREAKING**: Removed `TextArtifactStorage.summary_engine`, `TextArtifactStorage.csv_extraction_engine`, and `TextArtifactStorage.json_extraction_engine`. - **BREAKING**: Removed `TaskMemory.summarize_namespace` and `TaskMemory.query_namespace`. - **BREAKING**: Removed `Structure.rag_engine`. - **BREAKING**: Split `JsonExtractionEngine.template_generator` into `JsonExtractionEngine.system_template_generator` and `JsonExtractionEngine.user_template_generator`. - **BREAKING**: Split `CsvExtractionEngine.template_generator` into `CsvExtractionEngine.system_template_generator` and `CsvExtractionEngine.user_template_generator`. -- **BREAKING**: Changed `JsonExtractionEngine.template_schema` from a `run` argument to a class attribute. -- **BREAKING**: Changed `CsvExtractionEngine.column_names` from a `run` argument to a class attribute. +- **BREAKING**: Changed `JsonExtractionEngine.template_schema` from a `run` argument to a class attribute. +- **BREAKING**: Changed `CsvExtractionEngine.column_names` from a `run` argument to a class attribute. - **BREAKING**: Removed `JsonExtractionTask`, and `CsvExtractionTask` use `ExtractionTask` instead. - **BREAKING**: Removed `TaskMemoryClient`, use `QueryClient`, `ExtractionTool`, or `PromptSummaryTool` instead. - **BREAKING**: `BaseTask.add_parent/child` now take a `BaseTask` instead of `str | BaseTask`.
@@ -224,6 +323,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `Chat` now sets the `griptape` logger level to `logging.ERROR`, suppressing all logs except for errors. ### Fixed + - `JsonExtractionEngine` failing to parse json when the LLM outputs more than just the json. - Exception when adding `ErrorArtifact`'s to the Prompt Stack. - Concurrency bug in `BaseVectorStoreDriver.upsert_text_artifacts`. @@ -231,23 +331,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Issue with native Tool calling and streaming with `GooglePromptDriver`. - Description not being used properly in `StructureRunTool`. -## [0.29.2] - 2024-08-16 +## \[0.29.2\] - 2024-08-16 ### Fixed + - `Workflow` threads not being properly cleaned up after completion. - Crash when `ToolAction`s were missing output due to an `ActionsSubtask` exception. -## [0.29.1] - 2024-08-02 +## \[0.29.1\] - 2024-08-02 ### Changed + - Remove `BaseTextArtifact`, revert `CsvRowArtifact` to subclass `TextArtifact`. ### Fixed + - Missing extra for `drivers-text-to-speech-elevenlabs`. -## [0.29.0] - 2024-07-30 +## \[0.29.0\] - 2024-07-30 ### Added + - Native function calling support to `OpenAiChatPromptDriver`, `AzureOpenAiChatPromptDriver`, `AnthropicPromptDriver`, `AmazonBedrockPromptDriver`, `GooglePromptDriver`, `OllamaPromptDriver`, and `CoherePromptDriver`. - `OllamaEmbeddingDriver` for generating embeddings with Ollama. - `GriptapeCloudKnowledgeBaseVectorStoreDriver` to query Griptape Cloud Knowledge Bases. @@ -269,6 +373,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Optional `params` field to `WebSearch`'s `search` schema that the LLM can be steered into using. ### Changed + - **BREAKING**: `BaseVectorStoreDriver.upsert_text_artifacts` optional arguments are now keyword-only arguments. - **BREAKING**: `BaseVectorStoreDriver.upsert_text_artifact` optional arguments are now keyword-only arguments. - **BREAKING**: `BaseVectorStoreDriver.upsert_text` optional arguments are now keyword-only arguments. @@ -296,23 +401,29 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `TextArtifact` now inherits from `BaseTextArtifact`. ### Fixed + - Parameter `count` for `QdrantVectorStoreDriver.query` now optional as per documentation. - Path issues on Windows with `LocalFileManagerDriver` and `AmazonS3FileManagerDriver`. -## [0.28.2] - 2024-07-12 +## \[0.28.2\] - 2024-07-12 + ### Fixed + - Conversation Memory being incorrectly inserted into the `PromptTask.prompt_stack` when no system content is present. -## [0.28.1] - 2024-07-10 +## \[0.28.1\] - 2024-07-10 ### Fixed + - Sending empty system content in `PromptTask`. - Throttling issues with `DuckDuckGoWebSearchDriver`. -## [0.28.0] - 2024-07-09 +## \[0.28.0\] - 2024-07-09 + ### Added + - `RagEngine` is an abstraction for implementing modular RAG pipelines. - - `RagContext` is a container object for passing around RAG context. + - `RagContext` is a container object for passing around RAG context. - RAG stages: - `QueryRagStage` for parsing and expanding queries. - `RetrievalRagStage` for retrieving content. @@ -353,7 +464,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `ImageMessageContent` for storing image content in a `Message`. - Support for adding `TextArtifact`s, `ImageArtifact`s, and `ListArtifact`s to `PromptStack`. 
- Support for image inputs to `OpenAiChatPromptDriver`, `AzureOpenAiChatPromptDriver`, `AmazonBedrockPromptDriver`, `AnthropicPromptDriver`, and `GooglePromptDriver`. -- Input/output token usage metrics to all Prompt Drivers. +- Input/output token usage metrics to all Prompt Drivers. - `FinishPromptEvent.input_token_count` and `FinishPromptEvent.output_token_count`. - Support for storing Artifacts as inputs/outputs in Conversation Memory Runs. - `Agent.input` for passing Artifacts as input. @@ -362,6 +473,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `Reference` for supporting artifact citations in loaders and RAG engine modules. ### Changed + - **BREAKING**: Moved/renamed `griptape.utils.PromptStack` to `griptape.common.PromptStack`. - **BREAKING**: Renamed `PromptStack.inputs` to `PromptStack.messages`. - **BREAKING**: Moved `PromptStack.USER_ROLE`, `PromptStack.ASSISTANT_ROLE`, and `PromptStack.SYSTEM_ROLE` to `Message`. @@ -385,28 +497,33 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Default Prompt Driver model in `GoogleStructureConfig` to `gemini-1.5-pro`. ### Fixed + - `CoherePromptDriver` to properly handle empty history. -- `StructureVisualizer.to_url()` by wrapping task IDs in single quotes. +- `StructureVisualizer.to_url()` by wrapping task IDs in single quotes. -## [0.27.2] - 2024-06-27 +## \[0.27.2\] - 2024-06-27 ### Fixed + - Avoid adding duplicate Tokenizer stop sequences in a `ToolkitTask`. - Fixed token count calculation in `VectorQueryEngine`. -## [0.27.1] - 2024-06-20 +## \[0.27.1\] - 2024-06-20 ### Added + - Support for Claude 3.5 Sonnet in `AnthropicPromptDriver` and `AmazonBedrockPromptDriver`. ### Changed + - Base Tool schema so that `input` is optional when no Tool Activity schema is set. -- Tool Task system prompt for better results with lower-end models. +- Tool Task system prompt for better results with lower-end models. - Default Prompt Driver model to Claude 3.5 Sonnet in `AnthropicStructureConfig` and `AmazonBedrockStructureConfig.` -## [0.27.0] - 2024-06-19 +## \[0.27.0\] - 2024-06-19 ### Added + - `BaseTask.add_child()` to add a child task to a parent task. - `BaseTask.add_children()` to add multiple child tasks to a parent task. - `BaseTask.add_parent()` to add a parent task to a child task. @@ -419,13 +536,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `AmazonSageMakerJumpstartEmbeddingDriver.custom_attributes` for setting custom attributes when invoking an endpoint. - `ToolkitTask.response_stop_sequence` for overriding the default Chain of Thought stop sequence. - `griptape.utils.StructureVisualizer` for visualizing Workflow structures with Mermaid.js -- `BaseTask.parents_outputs` to get the textual output of all parent tasks. +- `BaseTask.parents_outputs` to get the textual output of all parent tasks. - `BaseTask.parents_output_text` to get a concatenated string of all parent tasks' outputs. - `parents_output_text` to Workflow context. - `OllamaPromptModelDriver` for using models with Ollama. - Parameter `output` on `Structure` as a convenience for `output_task.output` ### Changed + - **BREAKING**: `Workflow` no longer modifies task relationships when adding tasks via `tasks` init param, `add_tasks()` or `add_task()`. Previously, adding a task would automatically add the previously added task as its parent. 
Existing code that relies on this behavior will need to be updated to explicitly add parent/child relationships using the API offered by `BaseTask`. - **BREAKING**: Removed `AmazonBedrockPromptDriver.prompt_model_driver` as it is no longer needed with the `AmazonBedrockPromptDriver` Converse API implementation. - **BREAKING**: Removed `BedrockClaudePromptModelDriver`. @@ -463,6 +581,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Updated template `templates/tasks/tool_task/system.j2`. ### Fixed + - `Workflow.insert_task()` no longer inserts duplicate tasks when given multiple parent tasks. - Performance issue in `OpenAiChatPromptDriver` when extracting unused rate-limiting headers. - Streaming not working when using deprecated `Structure.stream` field. @@ -474,11 +593,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `TextArtifacts` contained in `ListArtifact` returned by `WebSearch.search` to properly formatted stringified JSON. - Structure run args not being set immediately. - Input and output logging in BaseAudioInputTasks and BaseAudioGenerationTasks -- Validation of `max_tokens` < 0 on `BaseChunker` +- Validation of `max_tokens` \< 0 on `BaseChunker` -## [0.26.0] - 2024-06-04 +## \[0.26.0\] - 2024-06-04 ### Added + - `AzureOpenAiStructureConfig` for providing Structures with all Azure OpenAI Driver configuration. - `AzureOpenAiVisionImageQueryDriver` to support queries on images using Azure's OpenAI Vision models. - `AudioLoader` for loading audio content into an `AudioArtifact`. @@ -488,10 +608,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `PusherEventListenerDriver` to enable sending of framework events over a Pusher WebSocket. ### Changed + - **BREAKING**: Updated OpenAI-based image query drivers to remove Vision from the name. - **BREAKING**: `off_prompt` now defaults to `False` on all Tools, making Task Memory something that must be explicitly opted into. -- **BREAKING**: Removed `StructureConfig.global_drivers`. Pass Drivers directly to the Structure Config instead. -- **BREAKING**: Removed `StructureConfig.task_memory` in favor of configuring directly on the Structure. +- **BREAKING**: Removed `StructureConfig.global_drivers`. Pass Drivers directly to the Structure Config instead. +- **BREAKING**: Removed `StructureConfig.task_memory` in favor of configuring directly on the Structure. - **BREAKING**: Updated OpenAI-based image query drivers to remove Vision from the name. - **BREAKING**: `off_prompt` now defaults to `False` on all Tools, making Task Memory something that must be explicitly opted into. - **BREAKING**: `AmazonSageMakerPromptDriver.model` parameter, which gets passed to `SageMakerRuntime.Client.invoke_endpoint` as `EndpointName`, is now renamed to `AmazonSageMakerPromptDriver.endpoint`. @@ -504,27 +625,32 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Error message to be more helpful when importing optional dependencies. ### Fixed + - Extra fields being excluded when using `SerializableMixin.from_dict`. -- Validation of `max_tokens` < 0 on `BaseChunker` +- Validation of `max_tokens` \< 0 on `BaseChunker` -## [0.25.1] - 2024-05-15 +## \[0.25.1\] - 2024-05-15 ### Fixed + - Honor `namespace` in `RedisVectorStoreDriver.query()`. - Correctly set the `meta`, `score`, and `vector` fields of query result returned from `RedisVectorStoreDriver.query()`. 
- Standardize behavior between omitted and empty actions list when initializing `ActionsSubtask`. ### Added + - Optional event batching on Event Listener Drivers. - `id` field to all events. ### Changed + - Default behavior of Event Listener Drivers to batch events. - Default behavior of OpenAiStructureConfig to utilize `gpt-4o` for prompt_driver. -## [0.25.0] - 2024-05-06 +## \[0.25.0\] - 2024-05-06 ### Added + - `list_files_from_disk` activity to `FileManager` Tool. - Support for Drivers in `EventListener`. - `AmazonSqsEventListenerDriver` for sending events to an Amazon SQS queue. @@ -547,6 +673,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `LocalStructureRunDriver` for running Structures in the same run-time environment as the code that is running the Structure. ### Changed + - **BREAKING**: Secret fields (ex: api_key) removed from serialized Drivers. - **BREAKING**: Remove `FileLoader`. - **BREAKING**: `CsvLoader` no longer accepts `str` file paths as a source. It will now accept the content of the CSV file as a `str` or `bytes` object. @@ -561,16 +688,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Updated `EventListener.handler`'s behavior so that the return value will be passed to the `EventListenerDriver.try_publish_event_payload`'s `event_payload` parameter. ### Fixed + - Type hint for parameter `azure_ad_token_provider` on Azure OpenAI drivers to `Optional[Callable[[], str]]`. - Missing parameters `azure_ad_token` and `azure_ad_token_provider` on the default client for `AzureOpenAiCompletionPromptDriver`. -## [0.24.2] - 2024-04-04 +## \[0.24.2\] - 2024-04-04 - Fixed FileManager.load_files_from_disk schema. -## [0.24.1] - 2024-03-28 +## \[0.24.1\] - 2024-03-28 -### Fixed +### Fixed - Fixed boto3 type-checking stub dependency. @@ -578,14 +706,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Use `schema` instead of `jsonschema` for JSON validation. -## [0.24.0] - 2024-03-27 +## \[0.24.0\] - 2024-03-27 ### Added + - Every subtask in `ToolkitTask` can now execute multiple actions in parallel. - Added `BaseActionSubtaskEvent.subtask_actions`. - Support for `text-embedding-3-small` and `text-embedding-3-large` models. -- `GooglePromptDriver` and `GoogleTokenizer` for use with `gemini-pro`. -- `GoogleEmbeddingDriver` for use with `embedding-001`. +- `GooglePromptDriver` and `GoogleTokenizer` for use with `gemini-pro`. +- `GoogleEmbeddingDriver` for use with `embedding-001`. - `GoogleStructureConfig` for providing Structures with Google Prompt and Embedding Driver configuration. - Support for `claude-3-opus`, `claude-3-sonnet`, and `claude-3-haiku` in `AnthropicPromptDriver`. - Support for `anthropic.claude-3-sonnet-20240229-v1:0` and `anthropic.claude-3-haiku-20240307-v1:0` in `BedrockClaudePromptModelDriver`. @@ -595,14 +724,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `BaseWebScraperDriver` allowing multiple web scraping implementations. - `TrafilaturaWebScraperDriver` for scraping text from web pages using trafilatura. - `MarkdownifyWebScraperDriver` for scraping text from web pages using playwright and converting to markdown using markdownify. -- `VoyageAiEmbeddingDriver` for use with VoyageAi's embedding models. +- `VoyageAiEmbeddingDriver` for use with VoyageAi's embedding models. - `AnthropicStructureConfig` for providing Structures with Anthropic Prompt and VoyageAi Embedding Driver configuration. 
- `QdrantVectorStoreDriver` to integrate with Qdrant vector databases. ### Fixed + - Improved system prompt in `ToolTask` to support more use cases. ### Changed + - **BREAKING**: `ActionSubtask` was renamed to `ActionsSubtask`. - **BREAKING**: Removed `subtask_action_name`, `subtask_action_path`, and `subtask_action_input` in `BaseActionSubtaskEvent`. - **BREAKING**: `OpenAiVisionImageQueryDriver` field `model` no longer defaults to `gpt-4-vision-preview` and must be specified @@ -614,30 +745,33 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `OpenAiVisionImageQueryDriver` now has a required field `max_tokens` that defaults to 256 - `GriptapeCloudStructureRunDriver` now outputs a `BaseArtifact` instead of a `TextArtifact` -## [0.23.2] - 2024-03-15 +## \[0.23.2\] - 2024-03-15 ### Fixed + - Deprecation warnings not displaying for `Structure.prompt_driver`, `Structure.embedding_driver`, and `Structure.stream`. - `DummyException` error message not fully displaying. - `StructureConfig.task_memory` not defaulting to using `StructureConfig.global_drivers` by default. -## [0.23.1] - 2024-03-07 +## \[0.23.1\] - 2024-03-07 ### Fixed -- Action Subtask incorrectly raising an exception for actions without an input. -- Incorrect `GriptapeCloudKnowledgeBaseClient`'s API URLs. + +- Action Subtask incorrectly raising an exception for actions without an input. +- Incorrect `GriptapeCloudKnowledgeBaseClient`'s API URLs. - Issue with Tool Task system prompt causing the LLM to generate an invalid action. -## [0.23.0] - 2024-02-26 +## \[0.23.0\] - 2024-02-26 + +### Added -### Added - Image-to-image generation support for OpenAi Dall-E 2 model. - Image tools support loading artifacts from memory. - `AzureMongoDbVectorStoreDriver` for using CosmosDB with MongoDB vCore API. - `vector_path` field on `MongoDbAtlasVectorStoreDriver`. - `LeonardoImageGenerationDriver` supports image to image generation. -- `OpenAiStructureConfig` for providing Structures with all OpenAi Driver configuration. -- `AmazonBedrockStructureConfig` for providing Structures with all Amazon Bedrock Driver configuration. +- `OpenAiStructureConfig` for providing Structures with all OpenAi Driver configuration. +- `AmazonBedrockStructureConfig` for providing Structures with all Amazon Bedrock Driver configuration. - `StructureConfig` for building your own Structure configuration. - `JsonExtractionTask` for convenience over using `ExtractionTask` with a `JsonExtractionEngine`. - `CsvExtractionTask` for convenience over using `ExtractionTask` with a `CsvExtractionEngine`. @@ -645,11 +779,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `ImageQueryTool` allowing an Agent to make queries on images on disk or in Task Memory. - `ImageQueryTask` and `ImageQueryEngine`. -### Fixed +### Fixed + - `BedrockStableDiffusionImageGenerationModelDriver` request parameters for SDXLv1 (`stability.stable-diffusion-xl-v1`). - `BedrockStableDiffusionImageGenerationModelDriver` correctly handles the CONTENT_FILTERED response case. ### Changed + - **BREAKING**: Make `index_name` on `MongoDbAtlasVectorStoreDriver` a required field. - **BREAKING**: Remove `create_index()` from `MarqoVectorStoreDriver`, `OpenSearchVectorStoreDriver`, `PineconeVectorStoreDriver`, `RedisVectorStoreDriver`. - **BREAKING**: `ImageLoader().load()` now accepts image bytes instead of a file path. 
@@ -664,60 +800,80 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `InpaintingImageGenerationTask.image_generation_engine` now defaults to an `InpaintingImageGenerationEngine` with an Image Generation Driver default of `Structure.config.global_drivers.image_generation_driver`. - `OutpaintingImageGenerationTask.image_generation_engine` now defaults to an `OutpaintingImageGenerationEngine` with an Image Generation Driver default of `Structure.config.global_drivers.image_generation_driver`. -## [0.22.3] - 2024-01-22 +## \[0.22.3\] - 2024-01-22 ### Fixed + - `ToolkitTask`'s user subtask prompt occasionally causing the Task to end prematurely. -## [0.22.2] - 2024-01-18 +## \[0.22.2\] - 2024-01-18 ### Fixed + - `ToolkitTask`'s user subtask prompt occasionally causing a loop with Chain of Thought. ### Security -- Updated stale dependencies [CVE-2023-50447, CVE-2024-22195, and CVE-2023-36464] -## [0.22.1] - 2024-01-12 +- Updated stale dependencies \[CVE-2023-50447, CVE-2024-22195, and CVE-2023-36464\] + +## \[0.22.1\] - 2024-01-12 ### Fixed + - Action Subtasks incorrectly outputting the Task input after failing to follow the ReAct prompt. -## [0.22.0] - 2024-01-11 +## \[0.22.0\] - 2024-01-11 ### Added -- `PromptImageGenerationEngine` for generating images from text prompts. + +- `PromptImageGenerationEngine` for generating images from text prompts. + - `VariationImageGenerationEngine` for generating variations of an input image according to a text prompt. -- `InpaintingImageGenerationEngine` for modifying an input image according to a text prompt within the bounds of a mask defined by a mask image. + +- `InpaintingImageGenerationEngine` for modifying an input image according to a text prompt within the bounds of a mask defined by a mask image. + - `OutpaintingImageGenerationEngine` for modifying an input image according to a text prompt outside the bounds of a mask defined by a mask image. - `PromptImageGenerationClient` for enabling an LLM to use the `PromptImageGenerationEngine`. + - `VariationImageGenerationClient` for enabling an LLM to use the `VariationImageGenerationEngine`. + - `InpaintingImageGenerationClient` for enabling an LLM to use the `InpaintingImageGenerationEngine`. + - `OutpaintingImageGenerationClient` for enabling an LLM to use the `OutpaintingImageGenerationEngine`. - `OpenAiImageGenerationDriver` for use with OpenAI's image generation models. + - `LeonardoImageGenerationDriver` for use with Leonardo AI's image generation models. + - `AmazonBedrockImageGenerationDriver` for use with Amazon Bedrock's image generation models; requires an Image Generation Model Driver. + - `BedrockTitanImageGenerationModelDriver` for use with Amazon Bedrock's Titan image generation. - `ImageArtifact` for storing image data; used heavily by the image Engines, Tasks, and Drivers. + - `ImageLoader` for loading image files into `ImageArtifact`s. - Support for all Tokenizers in `OpenAiChatPromptDriver`, enabling OpenAI drop-in clients such as Together AI. + - `AmazonSageMakerJumpstartEmbeddingDriver` for using Amazon SageMaker to generate embeddings. Thanks @KaushikIyer16! + - Claude 2.1 support in `AnthropicPromptDriver` and `AmazonBedrockPromptDriver` via `BedrockClaudePromptModelDriver`. + - `CodeExecutionTask` for executing code as a Task without the need for an LLM. -- `BedrockLlamaPromptModelDriver` for using Llama models on Amazon Bedrock. +- `BedrockLlamaPromptModelDriver` for using Llama models on Amazon Bedrock.
### Fixed -- `MongoDbAtlasVectorStore` namespace not being used properly when querying. + +- `MongoDbAtlasVectorStore` namespace not being used properly when querying. - Miscellaneous type errors throughout the codebase. - Remove unused section from `ToolTask` system prompt template. - Structure execution args being cleared after run, preventing inspection of the Structure's `input_task`'s `input`. - Unhandled `SqlClient` exception. Thanks @michal-repo! ### Changed + - **BREAKING**: Rename `input_template` field to `input` in Tasks that take a text input. - **BREAKING**: Rename `BedrockTitanEmbeddingDriver` to `AmazonBedrockTitanEmbeddingDriver`. - **BREAKING**: Rename `AmazonBedrockStableDiffusionImageGenerationModelDriver` to `BedrockStableDiffusionImageGenerationModelDriver`. diff --git a/MIGRATION.md b/MIGRATION.md index 9dd5a6fef..1764a7787 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,6 +1,205 @@ # Migration Guide This document provides instructions for migrating your codebase to accommodate breaking changes introduced in new versions of Griptape. + +## 0.33.X to 0.34.X + +### `AnthropicDriversConfig` Embedding Driver + +`AnthropicDriversConfig` no longer bundles `VoyageAiEmbeddingDriver`. If you rely on embeddings when using Anthropic, you must specify an Embedding Driver yourself. + +#### Before + +```python +from griptape.configs import Defaults +from griptape.configs.drivers import AnthropicDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = AnthropicDriversConfig() + +agent = Agent() +``` + +#### After + +```python +from griptape.configs import Defaults +from griptape.configs.drivers import AnthropicDriversConfig +from griptape.drivers import VoyageAiEmbeddingDriver, LocalVectorStoreDriver + +Defaults.drivers_config = AnthropicDriversConfig( + embedding_driver=VoyageAiEmbeddingDriver(), + vector_store_driver=LocalVectorStoreDriver( + embedding_driver=VoyageAiEmbeddingDriver() + ) +) +``` + +### Renamed Callables + +Many callables have been renamed for consistency. Update your code to use the new names using the [CHANGELOG.md](https://github.com/griptape-ai/griptape/pull/1275/files#diff-06572a96a58dc510037d5efa622f9bec8519bc1beab13c9f251e97e657a9d4ed) as the source of truth. + + +### Removed `CompletionChunkEvent` + +`CompletionChunkEvent` has been removed. There is now `BaseChunkEvent` with children `TextChunkEvent` and `ActionChunkEvent`. `BaseChunkEvent` can replace `completion_chunk_event.token` by doing `str(base_chunk_event)`. 
+ +#### Before + +```python +def handler_fn_stream(event: CompletionChunkEvent) -> None: + print(f"CompletionChunkEvent: {event.to_json()}") + +def handler_fn_stream_text(event: CompletionChunkEvent) -> None: + # This prints out Tool actions with no easy way + # to filter them out + print(event.token, end="", flush=True) + +EventListener(handler=handler_fn_stream, event_types=[CompletionChunkEvent]) +EventListener(handler=handler_fn_stream_text, event_types=[CompletionChunkEvent]) +``` + +#### After + +```python +def handler_fn_stream(event: BaseChunkEvent) -> None: + print(str(event), end="", flush=True) + # print out each child event type + if isinstance(event, TextChunkEvent): + print(f"TextChunkEvent: {event.to_json()}") + if isinstance(event, ActionChunkEvent): + print(f"ActionChunkEvent: {event.to_json()}") + + +def handler_fn_stream_text(event: TextChunkEvent) -> None: + # This will only be text coming from the + # prompt driver, not Tool actions + print(event.token, end="", flush=True) + +EventListener(handler=handler_fn_stream, event_types=[BaseChunkEvent]) +EventListener(handler=handler_fn_stream_text, event_types=[TextChunkEvent]) +``` + +### `EventListener.handler` behavior, `driver` parameter rename + +Returning `None` from the `handler` function now causes the event to not be published to the `EventListenerDriver`. +The `handler` function can now return a `BaseEvent` object. + +#### Before + +```python +def handler_fn_return_none(event: BaseEvent) -> Optional[dict]: + # This causes the `BaseEvent` object to be passed to the EventListenerDriver + return None + +def handler_fn_return_dict(event: BaseEvent) -> Optional[dict]: + # This causes the returned dictionary to be passed to the EventListenerDriver + return { + "key": "value" + } + +EventListener(handler=handler_fn_return_none, driver=driver) +EventListener(handler=handler_fn_return_dict, driver=driver) +``` + +#### After + +```python +def handler_fn_return_none(event: BaseEvent) -> Optional[dict | BaseEvent]: + # This causes the `BaseEvent` object to NOT get passed to the EventListenerDriver + return None + +def handler_fn_return_dict(event: BaseEvent) -> Optional[dict | BaseEvent]: + # This causes the returned dictionary to be passed to the EventListenerDriver + return { + "key": "value" + } + +def handler_fn_return_base_event(event: BaseEvent) -> Optional[dict | BaseEvent]: + # This causes the returned `BaseEvent` object to be passed to the EventListenerDriver + return ChildClassOfBaseEvent() + +# `driver` has been renamed to `event_listener_driver` +EventListener(handler=handler_fn_return_none, event_listener_driver=driver) +EventListener(handler=handler_fn_return_dict, event_listener_driver=driver) +EventListener(handler=handler_fn_return_base_event, event_listener_driver=driver) +``` + +### Removed `BaseEventListenerDriver.publish_event` `flush` argument. + +`BaseEventListenerDriver.publish_event` no longer takes a `flush` argument. If you need to flush the event, call `BaseEventListenerDriver.flush_events` directly. + +#### Before + +```python +event_listener_driver.publish_event(event, flush=True) +``` + +#### After + +```python +event_listener_driver.publish_event(event) +event_listener_driver.flush_events() +``` + +### Moved `observable` decorator location. + +The `observable` decorator has been moved to `griptape.common.decorators`. Update your imports accordingly.
+ + +#### Before + +```python +from griptape.common.observable import observable +``` + +#### After + +```python +from griptape.common.decorators import observable +``` + +### Removed `HuggingFacePipelinePromptDriver.params` + +`HuggingFacePipelinePromptDriver.params` has been removed. Use `HuggingFacePipelinePromptDriver.extra_params` instead. + +#### Before + +```python +driver = HuggingFacePipelinePromptDriver( + params={"max_length": 50} +) +``` + +#### After + +```python +driver = HuggingFacePipelinePromptDriver( + extra_params={"max_length": 50} +) +``` + +### Renamed `execute` to `run` in several places + +`execute` has been renamed to `run` in several places. Update your code accordingly. + + +#### Before + +```python +task = PromptTask() +if task.can_execute(): + task.execute() +``` + +#### After + +```python +task = PromptTask() +if task.can_run(): + task.run() +``` + ## 0.32.X to 0.33.X ### Removed `DataframeLoader` @@ -14,6 +213,7 @@ DataframeLoader().load(df) ``` #### After + ```python # Convert the dataframe to csv bytes and parse it CsvLoader().parse(bytes(df.to_csv(line_terminator='\r\n', index=False), encoding='utf-8')) @@ -24,12 +224,14 @@ CsvLoader().parse(bytes(df.to_csv(line_terminator='\r\n', index=False), encoding ### `TextLoader`, `PdfLoader`, `ImageLoader`, and `AudioLoader` now take a `str | PathLike` instead of `bytes`. #### Before + ```python PdfLoader().load(Path("attention.pdf").read_bytes()) PdfLoader().load_collection([Path("attention.pdf").read_bytes(), Path("CoT.pdf").read_bytes()]) ``` #### After + ```python PdfLoader().load("attention.pdf") PdfLoader().load_collection([Path("attention.pdf"), "CoT.pdf"]) @@ -46,7 +248,7 @@ You can now pass the file path directly to the Loader. PdfLoader().load(load_file("attention.pdf").read_bytes()) PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values())) ``` - + ```python PdfLoader().load("attention.pdf") PdfLoader().load_collection(["attention.pdf", "CoT.pdf"]) @@ -68,6 +270,7 @@ vector_store.upsert_text_artifacts( ``` #### After + ```python artifact = PdfLoader().load("attention.pdf") chunks = Chunker().chunk(artifact) @@ -78,27 +281,105 @@ vector_store.upsert_text_artifacts( ) ``` - ### Removed `torch` extra from `transformers` dependency The `torch` extra has been removed from the `transformers` dependency. If you require `torch`, install it separately. #### Before + ```bash pip install griptape[drivers-prompt-huggingface-hub] ``` #### After + ```bash pip install griptape[drivers-prompt-huggingface-hub] pip install torch ``` -### `CsvLoader`, `DataframeLoader`, and `SqlLoader` return types +### Removed `MediaArtifact` + +`MediaArtifact` has been removed. Use `ImageArtifact` or `AudioArtifact` instead. + +#### Before + +```python +image_media = MediaArtifact( + b"image_data", + media_type="image", + format="jpeg" +) + +audio_media = MediaArtifact( + b"audio_data", + media_type="audio", + format="wav" +) +``` + +#### After + +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg" +) + +audio_artifact = AudioArtifact( + b"audio_data", + format="wav" +) +``` + +### `ImageArtifact.format` is now required + +`ImageArtifact.format` is now a required parameter. Update any code that does not provide a `format` parameter. + +#### Before + +```python +image_artifact = ImageArtifact( + b"image_data" +) +``` + +#### After + +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg" +) +``` + +### Removed `CsvRowArtifact` + +`CsvRowArtifact` has been removed. 
Use `TextArtifact` instead. + +#### Before + +```python +artifact = CsvRowArtifact({"name": "John", "age": 30}) +print(artifact.value) # {"name": "John", "age": 30} +print(type(artifact.value)) # +``` + +#### After + +```python +artifact = TextArtifact("name: John\nage: 30") +print(artifact.value) # name: John\nage: 30 +print(type(artifact.value)) # +``` + +If you require storing a dictionary as an Artifact, you can use `GenericArtifact` instead. + +### `CsvLoader`, `DataframeLoader`, and `SqlLoader` return types `CsvLoader`, `DataframeLoader`, and `SqlLoader` now return a `list[TextArtifact]` instead of `list[CsvRowArtifact]`. -If you require a dictionary, set a custom `formatter_fn` and then parse the text to a dictionary. +If you require a dictionary, set a custom `formatter_fn` and then parse the text to a dictionary. #### Before @@ -110,6 +391,7 @@ print(type(results[0].value)) # ``` #### After + ```python results = CsvLoader().load(Path("people.csv").read_text()) @@ -125,10 +407,40 @@ dict_results = [json.loads(result.value) for result in results] print(dict_results[0]) # {"name": "John", "age": 30} print(type(dict_results[0])) # ``` - + +### Moved `ImageArtifact.prompt` and `ImageArtifact.model` to `ImageArtifact.meta` + +`ImageArtifact.prompt` and `ImageArtifact.model` have been moved to `ImageArtifact.meta`. + +#### Before + +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg", + prompt="Generate an image of a cat", + model="DALL-E" +) + +print(image_artifact.prompt, image_artifact.model) # Generate an image of a cat, DALL-E +``` + +#### After + +```python +image_artifact = ImageArtifact( + b"image_data", + format="jpeg", + meta={"prompt": "Generate an image of a cat", "model": "DALL-E"} +) + +print(image_artifact.meta["prompt"], image_artifact.meta["model"]) # Generate an image of a cat, DALL-E +``` + Renamed `GriptapeCloudKnowledgeBaseVectorStoreDriver` to `GriptapeCloudVectorStoreDriver`. #### Before + ```python from griptape.drivers.griptape_cloud_knowledge_base_vector_store_driver import GriptapeCloudKnowledgeBaseVectorStoreDriver @@ -136,6 +448,7 @@ driver = GriptapeCloudKnowledgeBaseVectorStoreDriver(...) ``` #### After + ```python from griptape.drivers.griptape_cloud_vector_store_driver import GriptapeCloudVectorStoreDriver @@ -147,6 +460,7 @@ driver = GriptapeCloudVectorStoreDriver(...) `OpenAiChatPromptDriver.response_format` is now structured as the `openai` SDK accepts it. #### Before + ```python driver = OpenAiChatPromptDriver( response_format="json_object" @@ -154,6 +468,7 @@ driver = OpenAiChatPromptDriver( ``` #### After + ```python driver = OpenAiChatPromptDriver( response_format={"type": "json_object"} @@ -162,6 +477,85 @@ driver = OpenAiChatPromptDriver( ## 0.31.X to 0.32.X +### Removed `DataframeLoader` + +`DataframeLoader` has been removed. Use `CsvLoader.parse` or build `TextArtifact`s from the dataframe instead. + +#### Before + +```python +DataframeLoader().load(df) +``` + +#### After + +```python +# Convert the dataframe to csv bytes and parse it +CsvLoader().parse(bytes(df.to_csv(line_terminator='\r\n', index=False), encoding='utf-8')) +# Or build TextArtifacts from the dataframe +[TextArtifact(row) for row in source.to_dict(orient="records")] +``` + +### `TextLoader`, `PdfLoader`, `ImageLoader`, and `AudioLoader` now take a `str | PathLike` instead of `bytes`. 
+ +#### Before + +```python +PdfLoader().load(Path("attention.pdf").read_bytes()) +PdfLoader().load_collection([Path("attention.pdf").read_bytes(), Path("CoT.pdf").read_bytes()]) +``` + +#### After + +```python +PdfLoader().load("attention.pdf") +PdfLoader().load_collection([Path("attention.pdf"), "CoT.pdf"]) +``` + +### Removed `fileutils.load_file` and `fileutils.load_files` + +`griptape.utils.file_utils.load_file` and `griptape.utils.file_utils.load_files` have been removed. +You can now pass the file path directly to the Loader. + +#### Before + +```python +PdfLoader().load(load_file("attention.pdf").read_bytes()) +PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values())) +``` + +```python +PdfLoader().load("attention.pdf") +PdfLoader().load_collection(["attention.pdf", "CoT.pdf"]) +``` + +### Loaders no longer chunk data + +Loaders no longer chunk the data after loading it. If you need to chunk the data, use a [Chunker](https://docs.griptape.ai/stable/griptape-framework/data/chunkers/) after loading the data. + +#### Before + +```python +chunks = PdfLoader().load("attention.pdf") +vector_store.upsert_text_artifacts( + { + "griptape": chunks, + } +) +``` + +#### After + +```python +artifact = PdfLoader().load("attention.pdf") +chunks = Chunker().chunk(artifact) +vector_store.upsert_text_artifacts( + { + "griptape": chunks, + } +) +``` + ### Removed `MediaArtifact` `MediaArtifact` has been removed. Use `ImageArtifact` or `AudioArtifact` instead. @@ -180,9 +574,10 @@ audio_media = MediaArtifact( media_type="audio", format="wav" ) -``` +``` #### After + ```python image_artifact = ImageArtifact( b"image_data", @@ -208,6 +603,7 @@ image_artifact = ImageArtifact( ``` #### After + ```python image_artifact = ImageArtifact( b"image_data", @@ -228,6 +624,7 @@ print(type(artifact.value)) # ``` #### After + ```python artifact = TextArtifact("name: John\nage: 30") print(artifact.value) # name: John\nage: 30 @@ -236,11 +633,11 @@ print(type(artifact.value)) # If you require storing a dictionary as an Artifact, you can use `GenericArtifact` instead. -### `CsvLoader`, `DataframeLoader`, and `SqlLoader` return types +### `CsvLoader`, `DataframeLoader`, and `SqlLoader` return types `CsvLoader`, `DataframeLoader`, and `SqlLoader` now return a `list[TextArtifact]` instead of `list[CsvRowArtifact]`. -If you require a dictionary, set a custom `formatter_fn` and then parse the text to a dictionary. +If you require a dictionary, set a custom `formatter_fn` and then parse the text to a dictionary. #### Before @@ -252,9 +649,11 @@ print(type(results[0].value)) # ``` #### After + ```python results = CsvLoader().load(Path("people.csv").read_text()) +print(type(results)) # print(results[0].value) # name: John\nAge: 30 print(type(results[0].value)) # @@ -267,7 +666,7 @@ dict_results = [json.loads(result.value) for result in results] print(dict_results[0]) # {"name": "John", "age": 30} print(type(dict_results[0])) # ``` - + ### Moved `ImageArtifact.prompt` and `ImageArtifact.model` to `ImageArtifact.meta` `ImageArtifact.prompt` and `ImageArtifact.model` have been moved to `ImageArtifact.meta`. 
@@ -286,6 +685,7 @@ print(image_artifact.prompt, image_artifact.model) # Generate an image of a cat, ``` #### After + ```python image_artifact = ImageArtifact( b"image_data", @@ -296,7 +696,6 @@ image_artifact = ImageArtifact( print(image_artifact.meta["prompt"], image_artifact.meta["model"]) # Generate an image of a cat, DALL-E ``` - ## 0.30.X to 0.31.X ### Exceptions Over `ErrorArtifact`s @@ -305,6 +704,7 @@ Drivers, Loaders, and Engines now raise exceptions rather than returning `ErrorA Update any logic that expects `ErrorArtifact` to handle exceptions instead. #### Before + ```python artifacts = WebLoader().load("https://www.griptape.ai") @@ -313,6 +713,7 @@ if isinstance(artifacts, ErrorArtifact): ``` #### After + ```python try: artifacts = WebLoader().load("https://www.griptape.ai") @@ -325,6 +726,7 @@ except Exception as e: `LocalConversationMemoryDriver.file_path` has been renamed to `persist_file` and is now `Optional[str]`. If `persist_file` is not passed as a parameter, nothing will be persisted and no errors will be raised. `LocalConversationMemoryDriver` is now the default driver in the global `Defaults` object. #### Before + ```python local_driver_with_file = LocalConversationMemoryDriver( file_path="my_file.json" @@ -337,6 +739,7 @@ assert local_driver.file_path == "griptape_memory.json" ``` #### After + ```python local_driver_with_file = LocalConversationMemoryDriver( persist_file="my_file.json" @@ -353,6 +756,7 @@ assert local_driver.persist_file is None `BaseConversationMemoryDriver.driver` has been renamed to `conversation_memory_driver`. Method signatures for `.store` and `.load` have been changed. #### Before + ```python memory_driver = LocalConversationMemoryDriver() @@ -366,6 +770,7 @@ memory_driver.store(conversation_memory) ``` #### After + ```python memory_driver = LocalConversationMemoryDriver() diff --git a/Makefile b/Makefile index 1db428b2c..a95d8b1f2 100644 --- a/Makefile +++ b/Makefile @@ -40,6 +40,10 @@ test: test/unit test/integration test/unit: ## Run unit tests. @poetry run pytest -n auto tests/unit +.PHONY: test/unit/% +test/unit/%: ## Run specific unit tests. + @poetry run pytest -n auto tests/unit -k $* + .PHONY: test/unit/coverage test/unit/coverage: @poetry run pytest -n auto --cov=griptape tests/unit @@ -55,6 +59,7 @@ lint: ## Lint project. .PHONY: format format: ## Format project. @poetry run ruff format + @poetry run mdformat . .PHONY: check check: check/format check/lint check/types check/spell ## Run all checks. @@ -77,7 +82,7 @@ check/spell: .PHONY: docs docs: ## Build documentation. - @poetry run mkdocs build + @poetry run python -m mkdocs build --clean --strict .DEFAULT_GOAL := help .PHONY: help diff --git a/NOTICE b/NOTICE new file mode 100644 index 000000000..41360802f --- /dev/null +++ b/NOTICE @@ -0,0 +1,13 @@ +# NOTICE + +This project includes the `marshmallow_union` module, which is licensed under the MIT License. + +- **Author**: Adam Boche +- **Project**: [python-marshmallow-union](https://github.com/adamboche/python-marshmallow-union) +- **License**: MIT License + +This project includes the `marshmallow-oneofschema` module, which is licensed under the MIT License. 
+ +- **Author**: Maxim Kulkin, Alex Rothberg, Steven Loria, and other contributors +- **Project**: [marshmallow-oneofschema](https://github.com/marshmallow-code/marshmallow-oneofschema) +- **License**: MIT License \ No newline at end of file diff --git a/README.md b/README.md index 95f6326dd..854dc281f 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ Griptape is a modular Python framework for building AI-powered applications that securely connect to your enterprise data and APIs. It offers developers the ability to maintain control and flexibility at every step. - ## 🛠️ Core Components ### 🏗️ Structures @@ -68,7 +67,7 @@ Engines wrap Drivers and provide use-case-specific functionality: Please refer to [Griptape Docs](https://docs.griptape.ai/) for: -- Getting started guides. +- Getting started guides. - Core concepts and design overviews. - Examples. - Contribution guidelines. @@ -103,6 +102,7 @@ agent.run("https://griptape.ai", "griptape.txt") ``` And here is the output: + ``` [08/12/24 14:48:15] INFO ToolkitTask c90d263ec69046e8b30323c131ae4ba0 Input: Load https://griptape.ai, summarize it, and store it in a file called griptape.txt. @@ -169,9 +169,9 @@ The important thing to note here is that no matter how big the webpage is it can In the above example, we set [off_prompt](https://docs.griptape.ai/stable/griptape-framework/structures/task-memory.md#off-prompt) to `True`, which means that the LLM can never see the data it manipulates, but can send it to other Tools. -> [!IMPORTANT] +> \[!IMPORTANT\]\ > This example uses Griptape's [ToolkitTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#toolkit-task), which requires a highly capable LLM to function correctly. By default, Griptape uses the [OpenAiChatPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#openai-chat); for another powerful LLM try swapping to the [AnthropicPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#anthropic)! -If you're using a less powerful LLM, consider using the [ToolTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#tool-task) instead, as the `ToolkitTask` might not work properly or at all. +> If you're using a less powerful LLM, consider using the [ToolTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#tool-task) instead, as the `ToolkitTask` might not work properly or at all. [Check out our docs](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/) to learn more about how to use Griptape with other LLM providers like Anthropic, Claude, Hugging Face, and Azure. @@ -193,9 +193,9 @@ We welcome and encourage pull requests. To streamline the process, please follow 1. **Existing Issues:** Please submit pull requests only for existing issues. If you want to work on new functionality or fix a bug that hasn't been addressed yet, please first submit an issue. This allows the Griptape team to internally process the request and provide a public response. -2. **Branch:** Submit all pull requests to the `dev` branch. This helps us manage changes and integrate them smoothly. +1. **Branch:** Submit all pull requests to the `dev` branch. This helps us manage changes and integrate them smoothly. -3. **Unit Tests:** Ensure that your pull request passes all existing unit tests. Additionally, if you are introducing new code, please include new unit tests to validate its functionality. +1. 
**Unit Tests:** Ensure that your pull request passes all existing unit tests. Additionally, if you are introducing new code, please include new unit tests to validate its functionality. Run `make test/unit` to execute the test suite locally. diff --git a/docs/contributing.md b/docs/contributing.md index 0342dcd81..f02a27108 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -12,20 +12,21 @@ We welcome and encourage pull requests. To streamline the process, please follow 1. **Existing Issues:** Please submit pull requests only for existing issues. If you want to add new documentation or fix a documentation issue that hasn't been addressed yet, please first submit an issue. This allows the Griptape team to internally process the request and provide a public response. -2. **Branch:** Submit all pull requests to the `dev` branch. This helps us manage changes and integrate them smoothly. +1. **Branch:** Submit all pull requests to the `dev` branch. This helps us manage changes and integrate them smoothly. ## Getting Started + Griptape docs are built using [MkDocs](https://squidfunk.github.io/mkdocs-material/getting-started/). Dependencies are managed using [Poetry](https://python-poetry.org/). To contribute to Griptape docs, install the `docs` extra with: -```poetry install --with docs``` +`poetry install --with docs` Then serve the documentation locally with: -```poetry run mkdocs serve``` +`poetry run mkdocs serve` -You should see something similar to the following: +You should see something similar to the following: ``` INFO - Building documentation... diff --git a/docs/examples/amazon-dynamodb-sessions.md b/docs/examples/amazon-dynamodb-sessions.md index d9a6e4bdd..949af985e 100644 --- a/docs/examples/amazon-dynamodb-sessions.md +++ b/docs/examples/amazon-dynamodb-sessions.md @@ -2,7 +2,7 @@ Griptape provides [Conversation Memory](../griptape-framework/structures/convers If you provide it with a suitable Driver, the memory of the previous conversation can be preserved between run of a Structure, giving it additional context for how to respond. While we can use the [LocalConversationMemoryDriver](../griptape-framework/drivers/conversation-memory-drivers.md#local) to store the conversation history in a local file, in production use-cases we may want to store in a proper database. -In this example, we will show you how to use the [AmazonDynamoDbConversationMemoryDriver](../griptape-framework/drivers/conversation-memory-drivers.md#amazon-dynamodb) to persist the memory in an [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) table. Please refer to the [Amazon DynamoDB documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/getting-started-step-1.html) for information on setting up DynamoDB. +In this example, we will show you how to use the [AmazonDynamoDbConversationMemoryDriver](../griptape-framework/drivers/conversation-memory-drivers.md#amazon-dynamodb) to persist the memory in an [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) table. Please refer to the [Amazon DynamoDB documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/getting-started-step-1.html) for information on setting up DynamoDB. This code implements the idea of a generic "Session" that represents a Conversation Memory entry. For example, a "Session" could be used to represent an individual user's conversation, or a group conversation thread. 
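+
+In outline, the idea looks something like this (the table name, key attribute names, and session id below are placeholders for whatever your DynamoDB setup uses):
+
+```python
+import uuid
+
+from griptape.drivers import AmazonDynamoDbConversationMemoryDriver
+from griptape.memory.structure import ConversationMemory
+from griptape.structures import Agent
+
+# Each "Session" gets its own partition key value, so every conversation
+# thread is persisted and reloaded independently.
+session_id = uuid.uuid4().hex
+
+agent = Agent(
+    conversation_memory=ConversationMemory(
+        conversation_memory_driver=AmazonDynamoDbConversationMemoryDriver(
+            table_name="griptape_sessions",  # placeholder table name
+            partition_key="session_id",  # placeholder partition key attribute
+            value_attribute_key="memory",  # attribute that stores the serialized memory
+            partition_key_value=session_id,
+        )
+    )
+)
+
+agent.run("Hi, my name is Ada.")
+agent.run("What is my name?")  # answered from the persisted session memory
+```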
diff --git a/docs/examples/multi-agent-workflow.md b/docs/examples/multi-agent-workflow.md index 8763a0a3a..a2515c24f 100644 --- a/docs/examples/multi-agent-workflow.md +++ b/docs/examples/multi-agent-workflow.md @@ -3,7 +3,6 @@ In this example we implement a multi-agent Workflow. We have a single "Researche By splitting up our workloads across multiple Structures, we can parallelize the work and leverage the strengths of each Agent. The Researcher can focus on gathering data and insights, while the Writers can focus on crafting engaging narratives. Additionally, this architecture opens us up to using services such as [Griptape Cloud](https://www.griptape.ai/cloud) to have each Agent run completely independently, allowing us to scale our Workflow as needed 🤯. To try out how this would work, you can deploy this example as multiple structures from our [Sample Structures](https://github.com/griptape-ai/griptape-sample-structures/tree/main/griptape-multi-agent-workflows) repo. - ```python --8<-- "docs/examples/src/multi_agent_workflow_1.py" ``` diff --git a/docs/examples/query-webpage-astra-db.md b/docs/examples/query-webpage-astra-db.md index 7e98b63ac..b96906f87 100644 --- a/docs/examples/query-webpage-astra-db.md +++ b/docs/examples/query-webpage-astra-db.md @@ -10,7 +10,6 @@ _Note:_ Besides the [Astra DB](../griptape-framework/drivers/vector-store-driver this example requires the `drivers-web-scraper-trafilatura` Griptape extra to be installed as well. - ```python --8<-- "docs/examples/src/query_webpage_astra_db_1.py" ``` diff --git a/docs/examples/src/multi_agent_workflow_1.py b/docs/examples/src/multi_agent_workflow_1.py index ad9436a55..ea880435a 100644 --- a/docs/examples/src/multi_agent_workflow_1.py +++ b/docs/examples/src/multi_agent_workflow_1.py @@ -133,7 +133,7 @@ def build_writer(role: str, goal: str, backstory: str) -> Agent: ), id="research", driver=LocalStructureRunDriver( - structure_factory_fn=build_researcher, + create_structure=build_researcher, ), ), ) @@ -150,7 +150,7 @@ def build_writer(role: str, goal: str, backstory: str) -> Agent: {{ parent_outputs["research"] }}""", ), driver=LocalStructureRunDriver( - structure_factory_fn=lambda writer=writer: build_writer( + create_structure=lambda writer=writer: build_writer( role=writer["role"], goal=writer["goal"], backstory=writer["backstory"], diff --git a/docs/griptape-cloud/data-sources/create-data-source.md b/docs/griptape-cloud/data-sources/create-data-source.md index 87a5286a0..a0a45a173 100644 --- a/docs/griptape-cloud/data-sources/create-data-source.md +++ b/docs/griptape-cloud/data-sources/create-data-source.md @@ -32,6 +32,6 @@ If you do not see a Data Source configuration you'd wish to use, you can submit ## Adding Structure as Transform to Data Source (Experimental) -When creating any Data Source, you can optionally specify a [Structure](../structures/create-structure.md) to run as a transform step of your data ingetstion before loading into the vector store. Ensure the Structure you select to run as a transform is configured to take in a `ListArtifact` as its first positional argument and returns either a `TextArtifact` or `ListArtifact`. +When creating any Data Source, you can optionally specify a [Structure](../structures/create-structure.md) to run as a transform step of your data ingetstion before loading into the vector store. Ensure the Structure you select to run as a transform is configured to take in a `ListArtifact` as its first positional argument and returns either a `TextArtifact` or `ListArtifact`. 
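+
+In other words, the transform's core logic has roughly this shape (the function and the redaction rule below are made-up illustrations, not the sample Structure itself):
+
+```python
+from griptape.artifacts import ListArtifact, TextArtifact
+
+
+def transform(chunks: ListArtifact) -> ListArtifact:
+    # Hypothetical transform: redact an internal code name from every ingested
+    # chunk before it is loaded into the vector store.
+    return ListArtifact([TextArtifact(chunk.value.replace("Project X", "[REDACTED]")) for chunk in chunks])
+```
+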
-Take a look at the [Find and Replace Sample Structure](https://github.com/griptape-ai/griptape-sample-structures/tree/main/griptape-find-replace-transform) for more details on how to implement this for your own Structure. \ No newline at end of file +Take a look at the [Find and Replace Sample Structure](https://github.com/griptape-ai/griptape-sample-structures/tree/main/griptape-find-replace-transform) for more details on how to implement this for your own Structure. diff --git a/docs/griptape-cloud/index.md b/docs/griptape-cloud/index.md index 74556ab62..d70f020bc 100644 --- a/docs/griptape-cloud/index.md +++ b/docs/griptape-cloud/index.md @@ -3,13 +3,17 @@ [Griptape Cloud](https://cloud.griptape.ai/) provides managed services for your AI app stack. Deploy and scale end-to-end solutions, from LLM-powered data prep and retrieval to AI Agents, Pipelines, and Workflows. ## Build Your Own RAG Pipeline + Connect to your data with our [Data Sources](data-sources/create-data-source.md) and prepare them for retrieval with [Knowledge Bases](knowledge-bases/create-knowledge-base.md). ## Host and Run Your Code + Have Griptape code? Have existing code with another LLM framework? You can host your Python code using [Structures](structures/create-structure.md) whether it uses the Griptape Framework or not. ## Store Configuration for LLM Agents + [Rules and Rulesets](rules/rulesets.md) enable rapid and collabortive iteration for managing LLM behavior. [Threads and Messages](threads/threads.md) allow for persisted and editable conversation memory across any LLM invocation. ## APIs + All of our features can be called via API with a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys). See the [API Reference](api/api-reference.md) for detailed information. diff --git a/docs/griptape-cloud/knowledge-bases/accessing-data.md b/docs/griptape-cloud/knowledge-bases/accessing-data.md index 8343933dd..1890127e6 100644 --- a/docs/griptape-cloud/knowledge-bases/accessing-data.md +++ b/docs/griptape-cloud/knowledge-bases/accessing-data.md @@ -8,7 +8,7 @@ You can explore your data with a natural language question on the `Test` tab of ## From the API -You can enact both `Search` and `Query` via the API by hitting their respective endpoints using a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys) and the Knowledge Base ID found on the `Config` tab of your Knowledge Base. +You can enact both `Search` and `Query` via the API by hitting their respective endpoints using a [Griptape Cloud API Key](https://cloud.griptape.ai/configuration/api-keys) and the Knowledge Base ID found on the `Config` tab of your Knowledge Base. The following example commands will send the string `"test question"` and return the results from the Knowledge Base. diff --git a/docs/griptape-cloud/structures/create-structure.md b/docs/griptape-cloud/structures/create-structure.md index df0449891..13b29f801 100644 --- a/docs/griptape-cloud/structures/create-structure.md +++ b/docs/griptape-cloud/structures/create-structure.md @@ -6,9 +6,9 @@ Structures are a primary component in Griptape for organizing and executing Task 1. [Connect Your GitHub Account in your Griptape Cloud account](https://cloud.griptape.ai/account) 1. 
Install the [Griptape Cloud GitHub app to your GitHub account or organization](https://github.com/apps/griptape-cloud/installations/new/) - - Be sure to allow the app access to `All Repositories` or select the specific repositories you need + - Be sure to allow the app access to `All Repositories` or select the specific repositories you need 1. Ensure your repository has a Structure Config YAML file - - To learn more see [Structure Config YAML](structure-config.md) + - To learn more see [Structure Config YAML](structure-config.md) You can now [create a Structure in the Griptape Cloud console](https://cloud.griptape.ai/structures/create) by providing your GitHub repository information. diff --git a/docs/griptape-cloud/structures/structure-config.md b/docs/griptape-cloud/structures/structure-config.md index fa920be3c..1891591a3 100644 --- a/docs/griptape-cloud/structures/structure-config.md +++ b/docs/griptape-cloud/structures/structure-config.md @@ -42,15 +42,15 @@ The specific version of the runtime environment for the Structure. The build-time configuration for the Structure. -* **pre_build_install_script** - The path to your pre_build_install_script, for running during the Structure build prior to dependency installation. This path is relative to the structure configuration file. Or absolute from the repository root if a forward slash is used: `/my-pre-build-install-script.sh`. -* **post_build_install_script** - The path to your post_build_install_script, for running during the Structure build after dependency installation. This path is relative to the structure configuration file. Or absolute from the repository root if a forward slash is used: `/my-post-build-install-script.sh`. -* **requirements_file** - The path to your Structure's requirements.txt file. -* **cache_build_dependencies** - Defines the configuration for caching build dependencies in order to speed up Deployments - * **enabled** - Defines whether the build dependency caching is on or off - * **watched_files** - Defines the particular files that will trigger cache invalidation, resulting in a full rebuild of the Structure and dependencies +- **pre_build_install_script** - The path to your pre_build_install_script, for running during the Structure build prior to dependency installation. This path is relative to the structure configuration file. Or absolute from the repository root if a forward slash is used: `/my-pre-build-install-script.sh`. +- **post_build_install_script** - The path to your post_build_install_script, for running during the Structure build after dependency installation. This path is relative to the structure configuration file. Or absolute from the repository root if a forward slash is used: `/my-post-build-install-script.sh`. +- **requirements_file** - The path to your Structure's requirements.txt file. +- **cache_build_dependencies** - Defines the configuration for caching build dependencies in order to speed up Deployments + - **enabled** - Defines whether the build dependency caching is on or off + - **watched_files** - Defines the particular files that will trigger cache invalidation, resulting in a full rebuild of the Structure and dependencies #### run (REQUIRED) The run-time configuration for the Structure. -* **main_file** - Specifies the path to the entry point file of the Managed Structure. This path is relative to the structure_config.yaml. Or absolute from the repository root if a forward slash is used: `/structure.py`. 
+- **main_file** - Specifies the path to the entry point file of the Managed Structure. This path is relative to the structure_config.yaml. Or absolute from the repository root if a forward slash is used: `/structure.py`. diff --git a/docs/griptape-framework/data/artifacts.md b/docs/griptape-framework/data/artifacts.md index 2edd1ebec..a55ac6afe 100644 --- a/docs/griptape-framework/data/artifacts.md +++ b/docs/griptape-framework/data/artifacts.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview diff --git a/docs/griptape-framework/data/chunkers.md b/docs/griptape-framework/data/chunkers.md index bafbc1c80..da37ee69e 100644 --- a/docs/griptape-framework/data/chunkers.md +++ b/docs/griptape-framework/data/chunkers.md @@ -1,17 +1,17 @@ --- search: - boost: 2 + boost: 2 --- ## Overview -Chunkers are used to split arbitrarily long text into chunks of certain token length. -Each chunker has a tokenizer, a max token count, and a list of default separators used to split up text into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s. +Chunkers are used to split arbitrarily long text into chunks of certain token length. +Each chunker has a tokenizer, a max token count, and a list of default separators used to split up text into [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s. Different types of chunkers provide lists of separators for specific text shapes: -* [TextChunker](../../reference/griptape/chunkers/text_chunker.md): works on most texts. -* [PdfChunker](../../reference/griptape/chunkers/pdf_chunker.md): works on text from PDF docs. -* [MarkdownChunker](../../reference/griptape/chunkers/markdown_chunker.md) works on markdown text. +- [TextChunker](../../reference/griptape/chunkers/text_chunker.md): works on most texts. +- [PdfChunker](../../reference/griptape/chunkers/pdf_chunker.md): works on text from PDF docs. +- [MarkdownChunker](../../reference/griptape/chunkers/markdown_chunker.md) works on markdown text. Here is how to use a chunker: diff --git a/docs/griptape-framework/data/index.md b/docs/griptape-framework/data/index.md index 3e4359737..13bbba3c4 100644 --- a/docs/griptape-framework/data/index.md +++ b/docs/griptape-framework/data/index.md @@ -1,9 +1,9 @@ ## Overview + Griptape provides several abstractions for working with data. ![Data Architecture](../../assets/img/data-architecture.png) - [Artifacts](./artifacts.md) are used for passing different types of data, such as text, lists, and blobs, between Griptape components. [Embedding Drivers](../drivers/embedding-drivers.md) are used to generate vector embeddings from text. diff --git a/docs/griptape-framework/data/loaders.md b/docs/griptape-framework/data/loaders.md index a8a8cb7c5..ce403c9fe 100644 --- a/docs/griptape-framework/data/loaders.md +++ b/docs/griptape-framework/data/loaders.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -9,7 +9,6 @@ Loaders are used to load data from sources and parse it into [Artifact](../../gr Each loader can be used to load a single "source" with [load()](../../reference/griptape/loaders/base_loader.md#griptape.loaders.base_loader.BaseLoader.load) or multiple sources with [load_collection()](../../reference/griptape/loaders/base_loader.md#griptape.loaders.base_loader.BaseLoader.load_collection). 
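+
+For example (a minimal sketch that assumes text files exist at the given paths):
+
+```python
+from griptape.loaders import TextLoader
+
+loader = TextLoader()
+
+# Load a single source into a TextArtifact...
+artifact = loader.load("tests/resources/test.txt")
+
+# ...or several sources at once.
+artifacts = loader.load_collection(["tests/resources/test.txt", "tests/resources/test-1.txt"])
+```
+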
- ## File The following Loaders load a file using a [FileManagerDriver](../../reference/griptape/drivers/file_manager/base_file_manager_driver.md) and loads the resulting data into an [Artifact](../../griptape-framework/data/artifacts.md) for the respective file type. @@ -48,7 +47,6 @@ Loads CSV files into [ListArtifact](../../griptape-framework/data/artifacts.md#l Loads images into [ImageArtifact](../../griptape-framework/data/artifacts.md#image)s: - ```python --8<-- "docs/griptape-framework/data/src/loaders_7.py" ``` @@ -82,7 +80,7 @@ Scrapes web pages using a [WebScraperDriver](../drivers/web-scraper-drivers.md) ## SQL -Loads data from a SQL database using a [SQLDriver](../drivers/sql-drivers.md) and loads the resulting data into [ListArtifact](../../griptape-framework/data/artifacts.md#list)s, where each element is a [CsvRowArtifact](../../griptape-framework/data/artifacts.md#csv) containing a row of the SQL query. +Loads data from a SQL database using a [SQLDriver](../drivers/sql-drivers.md) and loads the resulting data into [ListArtifact](../../griptape-framework/data/artifacts.md#list)s, where each element is a [TextArtifact](../../griptape-framework/data/artifacts.md#text) containing a row of the SQL query. ```python --8<-- "docs/griptape-framework/data/src/loaders_2.py" diff --git a/docs/griptape-framework/drivers/audio-transcription-drivers.md b/docs/griptape-framework/drivers/audio-transcription-drivers.md index 10630cf22..793084e08 100644 --- a/docs/griptape-framework/drivers/audio-transcription-drivers.md +++ b/docs/griptape-framework/drivers/audio-transcription-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -15,7 +15,7 @@ This capability is essential for enhancing accessibility, improving content disc ### OpenAI -The [OpenAI Audio Transcription Driver](../../reference/griptape/drivers/audio_transcription/openai_audio_transcription_driver.md) utilizes OpenAI's sophisticated `whisper` model to accurately transcribe spoken audio into text. This model supports multiple languages, ensuring precise transcription across a wide range of dialects. +The [OpenAI Audio Transcription Driver](../../reference/griptape/drivers/audio_transcription/openai_audio_transcription_driver.md) utilizes OpenAI's sophisticated `whisper` model to accurately transcribe spoken audio into text. This model supports multiple languages, ensuring precise transcription across a wide range of dialects. ```python --8<-- "docs/griptape-framework/drivers/src/audio_transcription_drivers_1.py" diff --git a/docs/griptape-framework/drivers/conversation-memory-drivers.md b/docs/griptape-framework/drivers/conversation-memory-drivers.md index bb4c1b35a..4732a7bb7 100644 --- a/docs/griptape-framework/drivers/conversation-memory-drivers.md +++ b/docs/griptape-framework/drivers/conversation-memory-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -35,8 +35,8 @@ The [AmazonDynamoDbConversationMemoryDriver](../../reference/griptape/drivers/me ```python --8<-- "docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py" ``` -Optional parameters `sort_key` and `sort_key_value` can be supplied for tables with a composite primary key. +Optional parameters `sort_key` and `sort_key_value` can be supplied for tables with a composite primary key. 
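+
+For example, for a table keyed on an assumed `id` partition key plus a `timestamp` sort key (all values below are placeholders):
+
+```python
+from griptape.drivers import AmazonDynamoDbConversationMemoryDriver
+
+dynamodb_driver = AmazonDynamoDbConversationMemoryDriver(
+    table_name="griptape_memory",
+    partition_key="id",
+    value_attribute_key="memory",
+    partition_key_value="my-conversation",
+    # Only needed when the table uses a composite primary key.
+    sort_key="timestamp",
+    sort_key_value="2024-01-01T00:00:00Z",
+)
+```
+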
### Redis @@ -48,4 +48,3 @@ The [RedisConversationMemoryDriver](../../reference/griptape/drivers/memory/conv ```python --8<-- "docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py" ``` - diff --git a/docs/griptape-framework/drivers/embedding-drivers.md b/docs/griptape-framework/drivers/embedding-drivers.md index 68f40f09e..71954e000 100644 --- a/docs/griptape-framework/drivers/embedding-drivers.md +++ b/docs/griptape-framework/drivers/embedding-drivers.md @@ -1,15 +1,16 @@ --- search: - boost: 2 + boost: 2 --- ## Overview + Embeddings in Griptape are multidimensional representations of text data. Embeddings carry semantic information, which makes them useful for extracting relevant chunks from large bodies of text for search and querying. Griptape provides a way to build Embedding Drivers that are reused in downstream framework components. Every Embedding Driver has two basic methods that can be used to generate embeddings: -* [embed_text_artifact()](../../reference/griptape/drivers/embedding/base_embedding_driver.md#griptape.drivers.embedding.base_embedding_driver.BaseEmbeddingDriver.embed_text_artifact) for [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s. -* [embed_string()](../../reference/griptape/drivers/embedding/base_embedding_driver.md#griptape.drivers.embedding.base_embedding_driver.BaseEmbeddingDriver.embed_string) for any string. +- [embed_text_artifact()](../../reference/griptape/drivers/embedding/base_embedding_driver.md#griptape.drivers.embedding.base_embedding_driver.BaseEmbeddingDriver.embed_text_artifact) for [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s. +- [embed_string()](../../reference/griptape/drivers/embedding/base_embedding_driver.md#griptape.drivers.embedding.base_embedding_driver.BaseEmbeddingDriver.embed_string) for any string. You can optionally provide a [Tokenizer](../misc/tokenizers.md) via the [tokenizer](../../reference/griptape/drivers/embedding/base_embedding_driver.md#griptape.drivers.embedding.base_embedding_driver.BaseEmbeddingDriver.tokenizer) field to have the Driver automatically chunk the input text to fit into the token limit. @@ -19,10 +20,10 @@ You can optionally provide a [Tokenizer](../misc/tokenizers.md) via the [tokeniz The [OpenAiEmbeddingDriver](../../reference/griptape/drivers/embedding/openai_embedding_driver.md) uses the [OpenAI Embeddings API](https://platform.openai.com/docs/guides/embeddings). - ```python --8<-- "docs/griptape-framework/drivers/src/embedding_drivers_1.py" ``` + ``` [0.0017853748286142945, 0.006118456833064556, -0.005811543669551611] ``` @@ -54,11 +55,13 @@ The [AmazonBedrockTitanEmbeddingDriver](../../reference/griptape/drivers/embeddi ```python --8<-- "docs/griptape-framework/drivers/src/embedding_drivers_3.py" ``` + ``` [-0.234375, -0.024902344, -0.14941406] ``` ### Google + !!! info This driver requires the `drivers-embedding-google` [extra](../index.md#extras). @@ -67,6 +70,7 @@ The [GoogleEmbeddingDriver](../../reference/griptape/drivers/embedding/google_em ```python --8<-- "docs/griptape-framework/drivers/src/embedding_drivers_4.py" ``` + ``` [0.0588633, 0.0033929371, -0.072810836] ``` @@ -107,6 +111,7 @@ The [AmazonSageMakerJumpstartEmbeddingDriver](../../reference/griptape/drivers/e ``` ### VoyageAI + The [VoyageAiEmbeddingDriver](../../reference/griptape/drivers/embedding/voyageai_embedding_driver.md) uses the [VoyageAI Embeddings API](https://www.voyageai.com/). !!! 
info @@ -128,6 +133,7 @@ The [CohereEmbeddingDriver](../../reference/griptape/drivers/embedding/cohere_em ``` ### Override Default Structure Embedding Driver + Here is how you can override the Embedding Driver that is used by default in Structures. ```python diff --git a/docs/griptape-framework/drivers/event-listener-drivers.md b/docs/griptape-framework/drivers/event-listener-drivers.md index ab0609c51..a734618b1 100644 --- a/docs/griptape-framework/drivers/event-listener-drivers.md +++ b/docs/griptape-framework/drivers/event-listener-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -54,7 +54,7 @@ The [GriptapeCloudEventListenerDriver](../../reference/griptape/drivers/event_li ```python --8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_5.py" -``` +``` ### Webhook Event Listener Driver @@ -63,6 +63,7 @@ The [WebhookEventListenerDriver](../../reference/griptape/drivers/event_listener ```python --8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_6.py" ``` + ### Pusher !!! info diff --git a/docs/griptape-framework/drivers/file-manager-drivers.md b/docs/griptape-framework/drivers/file-manager-drivers.md new file mode 100644 index 000000000..37012c29f --- /dev/null +++ b/docs/griptape-framework/drivers/file-manager-drivers.md @@ -0,0 +1,48 @@ +--- +search: + boost: 2 +--- + +## Overview + +File Manager Drivers can be used to load and save files with local or external file systems. + +You can use File Manager Drivers with Loaders: + +```python +--8<-- "docs/griptape-framework/drivers/src/file_manager_driver.py" +``` + +Or use them independently as shown below for each driver: + +## File Manager Drivers + +### Griptape Cloud + +!!! info + This driver requires the `drivers-file-manager-griptape-cloud` [extra](../index.md#extras). + +The [GriptapeCloudFileManagerDriver](../../reference/griptape/drivers/file_manager/griptape_cloud_file_manager_driver.md) allows you to load and save files sourced from Griptape Cloud Asset and Bucket resources. + +```python +--8<-- "docs/griptape-framework/drivers/src/griptape_cloud_file_manager_driver.py" +``` + +### Local + +The [LocalFileManagerDriver](../../reference/griptape/drivers/file_manager/local_file_manager_driver.md) allows you to load and save files sourced from a local directory. + +```python +--8<-- "docs/griptape-framework/drivers/src/local_file_manager_driver.py" +``` + +### Amazon S3 + +!!! info + This driver requires the `drivers-file-manager-amazon-s3` [extra](../index.md#extras). + +The [LocalFile ManagerDriver](../../reference/griptape/drivers/file_manager/amazon_s3_file_manager_driver.md) allows you to load and save files sourced from an Amazon S3 bucket. + +```python +--8<-- "docs/griptape-framework/drivers/src/amazon_s3_file_manager_driver.py" +``` diff --git a/docs/griptape-framework/drivers/image-generation-drivers.md b/docs/griptape-framework/drivers/image-generation-drivers.md index bcc91aca6..549fb0c28 100644 --- a/docs/griptape-framework/drivers/image-generation-drivers.md +++ b/docs/griptape-framework/drivers/image-generation-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -29,7 +29,7 @@ This Model Driver supports negative prompts. 
When provided (for example, when us --8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_2.py" ``` -#### Titan +#### Titan The [Bedrock Titan Image Generator Model Driver](../../reference/griptape/drivers/image_generation_model/bedrock_titan_image_generation_model_driver.md) provides support for Titan Image Generator models hosted by Amazon Bedrock. This Model Driver supports configurations specific to Titan Image Generator, like quality, seed, and cfg_scale. @@ -78,9 +78,9 @@ The [HuggingFace Pipelines Image Generation Driver](../../reference/griptape/dri This Driver requires a `model` configuration, specifying the model to use for image generation. The value of the `model` configuration must be one of the following: - - A model name from the HuggingFace Model Hub, like `stabilityai/stable-diffusion-3-medium-diffusers` - - A path to the directory containing a model on the filesystem, like `./models/stable-diffusion-3/` - - A path to a file containing a model on the filesystem, like `./models/sd3_medium_incl_clips.safetensors` +- A model name from the HuggingFace Model Hub, like `stabilityai/stable-diffusion-3-medium-diffusers` +- A path to the directory containing a model on the filesystem, like `./models/stable-diffusion-3/` +- A path to a file containing a model on the filesystem, like `./models/sd3_medium_incl_clips.safetensors` The `device` configuration specifies the hardware device used to run inference. Common values include `cuda` (supporting CUDA-enabled GPUs), `cpu` (supported by a device's CPU), and `mps` (supported by Apple silicon GPUs). For more information, see [HuggingFace's documentation](https://huggingface.co/docs/transformers/en/perf_infer_gpu_one) on GPU inference. diff --git a/docs/griptape-framework/drivers/image-query-drivers.md b/docs/griptape-framework/drivers/image-query-drivers.md index b0c598572..e3dc9032f 100644 --- a/docs/griptape-framework/drivers/image-query-drivers.md +++ b/docs/griptape-framework/drivers/image-query-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -8,7 +8,7 @@ search: Image Query Drivers are used by [Image Query Engines](../engines/image-query-engines.md) to execute natural language queries on the contents of images. You can specify the provider and model used to query the image by providing the Engine with a particular Image Query Driver. !!! info - All Image Query Drivers default to a `max_tokens` of 256. It is recommended that you set this value to correspond to the desired response length. + All Image Query Drivers default to a `max_tokens` of 256. It is recommended that you set this value to correspond to the desired response length. ## Image Query Drivers @@ -41,7 +41,7 @@ The [OpenAiVisionImageQueryDriver](../../reference/griptape/drivers/image_query/ ``` ### Azure OpenAI - + !!! info In order to use the `gpt-4-vision-preview` model on Azure OpenAI, the `gpt-4` model must be deployed with the version set to `vision-preview`. More information can be found in the [Azure OpenAI documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/gpt-with-vision). 
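+
+Building on the `max_tokens` note above, here is a rough sketch of raising the limit for longer answers; the model name, image path, and engine wiring are assumptions, so refer to the included snippets for the exact setup:
+
+```python
+from griptape.drivers import OpenAiVisionImageQueryDriver
+from griptape.engines import ImageQueryEngine
+from griptape.loaders import ImageLoader
+
+# Raise max_tokens above the 256 default so longer descriptions are not truncated.
+driver = OpenAiVisionImageQueryDriver(model="gpt-4o", max_tokens=512)
+
+engine = ImageQueryEngine(image_query_driver=driver)
+image_artifact = ImageLoader().load("tests/resources/mountain.png")
+
+print(engine.run("Describe the weather in this image.", [image_artifact]).value)
+```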
diff --git a/docs/griptape-framework/drivers/observability-drivers.md b/docs/griptape-framework/drivers/observability-drivers.md index 701aca504..ec7352118 100644 --- a/docs/griptape-framework/drivers/observability-drivers.md +++ b/docs/griptape-framework/drivers/observability-drivers.md @@ -1,16 +1,15 @@ --- search: - boost: 2 + boost: 2 --- ## Overview Observability Drivers are used by [Observability](../structures/observability.md) to send telemetry (metrics and traces) related to the execution of an LLM application. The telemetry can be used to monitor the application and to diagnose and troubleshoot issues. All Observability Drivers implement the following methods: -* `__enter__()` sets up the Driver. -* `__exit__()` tears down the Driver. -* `observe()` wraps all functions and methods marked with the `@observable` decorator. At a bare minimum, implementations call the wrapped function and return its result (a no-op). This enables the Driver to generate telemetry related to the invocation's call arguments, return values, exceptions, latency, etc. - +- `__enter__()` sets up the Driver. +- `__exit__()` tears down the Driver. +- `observe()` wraps all functions and methods marked with the `@observable` decorator. At a bare minimum, implementations call the wrapped function and return its result (a no-op). This enables the Driver to generate telemetry related to the invocation's call arguments, return values, exceptions, latency, etc. ## Observability Drivers @@ -27,12 +26,10 @@ The Griptape Cloud Observability Driver instruments `@observable` functions and Here is an example of how to use the `GriptapeCloudObservabilityDriver` with the `Observability` context manager to send the telemetry to Griptape Cloud: - ```python --8<-- "docs/griptape-framework/drivers/src/observability_drivers_1.py" ``` - ### OpenTelemetry !!! info @@ -40,7 +37,6 @@ Here is an example of how to use the `GriptapeCloudObservabilityDriver` with the The [OpenTelemetry](https://opentelemetry.io/) Observability Driver instruments `@observable` functions and methods with metrics and traces for use with OpenTelemetry. You must configure a destination for the telemetry by providing a `SpanProcessor` to the Driver. - Here is an example of how to use the `OpenTelemetryObservabilityDriver` with the `Observability` context manager to output the telemetry directly to the console: ```python @@ -48,6 +44,7 @@ Here is an example of how to use the `OpenTelemetryObservabilityDriver` with the ``` Output (only relevant because of use of `ConsoleSpanExporter`): + ``` [06/18/24 06:57:22] INFO PromptTask 2d8ef95bf817480188ae2f74e754308a Input: Name an animal diff --git a/docs/griptape-framework/drivers/prompt-drivers.md b/docs/griptape-framework/drivers/prompt-drivers.md index 54230b999..131c596cf 100644 --- a/docs/griptape-framework/drivers/prompt-drivers.md +++ b/docs/griptape-framework/drivers/prompt-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview diff --git a/docs/griptape-framework/drivers/ruleset-drivers.md b/docs/griptape-framework/drivers/ruleset-drivers.md index 117f5836b..4e4f908c4 100644 --- a/docs/griptape-framework/drivers/ruleset-drivers.md +++ b/docs/griptape-framework/drivers/ruleset-drivers.md @@ -5,7 +5,7 @@ search: ## Overview -Ruleset Drivers can be used to load rules in from external sources. +Ruleset Drivers can be used to load rules in from external sources. 
## Ruleset Drivers diff --git a/docs/griptape-framework/drivers/sql-drivers.md b/docs/griptape-framework/drivers/sql-drivers.md index c5c15e258..68c252894 100644 --- a/docs/griptape-framework/drivers/sql-drivers.md +++ b/docs/griptape-framework/drivers/sql-drivers.md @@ -1,14 +1,15 @@ --- search: - boost: 2 + boost: 2 --- ## Overview + SQL drivers can be used to make SQL queries and load table schemas. They are used by the [SqlLoader](../../reference/griptape/loaders/sql_loader.md) to process data. All loaders implement the following methods: -* `execute_query()` executes a query and returns [RowResult](../../reference/griptape/drivers/sql/base_sql_driver.md#griptape.drivers.sql.base_sql_driver.BaseSqlDriver.RowResult)s. -* `execute_query_row()` executes a query and returns a raw result from SQL. -* `get_table_schema()` returns a table schema. +- `execute_query()` executes a query and returns [RowResult](../../reference/griptape/drivers/sql/base_sql_driver.md#griptape.drivers.sql.base_sql_driver.BaseSqlDriver.RowResult)s. +- `execute_query_row()` executes a query and returns a raw result from SQL. +- `get_table_schema()` returns a table schema. ## SQL Drivers @@ -31,7 +32,7 @@ This is a basic SQL loader based on [SQLAlchemy 2.0](https://docs.sqlalchemy.org !!! info This driver requires the `drivers-sql-amazon-redshift` [extra](../index.md#extras). -This is a SQL driver for interacting with the [Amazon Redshift Data API](https://docs.aws.amazon.com/redshift-data/latest/APIReference/Welcome.html) +This is a SQL driver for interacting with the [Amazon Redshift Data API](https://docs.aws.amazon.com/redshift-data/latest/APIReference/Welcome.html) to execute statements. Here is an example of how to use it for Redshift Serverless: ```python diff --git a/docs/griptape-framework/drivers/src/amazon_s3_file_manager_driver.py b/docs/griptape-framework/drivers/src/amazon_s3_file_manager_driver.py new file mode 100644 index 000000000..5fa9324cb --- /dev/null +++ b/docs/griptape-framework/drivers/src/amazon_s3_file_manager_driver.py @@ -0,0 +1,24 @@ +import os + +import boto3 + +from griptape.drivers import AmazonS3FileManagerDriver + +amazon_s3_file_manager_driver = AmazonS3FileManagerDriver( + bucket=os.environ["AMAZON_S3_BUCKET"], + session=boto3.Session( + region_name=os.environ["AWS_DEFAULT_REGION"], + aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"], + aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"], + ), +) + +# Download File +file_contents = amazon_s3_file_manager_driver.load_file(os.environ["AMAZON_S3_KEY"]) + +print(file_contents) + +# Upload File +response = amazon_s3_file_manager_driver.save_file(os.environ["AMAZON_S3_KEY"], file_contents.value) + +print(response) diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_1.py b/docs/griptape-framework/drivers/src/event_listener_drivers_1.py index 66b9372c3..024acb221 100644 --- a/docs/griptape-framework/drivers/src/event_listener_drivers_1.py +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_1.py @@ -8,7 +8,7 @@ EventBus.add_event_listeners( [ EventListener( - driver=AmazonSqsEventListenerDriver( + event_listener_driver=AmazonSqsEventListenerDriver( queue_url=os.environ["AMAZON_SQS_QUEUE_URL"], ), ), diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_3.py b/docs/griptape-framework/drivers/src/event_listener_drivers_3.py index 0bb248362..b0ceccc3c 100644 --- a/docs/griptape-framework/drivers/src/event_listener_drivers_3.py +++ 
b/docs/griptape-framework/drivers/src/event_listener_drivers_3.py @@ -8,7 +8,7 @@ EventBus.add_event_listeners( [ EventListener( - driver=AmazonSqsEventListenerDriver( + event_listener_driver=AmazonSqsEventListenerDriver( queue_url=os.environ["AMAZON_SQS_QUEUE_URL"], ), ), diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_4.py b/docs/griptape-framework/drivers/src/event_listener_drivers_4.py index 6d03d2ce3..31f7c3394 100644 --- a/docs/griptape-framework/drivers/src/event_listener_drivers_4.py +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_4.py @@ -12,7 +12,7 @@ [ EventListener( event_types=[FinishStructureRunEvent], - driver=AwsIotCoreEventListenerDriver( + event_listener_driver=AwsIotCoreEventListenerDriver( topic=os.environ["AWS_IOT_CORE_TOPIC"], iot_endpoint=os.environ["AWS_IOT_CORE_ENDPOINT"], ), diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_5.py b/docs/griptape-framework/drivers/src/event_listener_drivers_5.py index 27186e229..638e2bf76 100644 --- a/docs/griptape-framework/drivers/src/event_listener_drivers_5.py +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_5.py @@ -8,7 +8,7 @@ event_types=[FinishStructureRunEvent], # By default, GriptapeCloudEventListenerDriver uses the api key provided # in the GT_CLOUD_API_KEY environment variable. - driver=GriptapeCloudEventListenerDriver(), + event_listener_driver=GriptapeCloudEventListenerDriver(), ), ] ) diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_6.py b/docs/griptape-framework/drivers/src/event_listener_drivers_6.py index c60cc6984..803ebfc1b 100644 --- a/docs/griptape-framework/drivers/src/event_listener_drivers_6.py +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_6.py @@ -8,7 +8,7 @@ [ EventListener( event_types=[FinishStructureRunEvent], - driver=WebhookEventListenerDriver( + event_listener_driver=WebhookEventListenerDriver( webhook_url=os.environ["WEBHOOK_URL"], ), ), diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_7.py b/docs/griptape-framework/drivers/src/event_listener_drivers_7.py index c010cb8f9..bcb8c2acd 100644 --- a/docs/griptape-framework/drivers/src/event_listener_drivers_7.py +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_7.py @@ -8,7 +8,7 @@ [ EventListener( event_types=[FinishStructureRunEvent], - driver=PusherEventListenerDriver( + event_listener_driver=PusherEventListenerDriver( batched=False, app_id=os.environ["PUSHER_APP_ID"], key=os.environ["PUSHER_KEY"], diff --git a/docs/griptape-framework/drivers/src/file_manager_driver.py b/docs/griptape-framework/drivers/src/file_manager_driver.py new file mode 100644 index 000000000..0ba2e26c7 --- /dev/null +++ b/docs/griptape-framework/drivers/src/file_manager_driver.py @@ -0,0 +1,9 @@ +from griptape.drivers import LocalFileManagerDriver +from griptape.loaders import TextLoader + +local_file_manager_driver = LocalFileManagerDriver() + +loader = TextLoader(file_manager_driver=local_file_manager_driver) +text_artifact = loader.load("tests/resources/test.txt") + +print(text_artifact.value) diff --git a/docs/griptape-framework/drivers/src/griptape_cloud_file_manager_driver.py b/docs/griptape-framework/drivers/src/griptape_cloud_file_manager_driver.py new file mode 100644 index 000000000..b222b5d4a --- /dev/null +++ b/docs/griptape-framework/drivers/src/griptape_cloud_file_manager_driver.py @@ -0,0 +1,18 @@ +import os + +from griptape.drivers import GriptapeCloudFileManagerDriver + +gtc_file_manager_driver = 
GriptapeCloudFileManagerDriver( + api_key=os.environ["GT_CLOUD_API_KEY"], + bucket_id=os.environ["GT_CLOUD_BUCKET_ID"], +) + +# Download File +file_contents = gtc_file_manager_driver.load_file(os.environ["GT_CLOUD_ASSET_NAME"]) + +print(file_contents) + +# Upload File +response = gtc_file_manager_driver.save_file(os.environ["GT_CLOUD_ASSET_NAME"], file_contents.value) + +print(response) diff --git a/docs/griptape-framework/drivers/src/local_file_manager_driver.py b/docs/griptape-framework/drivers/src/local_file_manager_driver.py new file mode 100644 index 000000000..a53378060 --- /dev/null +++ b/docs/griptape-framework/drivers/src/local_file_manager_driver.py @@ -0,0 +1,13 @@ +from griptape.drivers import LocalFileManagerDriver + +local_file_manager_driver = LocalFileManagerDriver() + +# Download File +file_contents = local_file_manager_driver.load_file("tests/resources/test.txt") + +print(file_contents) + +# Upload File +response = local_file_manager_driver.save_file("tests/resources/test.txt", file_contents.value) + +print(response) diff --git a/docs/griptape-framework/drivers/src/sql_drivers_3.py b/docs/griptape-framework/drivers/src/sql_drivers_3.py index 29ee4a818..cf1e7c1dc 100644 --- a/docs/griptape-framework/drivers/src/sql_drivers_3.py +++ b/docs/griptape-framework/drivers/src/sql_drivers_3.py @@ -17,6 +17,6 @@ def get_snowflake_connection() -> SnowflakeConnection: ) -driver = SnowflakeSqlDriver(connection_func=get_snowflake_connection) +driver = SnowflakeSqlDriver(get_connection=get_snowflake_connection) driver.execute_query("select * from people;") diff --git a/docs/griptape-framework/drivers/src/structure_run_drivers_1.py b/docs/griptape-framework/drivers/src/structure_run_drivers_1.py index a29bfbedf..5fc5b29fe 100644 --- a/docs/griptape-framework/drivers/src/structure_run_drivers_1.py +++ b/docs/griptape-framework/drivers/src/structure_run_drivers_1.py @@ -28,13 +28,13 @@ def build_joke_rewriter() -> Agent: tasks=[ StructureRunTask( driver=LocalStructureRunDriver( - structure_factory_fn=build_joke_teller, + create_structure=build_joke_teller, ), ), StructureRunTask( ("Rewrite this joke: {{ parent_output }}",), driver=LocalStructureRunDriver( - structure_factory_fn=build_joke_rewriter, + create_structure=build_joke_rewriter, ), ), ] diff --git a/docs/griptape-framework/drivers/src/structure_run_drivers_2.py b/docs/griptape-framework/drivers/src/structure_run_drivers_2.py index 6103a6507..bec40c6ee 100644 --- a/docs/griptape-framework/drivers/src/structure_run_drivers_2.py +++ b/docs/griptape-framework/drivers/src/structure_run_drivers_2.py @@ -15,7 +15,7 @@ StructureRunTask( ("Think of a question related to Retrieval Augmented Generation.",), driver=LocalStructureRunDriver( - structure_factory_fn=lambda: Agent( + create_structure=lambda: Agent( rules=[ Rule( value="You are an expert in Retrieval Augmented Generation.", diff --git a/docs/griptape-framework/drivers/structure-run-drivers.md b/docs/griptape-framework/drivers/structure-run-drivers.md index 1f57ff57e..7ba3aed65 100644 --- a/docs/griptape-framework/drivers/structure-run-drivers.md +++ b/docs/griptape-framework/drivers/structure-run-drivers.md @@ -1,9 +1,10 @@ --- search: - boost: 2 + boost: 2 --- ## Overview + Structure Run Drivers can be used to run Griptape Structures in a variety of runtime environments. 
When combined with the [Structure Run Task](../../griptape-framework/structures/tasks.md#structure-run-task) or [Structure Run Tool](../../griptape-tools/official-tools/structure-run-tool.md) you can create complex, multi-agent pipelines that span multiple runtime environments. @@ -21,7 +22,6 @@ The [LocalStructureRunDriver](../../reference/griptape/drivers/structure_run/loc The [GriptapeCloudStructureRunDriver](../../reference/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.md) is used to run Griptape Structures in the Griptape Cloud. - ```python --8<-- "docs/griptape-framework/drivers/src/structure_run_drivers_2.py" ``` diff --git a/docs/griptape-framework/drivers/text-to-speech-drivers.md b/docs/griptape-framework/drivers/text-to-speech-drivers.md index a6fb955e6..4ea1c574f 100644 --- a/docs/griptape-framework/drivers/text-to-speech-drivers.md +++ b/docs/griptape-framework/drivers/text-to-speech-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview diff --git a/docs/griptape-framework/drivers/vector-store-drivers.md b/docs/griptape-framework/drivers/vector-store-drivers.md index 6a76a8cf5..84438c1ed 100644 --- a/docs/griptape-framework/drivers/vector-store-drivers.md +++ b/docs/griptape-framework/drivers/vector-store-drivers.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -79,6 +79,7 @@ Here is an example of how the Driver can be used to load and query information i ``` The format for creating a vector index should look similar to the following: + ```json { "fields": [ @@ -95,6 +96,7 @@ The format for creating a vector index should look similar to the following: ] } ``` + Replace `path_to_vector` with the expected field name where the vector content will be. ### Azure MongoDB @@ -124,6 +126,7 @@ Here is an example of how the Driver can be used to load and query information i ``` The format for creating a vector index should be similar to the following: + ``` FT.CREATE idx:griptape ON hash PREFIX 1 "griptape:" SCHEMA namespace TAG vector VECTOR FLAT 6 TYPE FLOAT32 DIM 1536 DISTANCE_METRIC COSINE ``` @@ -142,6 +145,7 @@ Here is an example of how the Driver can be used to load and query information i ``` The body mappings for creating a vector index should look similar to the following: + ```json { "mappings": { @@ -163,7 +167,7 @@ The [PGVectorVectorStoreDriver](../../reference/griptape/drivers/vector/pgvector Here is an example of how the Driver can be used to load and query information in a Postgres database: -```python +```python --8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_9.py" ``` diff --git a/docs/griptape-framework/drivers/web-scraper-drivers.md b/docs/griptape-framework/drivers/web-scraper-drivers.md index 7bfb7be99..e215d118f 100644 --- a/docs/griptape-framework/drivers/web-scraper-drivers.md +++ b/docs/griptape-framework/drivers/web-scraper-drivers.md @@ -1,13 +1,13 @@ --- search: - boost: 2 + boost: 2 --- ## Overview Web Scraper Drivers can be used to scrape text from the web. They are used by [WebLoader](../../reference/griptape/loaders/web_loader.md) to provide its functionality. All Web Scraper Drivers implement the following methods: -* `scrape_url()` scrapes text from a website and returns a [TextArtifact](../../reference/griptape/artifacts/text_artifact.md). The format of the scrapped text is determined by the Driver. +- `scrape_url()` scrapes text from a website and returns a [TextArtifact](../../reference/griptape/artifacts/text_artifact.md). 
The format of the scrapped text is determined by the Driver. ## Web Scraper Drivers diff --git a/docs/griptape-framework/drivers/web-search-drivers.md b/docs/griptape-framework/drivers/web-search-drivers.md index 2c64ceba8..6e950dba0 100644 --- a/docs/griptape-framework/drivers/web-search-drivers.md +++ b/docs/griptape-framework/drivers/web-search-drivers.md @@ -7,13 +7,14 @@ search: Web Search Drivers can be used to search for links from a search query. They are used by [WebSearch](../../reference/griptape/tools/web_search/tool.md) to provide its functionality. All Web Search Drivers implement the following methods: -* `search()` searches the web and returns a [ListArtifact](../../reference/griptape/artifacts/list_artifact.md) that contains JSON-serializable [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s with the search results. +- `search()` searches the web and returns a [ListArtifact](../../reference/griptape/artifacts/list_artifact.md) that contains JSON-serializable [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s with the search results. You can use Web Search Drivers with [Structures](../structures/agents.md): ```python --8<-- "docs/griptape-framework/drivers/src/web_search_drivers_5.py" ``` + ``` ToolkitTask 45a53f1024494baab41a1f10a67017b1 Output: Here are some websites with information about AI @@ -35,11 +36,13 @@ ToolkitTask 45a53f1024494baab41a1f10a67017b1 Software](https://clockwise.software/blog/artificial-intelligence -framework/) ``` + Or use them independently: ```python --8<-- "docs/griptape-framework/drivers/src/web_search_drivers_3.py" ``` + ``` {"title": "The Top 16 AI Frameworks and Libraries: A Beginner's Guide", "url": "https://www.datacamp.com/blog/top-ai-frameworks-and-libraries", "description": "PyTorch. Torch is an open-source machine learning library known for its dynamic computational graph and is favored by researchers. The framework is excellent for prototyping and experimentation. Moreover, it's empowered by growing community support, with tools like PyTorch being built on the library."} @@ -48,7 +51,6 @@ Or use them independently: {"title": "The Top 16 AI Frameworks and Libraries | AI Slackers", "url": "https://aislackers.com/the-top-16-ai-frameworks-and-libraries/", "description": "Experiment with different frameworks to find the one that aligns with your needs and goals as a data practitioner. Embrace the world of AI frameworks, and embark on a journey of building smarter software with confidence. Discover the top AI frameworks and libraries like PyTorch, Scikit-Learn, TensorFlow, Keras, LangChain, and more."} ``` - ## Web Search Drivers ### Google @@ -75,6 +77,7 @@ Example of using `DuckDuckGoWebSearchDriver` directly: ``` ### Tavily + !!! info This driver requires the `drivers-web-search-tavily` [extra](../index.md#extras), and a Tavily [api key](https://app.tavily.com). @@ -84,7 +87,8 @@ Example of using `TavilyWebSearchDriver` directly: --8<-- "docs/griptape-framework/drivers/src/web_search_drivers_4.py" ``` -### Exa +### Exa + !!! 
info This driver requires the `drivers-web-search-exa` [extra](../index.md#extras), and an Exa [api key](https://dashboard.exa.ai/api-keys) @@ -93,4 +97,4 @@ Example of using `ExaWebSearchDriver` directly: ```python --8<-- "docs/griptape-framework/drivers/src/web_search_drivers_6.py" -``` \ No newline at end of file +``` diff --git a/docs/griptape-framework/engines/audio-engines.md b/docs/griptape-framework/engines/audio-engines.md index b5b0b24a6..2b4392518 100644 --- a/docs/griptape-framework/engines/audio-engines.md +++ b/docs/griptape-framework/engines/audio-engines.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview diff --git a/docs/griptape-framework/engines/extraction-engines.md b/docs/griptape-framework/engines/extraction-engines.md index c00352691..09b1d5ca1 100644 --- a/docs/griptape-framework/engines/extraction-engines.md +++ b/docs/griptape-framework/engines/extraction-engines.md @@ -1,9 +1,10 @@ --- search: - boost: 2 + boost: 2 --- ## Overview + Extraction Engines in Griptape facilitate the extraction of data from text formats such as CSV and JSON. These engines play a crucial role in the functionality of [Extraction Tasks](../../griptape-framework/structures/tasks.md). As of now, Griptape supports two types of Extraction Engines: the CSV Extraction Engine and the JSON Extraction Engine. @@ -15,6 +16,7 @@ The CSV Extraction Engine extracts tabular content from unstructured text. ```python --8<-- "docs/griptape-framework/engines/src/extraction_engines_1.py" ``` + ``` name,age,location Alice,28,New York @@ -24,12 +26,12 @@ Charlie,40,Texas ## JSON -The JSON Extraction Engine extracts JSON-formatted content from unstructured text. - +The JSON Extraction Engine extracts JSON-formatted content from unstructured text. ```python --8<-- "docs/griptape-framework/engines/src/extraction_engines_2.py" ``` + ``` { "model": "GPT-3.5", diff --git a/docs/griptape-framework/engines/image-generation-engines.md b/docs/griptape-framework/engines/image-generation-engines.md index 6a8f039aa..fb31254d0 100644 --- a/docs/griptape-framework/engines/image-generation-engines.md +++ b/docs/griptape-framework/engines/image-generation-engines.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview diff --git a/docs/griptape-framework/engines/image-query-engines.md b/docs/griptape-framework/engines/image-query-engines.md index 3290a20f1..168f6e601 100644 --- a/docs/griptape-framework/engines/image-query-engines.md +++ b/docs/griptape-framework/engines/image-query-engines.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Image Query Engines diff --git a/docs/griptape-framework/engines/rag-engines.md b/docs/griptape-framework/engines/rag-engines.md index 688f46ddd..b603a7633 100644 --- a/docs/griptape-framework/engines/rag-engines.md +++ b/docs/griptape-framework/engines/rag-engines.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## RAG Engines @@ -14,7 +14,6 @@ search: `RagEngine`s consist of three _stages_: `QueryRagStage`, `RetrievalRagStage`, and `ResponseRagStage`. These stages are always executed sequentially. Each stage comprises multiple _modules_, which are executed in a customized manner. Due to this unique structure, `RagEngines` are not intended to replace [Workflows](../structures/workflows.md) or [Pipelines](../structures/pipelines.md). - - `QueryRagStage` is used for modifying user queries. - `RetrievalRagStage` is used for retrieving and re-ranking text chunks. - `ResponseRagStage` is used for generating responses. 
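To make the stage/module split above concrete, here is a rough, hedged sketch of wiring a `RagEngine` by hand. This is not one of the documented snippets: the import paths and constructor parameters (`retrieval_modules`, `response_modules`, `vector_store_driver`, `process_query`) are assumptions inferred from the stage and module names listed above, and may differ between Griptape versions.

```python
from griptape.artifacts import TextArtifact
from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver
from griptape.engines.rag import RagEngine
from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule
from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage

# Seed a local vector store so the retrieval stage has something to find (illustrative content).
vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver())
vector_store_driver.upsert_text_artifacts(
    {"griptape": [TextArtifact("Griptape is a modular Python framework for building AI agents.")]}
)

engine = RagEngine(
    # RetrievalRagStage: fetch candidate text chunks from the vector store.
    retrieval_stage=RetrievalRagStage(
        retrieval_modules=[VectorStoreRetrievalRagModule(vector_store_driver=vector_store_driver)]
    ),
    # ResponseRagStage: generate an answer from the retrieved chunks.
    response_stage=ResponseRagStage(
        response_modules=[PromptResponseRagModule(prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"))]
    ),
)

print(engine.process_query("What is Griptape?").outputs[0].to_text())
```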
@@ -28,11 +27,13 @@ RAG modules are used to implement concrete actions in the RAG pipeline. `RagEngi - `TranslateQueryRagModule` is for translating the query into another language. #### Retrieval/Rerank Modules + - `TextChunksRerankRagModule` is for re-ranking retrieved results. - `TextLoaderRetrievalRagModule` is for retrieving data with text loaders in real time. - `VectorStoreRetrievalRagModule` is for retrieving text chunks from a vector store. #### Response Modules + - `PromptResponseRagModule` is for generating responses based on retrieved text chunks. - `TextChunksResponseRagModule` is for responding with retrieved text chunks. - `FootnotePromptResponseRagModule` is for responding with automatic footnotes from text chunk references. diff --git a/docs/griptape-framework/engines/summary-engines.md b/docs/griptape-framework/engines/summary-engines.md index 90c72e4dd..573d56dad 100644 --- a/docs/griptape-framework/engines/summary-engines.md +++ b/docs/griptape-framework/engines/summary-engines.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -9,7 +9,7 @@ Summary engines are used to summarize text and collections of [TextArtifact](../ ## Prompt -Used to summarize texts with LLMs. You can set a custom [prompt_driver](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.prompt_driver), [system_template_generator](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.system_template_generator), [user_template_generator](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.user_template_generator), and [chunker](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.chunker). +Used to summarize texts with LLMs. You can set a custom [prompt_driver](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.prompt_driver), [generate_system_template](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.generate_system_template), [generate_user_template](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.generate_user_template), and [chunker](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.chunker). Use the [summarize_artifacts](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.summarize_artifacts) method to summarize a list of artifacts or [summarize_text](../../reference/griptape/engines/summary/base_summary_engine.md#griptape.engines.summary.base_summary_engine.BaseSummaryEngine.summarize_text) to summarize an arbitrary string. diff --git a/docs/griptape-framework/index.md b/docs/griptape-framework/index.md index 3dd294f0b..de6206d22 100644 --- a/docs/griptape-framework/index.md +++ b/docs/griptape-framework/index.md @@ -1,4 +1,4 @@ -The Griptape framework provides developers with the ability to create AI systems that operate across two dimensions: **predictability** and **creativity**. 
+The Griptape framework provides developers with the ability to create AI systems that operate across two dimensions: **predictability** and **creativity**. For **predictability**, Griptape enforces structures like sequential pipelines, DAG-based workflows, and long-term memory. To facilitate creativity, Griptape safely prompts LLMs with tools and short-term memory connecting them to external APIs and data stores. The framework allows developers to transition between those two dimensions effortlessly based on their use case. @@ -7,14 +7,15 @@ Griptape not only helps developers harness the potential of LLMs but also enforc Griptape’s design philosophy is based on the following tenets: 1. **Modularity and composability**: All framework primitives are useful and usable on their own in addition to being easy to plug into each other. -2. **Technology-agnostic**: Griptape is designed to work with any capable LLM, data store, and backend through the abstraction of drivers. -3. **Keep data off prompt by default**: When working with data through loaders and tools, Griptape aims to keep it off prompt by default, making it easy to work with big data securely and with low latency. -4. **Minimal prompt engineering**: It’s much easier to reason about code written in Python, not natural languages. Griptape aims to default to Python in most cases unless absolutely necessary. +1. **Technology-agnostic**: Griptape is designed to work with any capable LLM, data store, and backend through the abstraction of drivers. +1. **Keep data off prompt by default**: When working with data through loaders and tools, Griptape aims to keep it off prompt by default, making it easy to work with big data securely and with low latency. +1. **Minimal prompt engineering**: It’s much easier to reason about code written in Python, not natural languages. Griptape aims to default to Python in most cases unless absolutely necessary. ## Quick Start ### OpenAI API Key -First, configure an OpenAI client by [getting an API key](https://platform.openai.com/account/api-keys) and adding it to your environment as `OPENAI_API_KEY`. + +First, configure an OpenAI client by [getting an API key](https://platform.openai.com/account/api-keys) and adding it to your environment as `OPENAI_API_KEY`. By default, Griptape uses [OpenAI Completions API](https://platform.openai.com/docs/guides/completion) to execute LLM prompts, but other LLMs can be configured with the use of [Prompt Drivers](./drivers/prompt-drivers.md). ### Using pip @@ -27,13 +28,13 @@ pip install "griptape[all]" -U ### Using Poetry -To get started with Griptape using Poetry first create a new poetry project from the terminal: +To get started with Griptape using Poetry first create a new poetry project from the terminal: ``` poetry new griptape-quickstart ``` -Change your working directory to the new `griptape-quickstart` directory created by Poetry and add the the `griptape` dependency. +Change your working directory to the new `griptape-quickstart` directory created by Poetry and add the `griptape` dependency. ``` poetry add "griptape[all]" @@ -41,33 +42,38 @@ poetry add "griptape[all]" ### Extras -The `[all]` [extra](https://peps.python.org/pep-0508/#extras) ensures that you have access to the entire range of functionalities that Griptape offers. +The `[all]` [extra](https://peps.python.org/pep-0508/#extras) ensures that you have access to the entire range of functionalities that Griptape offers.
This comprehensive installation is recommended for newcomers to get the complete Griptape experience. However, if you wish to optimize the installation size or only require specific functionalities, you have two main options: 1. Core Dependencies: These are the foundational dependencies that enable Griptape to function with most of its default settings. -2. Extras: These are additional, vendor-specific drivers integrated within the Griptape framework. If a particular Driver mandates an extra, it will be explicitly highlighted in the documentation. +1. Extras: These are additional, vendor-specific drivers integrated within the Griptape framework. If a particular Driver mandates an extra, it will be explicitly highlighted in the documentation. To install just the core dependencies: + ``` poetry add griptape ``` To install specific extras (e.g., drivers for [AnthropicPromptDriver](./drivers/prompt-drivers.md#anthropic) and [PineconeVectorStoreDriver](./drivers/vector-store-drivers.md#pinecone)): + ``` poetry add "griptape[drivers-prompt-anthropic,drivers-vector-pinecone]" ``` For a comprehensive list of extras, please refer to the `[tool.poetry.extras]` section of Griptape's [pyproject.toml](https://github.com/griptape-ai/griptape/blob/main/pyproject.toml). -## Build a Simple Agent -With Griptape, you can create *structures*, such as [Agents](./structures/agents.md), [Pipelines](./structures/pipelines.md), and [Workflows](./structures/workflows.md), that are composed of different types of tasks. First, let's build a simple Agent that we can interact with through a chat based interface. +## Build a Simple Agent + +With Griptape, you can create *structures*, such as [Agents](./structures/agents.md), [Pipelines](./structures/pipelines.md), and [Workflows](./structures/workflows.md), that are composed of different types of tasks. First, let's build a simple Agent that we can interact with through a chat based interface. ```python --8<-- "docs/griptape-framework/src/index_1.py" ``` -Run this script in your IDE and you'll be presented with a `Q:` prompt where you can interact with your model. + +Run this script in your IDE and you'll be presented with a `Q:` prompt where you can interact with your model. + ``` Q: Write me a haiku about griptape processing... @@ -82,19 +88,22 @@ Skateboard's trusty, silent guide, In each ride, we're glued. Q: ``` -If you want to skip the chat interface and load an initial prompt, you can do so using the `.run()` method: + +If you want to skip the chat interface and load an initial prompt, you can do so using the `.run()` method: ```python --8<-- "docs/griptape-framework/src/index_2.py" ``` -Agents on their own are fun, but let's add some capabilities to them using Griptape Tools. -### Build a Simple Agent with Tools + +Agents on their own are fun, but let's add some capabilities to them using Griptape Tools. + +### Build a Simple Agent with Tools ```python --8<-- "docs/griptape-framework/src/index_3.py" ``` -Here is the chain of thought from the Agent. Notice where it realizes it can use the tool you just injected to do the calculation.[^1] -[^1]: In some cases a model might be capable of basic arithmetic. For example, gpt-3.5 returns the correct numeric answer but in an odd format. + +Here is the chain of thought from the Agent. 
Notice where it realizes it can use the tool you just injected to do the calculation.[^1] ``` [07/23/24 10:47:38] INFO ToolkitTask 6a51060d1fb74e57840a91aa319f26dc @@ -200,3 +209,5 @@ Agents are great for getting started, but they are intentionally limited to a si [08/12/24 14:50:42] INFO PromptTask dbbb38f144f445db896dc12854f17ad3 Output: El contenido de https://www.griptape.ai ha sido resumido y almacenado en griptape.txt. ``` + +[^1]: In some cases a model might be capable of basic arithmetic. For example, gpt-3.5 returns the correct numeric answer but in an odd format. diff --git a/docs/griptape-framework/misc/events.md b/docs/griptape-framework/misc/events.md index beb02d66a..c33e3a3c6 100644 --- a/docs/griptape-framework/misc/events.md +++ b/docs/griptape-framework/misc/events.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -15,6 +15,7 @@ You can listen to specific event types: ```python --8<-- "docs/griptape-framework/misc/src/events_1.py" ``` + ``` [09/08/23 10:51:16] INFO PromptTask a20c236d1d86480fb14ae976e6cf8983 @@ -84,20 +85,24 @@ The `EventListener` will automatically be added and removed from the [EventBus]( ## Streaming - -You can use the [CompletionChunkEvent](../../reference/griptape/events/completion_chunk_event.md) to stream the completion results from Prompt Drivers. +You can use the [BaseChunkEvent](../../reference/griptape/events/base_chunk_event.md) to stream the completion results from Prompt Drivers. ```python --8<-- "docs/griptape-framework/misc/src/events_3.py" ``` -You can also use the [Stream](../../reference/griptape/utils/stream.md) utility to automatically wrap -[CompletionChunkEvent](../../reference/griptape/events/completion_chunk_event.md)s in a Python iterator. +You can also use the [TextChunkEvent](../../reference/griptape/events/text_chunk_event.md) and [ActionChunkEvent](../../reference/griptape/events/action_chunk_event.md) to further differentiate the different types of chunks for more customized output. ```python ---8<-- "docs/griptape-framework/misc/src/events_4.py" +--8<-- "docs/griptape-framework/misc/src/events_chunk_stream.py" ``` +If you want Griptape to handle the chunk events for you, use the [Stream](../../reference/griptape/utils/stream.md) utility to automatically wrap +[BaseChunkEvent](../../reference/griptape/events/base_chunk_event.md)s in a Python iterator. + +```python +--8<-- "docs/griptape-framework/misc/src/events_4.py" +``` ## Counting Tokens @@ -132,14 +137,14 @@ To count tokens, you can use Event Listeners and the [TokenCounter](../../refere total tokens: 273 ``` - ## Inspecting Payloads -You can use the [StartPromptEvent](../../reference/griptape/events/start_prompt_event.md) to inspect the Prompt Stack and final prompt string before it is sent to the LLM. +You can use the [StartPromptEvent](../../reference/griptape/events/start_prompt_event.md) to inspect the Prompt Stack and final prompt string before it is sent to the LLM. ```python --8<-- "docs/griptape-framework/misc/src/events_6.py" ``` + ``` ... Prompt Stack Messages: @@ -153,3 +158,23 @@ User: Write me a poem. Assistant: ... ``` + +## `EventListenerDriver.on_event` Return Value Behavior + +The value that gets returned from the [`EventListener.on_event`](../../reference/griptape/events/event_listener.md#griptape.events.event_listener.EventListener.on_event) will determine what gets sent to the `event_listener_driver`. + +### `EventListener.on_event` is None + +By default, the `EventListener.on_event` function is `None`. 
Any events that the `EventListener` is listening for will get sent to the `event_listener_driver` as-is. + +### Return `BaseEvent` or `dict` + +You can return a `BaseEvent` or `dict` object from `EventListener.on_event`, and it will get sent to the `event_listener_driver`. + +### Return `None` + +You can return `None` in the on_event function to prevent the event from getting sent to the `event_listener_driver`. + +```python +--8<-- "docs/griptape-framework/misc/src/events_no_publish.py" +``` diff --git a/docs/griptape-framework/misc/src/events_1.py b/docs/griptape-framework/misc/src/events_1.py index 993567cc6..ad9cb5647 100644 --- a/docs/griptape-framework/misc/src/events_1.py +++ b/docs/griptape-framework/misc/src/events_1.py @@ -12,14 +12,14 @@ from griptape.structures import Agent -def handler(event: BaseEvent) -> None: +def on_event(event: BaseEvent) -> None: print(event.__class__) EventBus.add_event_listeners( [ EventListener( - handler, + on_event, event_types=[ StartTaskEvent, FinishTaskEvent, diff --git a/docs/griptape-framework/misc/src/events_3.py b/docs/griptape-framework/misc/src/events_3.py index 7adac812f..beacf814a 100644 --- a/docs/griptape-framework/misc/src/events_3.py +++ b/docs/griptape-framework/misc/src/events_3.py @@ -1,7 +1,5 @@ -from typing import cast - from griptape.drivers import OpenAiChatPromptDriver -from griptape.events import CompletionChunkEvent, EventBus, EventListener +from griptape.events import BaseChunkEvent, EventBus, EventListener from griptape.structures import Pipeline from griptape.tasks import ToolkitTask from griptape.tools import PromptSummaryTool, WebScraperTool @@ -9,9 +7,9 @@ EventBus.add_event_listeners( [ EventListener( - lambda e: print(cast(CompletionChunkEvent, e).token, end="", flush=True), - event_types=[CompletionChunkEvent], - ) + lambda e: print(str(e), end="", flush=True), + event_types=[BaseChunkEvent], + ), ] ) diff --git a/docs/griptape-framework/misc/src/events_6.py b/docs/griptape-framework/misc/src/events_6.py index 25934442a..4cc21fa75 100644 --- a/docs/griptape-framework/misc/src/events_6.py +++ b/docs/griptape-framework/misc/src/events_6.py @@ -1,15 +1,15 @@ from griptape.events import BaseEvent, EventBus, EventListener, StartPromptEvent from griptape.structures import Agent -EventBus.add_event_listeners([EventListener(handler=lambda e: print(e), event_types=[StartPromptEvent])]) - -def handler(event: BaseEvent) -> None: +def on_event(event: BaseEvent) -> None: if isinstance(event, StartPromptEvent): print("Prompt Stack Messages:") for message in event.prompt_stack.messages: - print(f"{message.role}: {message.content}") + print(f"{message.role}: {message.to_text()}") + +EventBus.add_event_listeners([EventListener(on_event=on_event, event_types=[StartPromptEvent])]) agent = Agent() diff --git a/docs/griptape-framework/misc/src/events_chunk_stream.py b/docs/griptape-framework/misc/src/events_chunk_stream.py new file mode 100644 index 000000000..3ab5517f4 --- /dev/null +++ b/docs/griptape-framework/misc/src/events_chunk_stream.py @@ -0,0 +1,29 @@ +from griptape.drivers import OpenAiChatPromptDriver +from griptape.events import ActionChunkEvent, EventBus, EventListener, TextChunkEvent +from griptape.structures import Pipeline +from griptape.tasks import ToolkitTask +from griptape.tools import PromptSummaryTool, WebScraperTool + +EventBus.add_event_listeners( + [ + EventListener( + lambda e: print(str(e), end="", flush=True), + event_types=[TextChunkEvent], + ), + EventListener( + lambda e: print(str(e), end="", 
flush=True), + event_types=[ActionChunkEvent], + ), + ] +) + +pipeline = Pipeline() +pipeline.add_tasks( + ToolkitTask( + "Based on https://griptape.ai, tell me what griptape is.", + prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", stream=True), + tools=[WebScraperTool(off_prompt=True), PromptSummaryTool(off_prompt=False)], + ) +) + +pipeline.run() diff --git a/docs/griptape-framework/misc/src/events_no_publish.py b/docs/griptape-framework/misc/src/events_no_publish.py new file mode 100644 index 000000000..77267b9bc --- /dev/null +++ b/docs/griptape-framework/misc/src/events_no_publish.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +from typing import Optional + +from griptape.artifacts import ErrorArtifact, InfoArtifact +from griptape.drivers import GriptapeCloudEventListenerDriver +from griptape.events import BaseEvent, EventBus, EventListener, FinishStructureRunEvent +from griptape.structures import Agent + + +def handler_maybe_drop_events(event: FinishStructureRunEvent) -> Optional[BaseEvent | dict]: + if event.structure_id == "some_structure_id": + # Drop the event if the structure_id is "some_structure_id" + return None + if isinstance(event.output_task_output, InfoArtifact): + # Print the output of the task if it is an InfoArtifact + # and then drop the event + print(f"Info: {event.output_task_output}") + return None + if isinstance(event.output_task_output, ErrorArtifact): + # Print the output of the task if it is an ErrorArtifact + # and then convert it to a dictionary and return it + print(f"Error: {event.output_task_output}") + return { + "error": event.output_task_output.to_text(), + "exception_message": str(event.output_task_output.exception), + } + + return event + + +EventBus.add_event_listeners( + [ + EventListener( + handler_maybe_drop_events, + event_types=[FinishStructureRunEvent], + # By default, GriptapeCloudEventListenerDriver uses the api key provided + # in the GT_CLOUD_API_KEY environment variable. + event_listener_driver=GriptapeCloudEventListenerDriver(), + ), + ] +) + + +agent1 = Agent(id="some_structure_id") +agent1.run("Create a list of 8 questions for an interview with a science fiction author.") + +agent2 = Agent(id="another_structure_id") +agent2.run("Create a list of 10 questions for an interview with a theoretical physicist.") diff --git a/docs/griptape-framework/misc/tokenizers.md b/docs/griptape-framework/misc/tokenizers.md index f820d55a9..3a8f2391a 100644 --- a/docs/griptape-framework/misc/tokenizers.md +++ b/docs/griptape-framework/misc/tokenizers.md @@ -1,9 +1,9 @@ --- search: - boost: 2 + boost: 2 --- -## Overview +## Overview Tokenizers are used throughout Griptape to calculate the number of [tokens](https://learn.microsoft.com/en-us/semantic-kernel/prompt-engineering/tokens) in a piece of text. They are particularly useful for ensuring that the LLM token limits are not exceeded. @@ -19,6 +19,7 @@ Tokenizers are a low level abstraction that you will rarely interact with direct ``` ### Cohere + ```python --8<-- "docs/griptape-framework/misc/src/tokenizers_2.py" ``` @@ -36,17 +37,20 @@ Tokenizers are a low level abstraction that you will rarely interact with direct ``` ### Hugging Face + ```python --8<-- "docs/griptape-framework/misc/src/tokenizers_5.py" ``` ### Amazon Bedrock + ```python --8<-- "docs/griptape-framework/misc/src/tokenizers_6.py" ``` ### Simple -Not all LLM providers have a public tokenizer API. In this case, you can use the `SimpleTokenizer` to count tokens based on a simple heuristic. 
+ +Not all LLM providers have a public tokenizer API. In this case, you can use the `SimpleTokenizer` to count tokens based on a simple heuristic. ```python --8<-- "docs/griptape-framework/misc/src/tokenizers_7.py" diff --git a/docs/griptape-framework/structures/agents.md b/docs/griptape-framework/structures/agents.md index 1b40fad2b..a337a476e 100644 --- a/docs/griptape-framework/structures/agents.md +++ b/docs/griptape-framework/structures/agents.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview diff --git a/docs/griptape-framework/structures/configs.md b/docs/griptape-framework/structures/configs.md index e192af79d..fa72a1314 100644 --- a/docs/griptape-framework/structures/configs.md +++ b/docs/griptape-framework/structures/configs.md @@ -1,13 +1,13 @@ --- search: - boost: 2 + boost: 2 --- ## Overview Griptape exposes a global singleton, [Defaults](../../reference/griptape/configs/defaults_config.md), which can be used to access and modify the default configurations of the framework. -To update the default configurations, simply update the fields on the `Defaults` object. +To update the default configurations, simply update the fields on the `Defaults` object. Framework objects will be created with the currently set default configurations, but you can always override at the individual class level. ```python @@ -16,7 +16,7 @@ Framework objects will be created with the currently set default configurations, ### Drivers Configs -The [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) class allows for the customization of Structures within Griptape, enabling specific settings such as Drivers to be defined for Tasks. +The [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) class allows for the customization of Structures within Griptape, enabling specific settings such as Drivers to be defined for Tasks. Griptape provides predefined [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md)'s for widely used services that provide APIs for most Driver types Griptape offers. @@ -43,6 +43,7 @@ The [Azure OpenAI Driver config](../../reference/griptape/configs/drivers/azure_ ``` #### Amazon Bedrock + The [Amazon Bedrock Driver config](../../reference/griptape/configs/drivers/amazon_bedrock_drivers_config.md) provides default Drivers for Amazon Bedrock's APIs. ```python @@ -50,6 +51,7 @@ The [Amazon Bedrock Driver config](../../reference/griptape/configs/drivers/amaz ``` #### Google + The [Google Driver config](../../reference/griptape/configs/drivers/google_drivers_config.md) provides default Drivers for Google's Gemini APIs. ```python @@ -62,7 +64,6 @@ The [Anthropic Driver config](../../reference/griptape/configs/drivers/anthropic !!! info Anthropic does not provide an embeddings API which means you will need to use another service for embeddings. - The `AnthropicDriversConfig` defaults to using `VoyageAiEmbeddingDriver` which integrates with [VoyageAI](https://www.voyageai.com/), the service used in Anthropic's [embeddings documentation](https://docs.anthropic.com/claude/docs/embeddings). To override the default embedding driver, see: [Override Default Structure Embedding Driver](../drivers/embedding-drivers.md#override-default-structure-embedding-driver). ```python @@ -80,7 +81,7 @@ The [Cohere Driver config](../../reference/griptape/configs/drivers/cohere_drive #### Custom You can create your own [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) by overriding relevant Drivers. 
-The [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) class includes "Dummy" Drivers for all types, which throw a [DummyError](../../reference/griptape/exceptions/dummy_exception.md) if invoked without being overridden. +The [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) class includes "Dummy" Drivers for all types, which throw a [DummyError](../../reference/griptape/exceptions/dummy_exception.md) if invoked without being overridden. This approach ensures that you are informed through clear error messages if you attempt to use Structures without proper Driver configurations. ```python @@ -95,6 +96,82 @@ Griptape provides a predefined [LoggingConfig](../../reference/griptape/configs/ --8<-- "docs/griptape-framework/structures/src/logging_config.py" ``` +#### Debug Logs + +You can enable debug logs to view more granular information such as request/response payloads. + +```python +import logging + +from griptape.configs import Defaults +from griptape.configs.defaults_config import LoggingConfig +from griptape.configs.logging import JsonFormatter +from griptape.drivers import OpenAiChatPromptDriver +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +logger = logging.getLogger(Defaults.logging_config.logger_name) +logger.setLevel(logging.DEBUG) +logger.handlers[0].setFormatter(JsonFormatter()) + +agent = Agent() + +agent.run("Hello world!") +``` + +``` +[10/09/24 15:30:04] INFO PromptTask 75ef1747a5824bc8ac838f3081aeb57d + Input: Hello world! + DEBUG { + "model": "gpt-4o", + "temperature": 0.1, + "user": "", + "seed": null, + "messages": [ + { + "role": "user", + "content": "Hello world!" + } + ] + } +[10/09/24 15:30:05] DEBUG { + "id": "chatcmpl-AGZTwg4T4YikR2KjF3AMIRxlIfcKa", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "logprobs": null, + "message": { + "content": "Hello! How can I assist you today?", + "refusal": null, + "role": "assistant", + "function_call": null, + "tool_calls": null + } + } + ], + "created": 1728513004, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion", + "service_tier": null, + "system_fingerprint": "fp_2f406b9113", + "usage": { + "completion_tokens": 9, + "prompt_tokens": 10, + "total_tokens": 19, + "prompt_tokens_details": { + "cached_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0 + } + } + } + INFO PromptTask 75ef1747a5824bc8ac838f3081aeb57d + Output: Hello! How can I assist you today? 
+ +``` + ### Loading/Saving Configs ```python diff --git a/docs/griptape-framework/structures/conversation-memory.md b/docs/griptape-framework/structures/conversation-memory.md index 503a00b14..d8ec211ea 100644 --- a/docs/griptape-framework/structures/conversation-memory.md +++ b/docs/griptape-framework/structures/conversation-memory.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -60,4 +60,3 @@ You can choose to offset which runs are summarized with the ```python --8<-- "docs/griptape-framework/structures/src/conversation_memory_5.py" ``` - diff --git a/docs/griptape-framework/structures/observability.md b/docs/griptape-framework/structures/observability.md index 01e1af336..434059d3a 100644 --- a/docs/griptape-framework/structures/observability.md +++ b/docs/griptape-framework/structures/observability.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview diff --git a/docs/griptape-framework/structures/pipelines.md b/docs/griptape-framework/structures/pipelines.md index 7bcfc1348..28910863e 100644 --- a/docs/griptape-framework/structures/pipelines.md +++ b/docs/griptape-framework/structures/pipelines.md @@ -1,9 +1,10 @@ --- search: - boost: 2 + boost: 2 --- -## Overview +## Overview + A [Pipeline](../../reference/griptape/structures/pipeline.md) is very similar to an [Agent](../../reference/griptape/structures/agent.md), but allows for multiple tasks. You can access the final output of the Pipeline by using the [output](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.output) attribute. @@ -12,10 +13,10 @@ You can access the final output of the Pipeline by using the [output](../../refe Pipelines have access to the following [context](../../reference/griptape/structures/pipeline.md#griptape.structures.pipeline.Pipeline.context) variables in addition to the [base context](./tasks.md#context). -* `parent_output`: output from the parent. -* `parent`: parent task. -* `child`: child task. - +- `task_outputs`: dictionary containing mapping of all task IDs to their outputs. +- `parent_output`: output from the parent task if one exists, otherwise `None`. +- `parent`: parent task if one exists, otherwise `None`. +- `child`: child task if one exists, otherwise `None`. ## Pipeline diff --git a/docs/griptape-framework/structures/rulesets.md b/docs/griptape-framework/structures/rulesets.md index 12f14a96e..f7a1de482 100644 --- a/docs/griptape-framework/structures/rulesets.md +++ b/docs/griptape-framework/structures/rulesets.md @@ -1,6 +1,6 @@ --- search: - boost: 2 + boost: 2 --- ## Overview @@ -29,10 +29,6 @@ A [Ruleset](../../reference/griptape/rules/ruleset.md) can be used to define [Ru [JsonSchemaRule](../../reference/griptape/rules/json_schema_rule.md)s defines a structured format for the LLM's output by providing a JSON schema. This is particularly useful when you need the LLM to return well-formed data, such as JSON objects, with specific fields and data types. -!!! warning - `JsonSchemaRule` may break [ToolkitTask](../structures/tasks.md#toolkit) which relies on a specific [output token](https://github.com/griptape-ai/griptape/blob/e6a04c7b88cf9fa5d6bcf4c833ffebfab89a3258/griptape/tasks/toolkit_task.py#L28). 
- - ```python --8<-- "docs/griptape-framework/structures/src/json_schema_rule.py" ``` @@ -97,11 +93,12 @@ You can define a Ruleset at the Structure level if you need to have certain beha ### Rules -You can pass [rules](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.rules) directly to the Structure to have a Ruleset created for you. +You can pass [rules](../../reference/griptape/mixins/rule_mixin.md#griptape.mixins.rule_mixin.RuleMixin.rules) directly to the Structure to have a Ruleset created for you. ```python --8<-- "docs/griptape-framework/structures/src/rulesets_2.py" ``` + ``` [09/29/23 13:31:41] INFO PromptTask 51c0030b7a854ae5a9bef4595014915c Input: Respond to this question from the user: 'How do I bake a cake?' @@ -143,6 +140,7 @@ You can pass [rules](../../reference/griptape/mixins/rule_mixin.md#griptape.mixi ```python --8<-- "docs/griptape-framework/structures/src/rulesets_4.py" ``` + ``` [09/25/23 16:29:05] INFO PromptTask d1cc2c0b780d4b32b6309ceab11173f4 Input: How are you? diff --git a/docs/griptape-framework/structures/src/task_hooks.py b/docs/griptape-framework/structures/src/task_hooks.py new file mode 100644 index 000000000..e2e884a10 --- /dev/null +++ b/docs/griptape-framework/structures/src/task_hooks.py @@ -0,0 +1,38 @@ +import json +import re + +from griptape.structures import Agent +from griptape.tasks import PromptTask +from griptape.tasks.base_task import BaseTask + +SSN_PATTERN = re.compile(r"\b\d{3}-\d{2}-\d{4}\b") + +original_input = None + + +def on_before_run(task: BaseTask) -> None: + global original_input + + original_input = task.input.value + + if isinstance(task, PromptTask): + task.input = SSN_PATTERN.sub("xxx-xx-xxxx", task.input.value) + + +def on_after_run(task: BaseTask) -> None: + if task.output is not None: + task.output.value = json.dumps( + {"original_input": original_input, "masked_input": task.input.value, "output": task.output.value}, indent=2 + ) + + +agent = Agent( + tasks=[ + PromptTask( + "Respond to this user: {{ args[0] }}", + on_before_run=on_before_run, + on_after_run=on_after_run, + ) + ] +) +agent.run("Hello! My favorite color is blue, and my social security number is 123-45-6789.") diff --git a/docs/griptape-framework/structures/src/tasks_10.py b/docs/griptape-framework/structures/src/tasks_10.py index c94fa7919..e36a843df 100644 --- a/docs/griptape-framework/structures/src/tasks_10.py +++ b/docs/griptape-framework/structures/src/tasks_10.py @@ -14,7 +14,7 @@ def character_counter(task: CodeExecutionTask) -> BaseArtifact: pipeline.add_tasks( # take the first argument from the pipeline `run` method - CodeExecutionTask(run_fn=character_counter), + CodeExecutionTask(on_run=character_counter), # # take the output from the previous task and insert it into the prompt PromptTask("{{args[0]}} using {{ parent_output }} characters"), ) diff --git a/docs/griptape-framework/structures/src/tasks_16.py b/docs/griptape-framework/structures/src/tasks_16.py index 7496d2d9c..332187a00 100644 --- a/docs/griptape-framework/structures/src/tasks_16.py +++ b/docs/griptape-framework/structures/src/tasks_16.py @@ -112,7 +112,7 @@ def build_writer() -> Agent: """Perform a detailed examination of the newest developments in AI as of 2024. 
Pinpoint major trends, breakthroughs, and their implications for various industries.""", ), - driver=LocalStructureRunDriver(structure_factory_fn=build_researcher), + driver=LocalStructureRunDriver(create_structure=build_researcher), ), StructureRunTask( ( @@ -122,7 +122,7 @@ def build_writer() -> Agent: Keep the tone appealing and use simple language to make it less technical.""", "{{parent_output}}", ), - driver=LocalStructureRunDriver(structure_factory_fn=build_writer), + driver=LocalStructureRunDriver(create_structure=build_writer), ), ], ) diff --git a/docs/griptape-framework/structures/task-memory.md b/docs/griptape-framework/structures/task-memory.md index a3fc04dc5..07ff7cee3 100644 --- a/docs/griptape-framework/structures/task-memory.md +++ b/docs/griptape-framework/structures/task-memory.md @@ -1,15 +1,15 @@ --- search: - boost: 2 + boost: 2 --- ## Overview Task Memory is a powerful feature of Griptape that allows you to control where the data returned by [Tools](../tools/index.md) is stored. This is useful in the following scenarios: -* **Security requirements**: many organizations don't want data to leave their cloud for regulatory and security reasons. -* **Long textual content**: when textual content returned by Tools can't fit in the token limit, it's often useful to perform actions on it as a separate operation, not through the main LLM. -* **Non-textual content**: Tools can generate images, videos, PDFs, and other non-textual content that can be stored in Task Memory and acted upon later by other Tools. +- **Security requirements**: many organizations don't want data to leave their cloud for regulatory and security reasons. +- **Long textual content**: when textual content returned by Tools can't fit in the token limit, it's often useful to perform actions on it as a separate operation, not through the main LLM. +- **Non-textual content**: Tools can generate images, videos, PDFs, and other non-textual content that can be stored in Task Memory and acted upon later by other Tools. !!! tip Running into issue with Task Memory? Check out the [Task Memory Considerations](#task-memory-considerations) section for some common pitfalls. @@ -68,7 +68,7 @@ Let's explore what happens when `off_prompt` is set to `True`: ...Output truncated for brevity... ``` -When we set `off_prompt` to `True`, the Agent does not function as expected, even generating an error. This is because the Calculator output is being stored in Task Memory but the Agent has no way to access it. +When we set `off_prompt` to `True`, the Agent does not function as expected, even generating an error. This is because the Calculator output is being stored in Task Memory but the Agent has no way to access it. To fix this, we need a [Tool that can read from Task Memory](#tools-that-can-read-from-task-memory) such as the `PromptSummaryTool`. This is an example of [not providing a Task Memory compatible Tool](#not-providing-a-task-memory-compatible-tool). @@ -77,7 +77,7 @@ This is an example of [not providing a Task Memory compatible Tool](#not-providi The [PromptSummaryTool](../../griptape-tools/official-tools/prompt-summary-tool.md) is a Tool that allows an Agent to summarize the Artifacts in Task Memory. It has the following methods: Let's add `PromptSummaryTool` to the Agent and run the same task. -Note that on the `PromptSummaryTool` we've set `off_prompt` to `False` so that the results of the query can be returned directly to the LLM. 
+Note that on the `PromptSummaryTool` we've set `off_prompt` to `False` so that the results of the query can be returned directly to the LLM. If we had kept it as `True`, the results would have been stored back Task Memory which would've put us back to square one. See [Task Memory Looping](#task-memory-looping) for more information on this scenario. ```python @@ -137,6 +137,7 @@ Let's say we want to query the contents of a very large webpage. ``` When running this example, we get the following error: + ``` [04/26/24 13:20:02] ERROR ToolkitTask 67e2f907f95d4850ae79f9da67df54c1 Error code: 400 - {'error': {'message': "This model's maximum context length is 8192 tokens. However, your messages resulted in 73874 tokens. @@ -151,6 +152,7 @@ Note that we're setting `off_prompt` to `False` on the `QueryTool` so that the _ ``` And now we get the expected output: + ``` [08/12/24 14:56:18] INFO ToolkitTask d3ce58587dc944b0a30a205631b82944 Input: According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold? @@ -201,7 +203,7 @@ Because Task Memory splits up the storage and retrieval of data, you can use dif Here is an example where we use GPT-4 to orchestrate the Tools and store the data in Task Memory, and Anthropic's Claude 3 Haiku model to query the raw content. In this example, GPT-4 _never_ sees the contents of the page, only that it was stored in Task Memory. Even the query results generated by the Haiku model are stored in Task Memory so that the `FileManagerTool` can save the results to disk without GPT-4 ever seeing them. -```python +```python --8<-- "docs/griptape-framework/structures/src/task_memory_6.py" ``` @@ -282,12 +284,14 @@ Today, these include: Task Memory is a powerful feature of Griptape, but with great power comes great responsibility. Here are some things to keep in mind when using Task Memory: -### Tool Return Types -Griptape will only store Artifacts in Task Memory that have been explicitly defined in the `artifact_storages` parameter of the `TaskMemory` object. +### Tool Return Types + +Griptape will only store Artifacts in Task Memory that have been explicitly defined in the `artifact_storages` parameter of the `TaskMemory` object. If you try to store an Artifact that is not defined in `artifact_storages`, Griptape will raise an error. The exception to this is `InfoArtifact`s and `ErrorArtifact`s. Griptape will never store these Artifacts store in Task Memory. -By default, Griptape will store `TextArtifact`'s, `BlobArtifact`'s in Task Memory. Additionally, Griptape will also store the elements of `ListArtifact`'s as long as they are of a supported Artifact type. +By default, Griptape will store `TextArtifact`'s, `BlobArtifact`'s in Task Memory. Additionally, Griptape will also store the elements of `ListArtifact`'s as long as they are of a supported Artifact type. ### Not Providing a Task Memory Compatible Tool + When using Task Memory, make sure that you have at least one Tool that can read from Task Memory. If you don't, the data stored in Task Memory will be inaccessible to the Agent and it may hallucinate Tool Activities. ```python @@ -295,6 +299,7 @@ When using Task Memory, make sure that you have at least one Tool that can read ``` ### Task Memory Looping + An improper configuration of Tools can lead to the LLM using the Tools in a loop. 
For example, if you have a Tool that stores data in Task Memory and another Tool that queries that data from Task Memory ([Tools That Can Read From Task Memory](#tools-that-can-read-from-task-memory)), make sure that the query Tool does not store the data back in Task Memory. This can create a loop where the same data is stored and queried over and over again. @@ -303,9 +308,9 @@ This can create a loop where the same data is stored and queried over and over a ``` ### Task Memory May Not Be Necessary + Task Memory may not be necessary for all use cases. If the data returned by a Tool is not sensitive, not too large, and does not need to be acted upon by another Tool, you can leave the default of `off_prompt` to `False` and return the data directly to the LLM. ```python --8<-- "docs/griptape-framework/structures/src/task_memory_9.py" ``` - diff --git a/docs/griptape-framework/structures/tasks.md b/docs/griptape-framework/structures/tasks.md index f91937ec0..ef268a3ce 100644 --- a/docs/griptape-framework/structures/tasks.md +++ b/docs/griptape-framework/structures/tasks.md @@ -1,22 +1,23 @@ --- search: - boost: 2 + boost: 2 --- ## Overview A [Task](../../reference/griptape/tasks/index.md) is a purpose-built abstraction for the Large Language Model (LLM). Griptape offers various types of Tasks, each suitable for specific use cases. - ## Context -Tasks that take input have a field [input](../../reference/griptape/tasks/base_text_input_task.md#griptape.tasks.base_text_input_task.BaseTextInputTask.input) which lets you define the Task objective. + +Tasks that take input have a field [input](../../reference/griptape/tasks/base_text_input_task.md#griptape.tasks.base_text_input_task.BaseTextInputTask.input) which lets you define the Task objective. Within the [input](../../reference/griptape/tasks/base_text_input_task.md#griptape.tasks.base_text_input_task.BaseTextInputTask.input), you can access the following [context](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.context) variables: -* `args`: an array of arguments passed to the `.run()` method. -* `structure`: the structure that the task belongs to. -* user defined context variables +- `args`: an array of arguments passed to the `.run()` method. +- `structure`: the structure that the task belongs to. +- user defined context variables Additional [context](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.context) variables may be added based on the Structure running the task. + ```python --8<-- "docs/griptape-framework/structures/src/tasks_1.py" ``` @@ -53,6 +54,28 @@ Additional [context](../../reference/griptape/structures/structure.md#griptape.s sleeves, and let's get baking! 🍰🎉 ``` +## Hooks + +All Tasks implement [RunnableMixin](../../reference/griptape/mixins/runnable_mixin.md) which provides `on_before_run` and `on_after_run` hooks for the Task lifecycle. + +These hooks can be used to perform actions before and after the Task is run. For example, you can mask sensitive information before running the Task, and transform the output after the Task is run. + +```python +--8<-- "docs/griptape-framework/structures/src/task_hooks.py" +``` + +``` +[10/15/24 15:14:10] INFO PromptTask 63a0c734059c42808c87dff351adc8ab + Input: Respond to this user: Hello! My favorite color is blue, and my social security number is xxx-xx-xxxx. +[10/15/24 15:14:11] INFO PromptTask 63a0c734059c42808c87dff351adc8ab + Output: { + "original_input": "Respond to this user: Hello! 
My favorite color is blue, and my social security number is 123-45-6789.", + "masked_input": "Respond to this user: Hello! My favorite color is blue, and my social security number is xxx-xx-xxxx.", + "output": "Hello! It's great to hear that your favorite color is blue. However, it's important to keep your personal information, like your + social security number, private and secure. If you have any questions or need assistance, feel free to ask!" + } +``` + ## Prompt Task For general purpose prompting, use the [PromptTask](../../reference/griptape/tasks/prompt_task.md): @@ -160,7 +183,7 @@ This Task takes in one or more Tools which the LLM will decide to use through Ch ## Tool Task -Another way to use [Griptape Tools](../../griptape-framework/tools/index.md), is with a [Tool Task](../../reference/griptape/tasks/tool_task.md). +Another way to use [Griptape Tools](../../griptape-framework/tools/index.md), is with a [Tool Task](../../reference/griptape/tasks/tool_task.md). This Task takes in a single Tool which the LLM will use without Chain of Thought (CoT) reasoning. Because this Task does not use CoT, it is better suited for less capable models. ```python @@ -192,12 +215,12 @@ This Task takes in a single Tool which the LLM will use without Chain of Thought To extract information from text, use an [ExtractionTask](../../reference/griptape/tasks/extraction_task.md). This Task takes an [Extraction Engine](../../griptape-framework/engines/extraction-engines.md), and a set of arguments specific to the Engine. - ### CSV Extraction ```python --8<-- "docs/griptape-framework/structures/src/tasks_6.py" ``` + ``` [12/19/23 10:33:11] INFO ExtractionTask e87fb457edf8423ab8a78583badd7a11 Input: @@ -217,6 +240,7 @@ This Task takes an [Extraction Engine](../../griptape-framework/engines/extracti ```python --8<-- "docs/griptape-framework/structures/src/tasks_7.py" ``` + ``` [12/19/23 10:37:41] INFO ExtractionTask 3315cc77f94943a2a2dceccfe44f6a67 Input: @@ -284,7 +308,7 @@ This task takes a [RAG Engine](../../griptape-framework/engines/rag-engines.md), To execute an arbitrary Python function, use the [CodeExecutionTask](../../reference/griptape/tasks/code_execution_task.md). This task takes a python function, and authors can elect to return a custom artifact. -```python +```python --8<-- "docs/griptape-framework/structures/src/tasks_10.py" ``` @@ -349,6 +373,7 @@ This Task accepts two inputs: a query (represented by either a string or a [Text ``` ## Structure Run Task + The [Structure Run Task](../../reference/griptape/tasks/structure_run_task.md) runs another Structure with a given input. This Task is useful for orchestrating multiple specialized Structures in a single run. Note that the input to the Task is a tuple of arguments that will be passed to the Structure. @@ -364,7 +389,7 @@ This Task enables Structures to synthesize speech from text using [Text to Speec --8<-- "docs/griptape-framework/structures/src/tasks_17.py" ``` -## Audio Transcription Task +## Audio Transcription Task This Task enables Structures to transcribe speech from text using [Audio Transcription Engines](../../reference/griptape/engines/audio/audio_transcription_engine.md) and [Audio Transcription Drivers](../../reference/griptape/drivers/audio_transcription/index.md). 
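Circling back to the Structure Run Task section above, which only references doc snippets, here is a minimal sketch of the same pattern. It reuses the `StructureRunTask(..., driver=...)` and `LocalStructureRunDriver(create_structure=...)` signatures that appear in the `tasks_16.py` hunk earlier in this diff; the `build_summarizer` factory and the prompt text are illustrative only.

```python
from griptape.drivers import LocalStructureRunDriver
from griptape.structures import Agent, Pipeline
from griptape.tasks import StructureRunTask


def build_summarizer() -> Agent:
    # Factory invoked by the Driver each time the Task runs.
    return Agent()


pipeline = Pipeline()
pipeline.add_tasks(
    StructureRunTask(
        # The tuple of arguments is passed to the child Structure's run() method.
        ("Summarize the following text in one sentence:", "{{ args[0] }}"),
        driver=LocalStructureRunDriver(create_structure=build_summarizer),
    )
)

pipeline.run("Griptape lets you compose Agents, Pipelines, and Workflows out of Tasks.")
```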
diff --git a/docs/griptape-framework/structures/workflows.md b/docs/griptape-framework/structures/workflows.md index 9161268ae..f1eff199a 100644 --- a/docs/griptape-framework/structures/workflows.md +++ b/docs/griptape-framework/structures/workflows.md @@ -1,9 +1,9 @@ --- search: - boost: 2 + boost: 2 --- -## Overview +## Overview A [Workflow](../../reference/griptape/structures/workflow.md) is a non-sequential DAG that can be used for complex concurrent scenarios with tasks having multiple inputs. @@ -13,12 +13,14 @@ You can access the final output of the Workflow by using the [output](../../refe Workflows have access to the following [context](../../reference/griptape/structures/workflow.md#griptape.structures.workflow.Workflow.context) variables in addition to the [base context](./tasks.md#context): -* `parent_outputs`: dictionary containing mapping of parent IDs to their outputs. -* `parents_output_text`: string containing the concatenated outputs of all parent tasks. -* `parents`: parent tasks referenceable by IDs. -* `children`: child tasks referenceable by IDs. +- `task_outputs`: dictionary containing mapping of all task IDs to their outputs. +- `parent_outputs`: dictionary containing mapping of parent task IDs to their outputs. +- `parents_output_text`: string containing the concatenated outputs of all parent tasks. +- `parents`: dictionary containing mapping of parent task IDs to their task objects. +- `children`: dictionary containing mapping of child task IDs to their task objects. ## Workflow + Let's build a simple workflow. Let's say, we want to write a story in a fantasy world with some unique characters. We could setup a workflow that generates a world based on some keywords. Then we pass the world description to any number of child tasks that create characters. Finally, the last task pulls in information from all parent tasks and writes up a short story. ```python @@ -31,6 +33,7 @@ Note that we use the `StructureVisualizer` to get a visual representation of the !!! Info Output edited for brevity + ``` [09/08/23 10:26:21] INFO PromptTask world Input: Create a fictional world based on the following key words fantasy, ocean, tidal lock @@ -181,6 +184,7 @@ Imperatively insert parallel tasks between a parent and child: ``` output: + ``` [06/18/24 09:52:21] INFO PromptTask animal Input: Name an animal diff --git a/docs/griptape-framework/tools/index.md b/docs/griptape-framework/tools/index.md index f2adc0c97..4f7d06408 100644 --- a/docs/griptape-framework/tools/index.md +++ b/docs/griptape-framework/tools/index.md @@ -1,18 +1,19 @@ --- search: - boost: 2 + boost: 2 --- ## Overview One of the most powerful features of Griptape is the ability to use tools that can interact with the outside world. -Many of our [Prompt Drivers](../drivers/prompt-drivers.md) leverage the native function calling built into the LLMs. -For LLMs that don't support this, Griptape provides its own implementation using the [ReAct](https://arxiv.org/abs/2210.03629) technique. +Many of our [Prompt Drivers](../drivers/prompt-drivers.md) leverage the native function calling built into the LLMs. +For LLMs that don't support this, Griptape provides its own implementation using the [ReAct](https://arxiv.org/abs/2210.03629) technique. You can switch between the two strategies by setting `use_native_tools` to `True` (LLM-native tool calling) or `False` (Griptape tool calling) on your [Prompt Driver](../drivers/prompt-drivers.md). 
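To illustrate that toggle, here is a hedged sketch that sets `use_native_tools` on a Prompt Driver. It mirrors the `ToolkitTask` setup from the `events_chunk_stream.py` file added in this diff; the model name and `CalculatorTool` are chosen purely for the example.

```python
from griptape.drivers import OpenAiChatPromptDriver
from griptape.structures import Pipeline
from griptape.tasks import ToolkitTask
from griptape.tools import CalculatorTool

pipeline = Pipeline()
pipeline.add_tasks(
    ToolkitTask(
        "What is 7 * 13?",
        # Flip use_native_tools to False to fall back to Griptape's ReAct-style tool calling.
        prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", use_native_tools=True),
        tools=[CalculatorTool()],
    )
)

pipeline.run()
```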
## Tools -Here is an example of a Pipeline using Tools: + +Here is an example of a Pipeline using Tools: ```python --8<-- "docs/griptape-framework/tools/src/index_1.py" diff --git a/docs/griptape-tools/custom-tools/index.md b/docs/griptape-tools/custom-tools/index.md index 3715b7be6..f2ea484ff 100644 --- a/docs/griptape-tools/custom-tools/index.md +++ b/docs/griptape-tools/custom-tools/index.md @@ -3,7 +3,7 @@ Building your own tools is easy with Griptape! Tools are nothing more than Python classes that inherit from [BaseTool](../../reference/griptape/tools/base_tool.md). -Each method in the class is decorated with an [activity](../../reference/griptape/utils/decorators.md#griptape.utils.decorators.activity) decorator which informs the LLM how and when it should use that Tool Activity. +Each method in the class is decorated with an [activity](../../reference/griptape/utils/decorators.md#griptape.utils.decorators.activity) decorator which informs the LLM how and when it should use that Tool Activity. ## Random Number Generator Tool @@ -20,7 +20,7 @@ Check out other [Griptape Tools](https://github.com/griptape-ai/griptape/tree/ma Each Tool can also have its own dependencies. You can specify them in a `requirements.txt` file in the tool directory and Griptape will install them during Tool execution. To start, create a directory for your Tool inside your project. The directory must have the following structure: -* `tool.py` file with a tool Python class. -* `requirements.txt` file with tool Python dependencies. +- `tool.py` file with a tool Python class. +- `requirements.txt` file with tool Python dependencies. That's it! Import and use your Tool in your project as you would with any other Griptape Tool. diff --git a/docs/griptape-tools/index.md b/docs/griptape-tools/index.md index f483d493f..47bf71f9e 100644 --- a/docs/griptape-tools/index.md +++ b/docs/griptape-tools/index.md @@ -2,12 +2,19 @@ Tools give the LLM abilities to invoke outside APIs, reference data sets, and ge Griptape tools are special Python classes that LLMs can use to accomplish specific goals. Here is an example custom tool for generating a random number: +A tool can have many "activities" as denoted by the `@activity` decorator. Each activity has a description (used to provide context to the LLM), and the input schema that the LLM must follow in order to use the tool. + +When a function is decorated with `@activity`, the decorator injects keyword arguments into the function according to the schema. There are also two Griptape-provided keyword arguments: `params: dict` and `values: dict`. + +!!! info + If your schema defines any parameters named `params` or `values`, they will be overwritten by the Griptape-provided arguments. + +In the following example, all `@activity` decorated functions will result in the same value, but the method signature is defined in different ways. + ```python --8<-- "docs/griptape-tools/src/index_1.py" ``` -A tool can have many "activities" as denoted by the `@activity` decorator. Each activity has a description (used to provide context to the LLM), and the input schema that the LLM must follow in order to use the tool. - Output artifacts from all tool activities (except for `InfoArtifact` and `ErrorArtifact`) go to short-term `TaskMemory`. To disable that behavior set the `off_prompt` tool parameter to `False`: We provide a set of official Griptape Tools for accessing and processing data. You can also [build your own tools](./custom-tools/index.md). 
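To ground the `@activity` description above, here is a hedged sketch of a custom Tool. It assumes the decorator lives at `griptape.utils.decorators.activity` (per the reference link above), that activity schemas come from the `schema` package, and that the decorated method can accept the Griptape-provided `values: dict` argument described above; the `RandomNumberGenerator` class and `generate` method names are illustrative.

```python
import random

from schema import Literal, Schema

from griptape.artifacts import TextArtifact
from griptape.tools import BaseTool
from griptape.utils.decorators import activity


class RandomNumberGenerator(BaseTool):
    @activity(
        config={
            "description": "Can be used to generate a random number",
            "schema": Schema({Literal("decimals", description="Number of decimal places to round to"): int}),
        }
    )
    def generate(self, values: dict) -> TextArtifact:
        # "decimals" is supplied by the LLM according to the activity schema and arrives in the values dict.
        return TextArtifact(str(round(random.random(), values["decimals"])))
```

An Agent could then be handed `tools=[RandomNumberGenerator()]` just like any official Tool.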
diff --git a/docs/griptape-tools/official-tools/aws-iam-tool.md b/docs/griptape-tools/official-tools/aws-iam-tool.md index 8be54afb5..3524c36b7 100644 --- a/docs/griptape-tools/official-tools/aws-iam-tool.md +++ b/docs/griptape-tools/official-tools/aws-iam-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to make AWS IAM API requests. ```python --8<-- "docs/griptape-tools/official-tools/src/aws_iam_tool_1.py" ``` + ``` [08/12/24 14:56:59] INFO ToolkitTask 12345abcd67890efghijk1112131415 Input: List all my IAM users diff --git a/docs/griptape-tools/official-tools/aws-s3-tool.md b/docs/griptape-tools/official-tools/aws-s3-tool.md index c6a972d76..9e44dda10 100644 --- a/docs/griptape-tools/official-tools/aws-s3-tool.md +++ b/docs/griptape-tools/official-tools/aws-s3-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to make AWS S3 API requests. ```python --8<-- "docs/griptape-tools/official-tools/src/aws_s3_tool_1.py" ``` + ``` [08/12/24 14:51:36] INFO ToolkitTask bfc329ebc7d34497b429ab0d18ff7e7b Input: List all my S3 buckets. diff --git a/docs/griptape-tools/official-tools/calculator-tool.md b/docs/griptape-tools/official-tools/calculator-tool.md index afe17a364..d8ba618b7 100644 --- a/docs/griptape-tools/official-tools/calculator-tool.md +++ b/docs/griptape-tools/official-tools/calculator-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to make simple calculations. ```python --8<-- "docs/griptape-tools/official-tools/src/calculator_tool_1.py" ``` + ``` [09/08/23 14:23:51] INFO Task bbc6002a5e5b4655bb52b6a550a1b2a5 Input: What is 10 raised to the power of 5? diff --git a/docs/griptape-tools/official-tools/computer-tool.md b/docs/griptape-tools/official-tools/computer-tool.md index f21d4bda9..67d0f260c 100644 --- a/docs/griptape-tools/official-tools/computer-tool.md +++ b/docs/griptape-tools/official-tools/computer-tool.md @@ -7,6 +7,7 @@ You can specify a local working directory and environment variables during tool ```python --8<-- "docs/griptape-tools/official-tools/src/computer_tool_1.py" ``` + ``` ❮ poetry run python src/docs/task-memory.py [08/12/24 15:13:56] INFO ToolkitTask 203ee958d1934811afe0bb86fb246e86 @@ -44,4 +45,4 @@ You can specify a local working directory and environment variables during tool file2.txt [08/12/24 15:14:00] INFO ToolkitTask 203ee958d1934811afe0bb86fb246e86 Output: file1.txt, file2.txt -``` +``` diff --git a/docs/griptape-tools/official-tools/date-time-tool.md b/docs/griptape-tools/official-tools/date-time-tool.md index bdc5ccbf4..2e1946d70 100644 --- a/docs/griptape-tools/official-tools/date-time-tool.md +++ b/docs/griptape-tools/official-tools/date-time-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to get current date and time. 
```python --8<-- "docs/griptape-tools/official-tools/src/date_time_tool_1.py" ``` + ``` [09/11/23 15:26:02] INFO Task d0bf49dacd8849e695494578a333f6cc Input: {'description': 'What is the current date diff --git a/docs/griptape-tools/official-tools/extraction-tool.md b/docs/griptape-tools/official-tools/extraction-tool.md index 5b0486ffd..157a7161d 100644 --- a/docs/griptape-tools/official-tools/extraction-tool.md +++ b/docs/griptape-tools/official-tools/extraction-tool.md @@ -3,6 +3,7 @@ The [ExractionTool](../../reference/griptape/tools/extraction/tool.md) enables L ```python --8<-- "docs/griptape-tools/official-tools/src/extraction_tool_1.py" ``` + ``` [08/12/24 15:58:03] INFO ToolkitTask 43b3d209a83c470d8371b7ef4af175b4 Input: Load https://griptape.ai and extract key info diff --git a/docs/griptape-tools/official-tools/file-manager-tool.md b/docs/griptape-tools/official-tools/file-manager-tool.md index 2c27c86ea..19215cdfd 100644 --- a/docs/griptape-tools/official-tools/file-manager-tool.md +++ b/docs/griptape-tools/official-tools/file-manager-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to save and load files. ```python --8<-- "docs/griptape-tools/official-tools/src/file_manager_tool_1.py" ``` + ``` [09/12/23 12:07:56] INFO Task 16a1ce1847284ae3805485bad7d99116 Input: Can you get me the sample1.txt file? diff --git a/docs/griptape-tools/official-tools/google-calendar-tool.md b/docs/griptape-tools/official-tools/google-calendar-tool.md index e0b5d9cdc..313a97777 100644 --- a/docs/griptape-tools/official-tools/google-calendar-tool.md +++ b/docs/griptape-tools/official-tools/google-calendar-tool.md @@ -2,7 +2,6 @@ The [GoogleCalendarTool](../../reference/griptape/tools/google_calendar/tool.md) tool allows you to interact with Google Calendar. - ```python --8<-- "docs/griptape-tools/official-tools/src/google_calendar_tool_1.py" ``` diff --git a/docs/griptape-tools/official-tools/google-docs-tool.md b/docs/griptape-tools/official-tools/google-docs-tool.md index 1f02196b9..bc3e4e814 100644 --- a/docs/griptape-tools/official-tools/google-docs-tool.md +++ b/docs/griptape-tools/official-tools/google-docs-tool.md @@ -5,6 +5,7 @@ The [GoogleDocsTool](../../reference/griptape/tools/google_docs/tool.md) tool pr ```python --8<-- "docs/griptape-tools/official-tools/src/google_docs_tool_1.py" ``` + ``` [10/05/23 12:56:19] INFO ToolkitTask 90721b7478a74618a63d852d35be3b18 Input: Create doc with name 'test_creation' in diff --git a/docs/griptape-tools/official-tools/google-drive-tool.md b/docs/griptape-tools/official-tools/google-drive-tool.md index 18e10ec08..5d11ddd92 100644 --- a/docs/griptape-tools/official-tools/google-drive-tool.md +++ b/docs/griptape-tools/official-tools/google-drive-tool.md @@ -5,6 +5,7 @@ The [GoogleDriveTool](../../reference/griptape/tools/google_drive/tool.md) tool ```python --8<-- "docs/griptape-tools/official-tools/src/google_drive_tool_1.py" ``` + ``` [10/05/23 10:49:14] INFO ToolkitTask 2ae3bb7e828744f3a2631c29c6fce001 Input: Save the content 'Hi this is Tony' in a file diff --git a/docs/griptape-tools/official-tools/google-gmail-tool.md b/docs/griptape-tools/official-tools/google-gmail-tool.md index 1a9e6ea47..dd66856b6 100644 --- a/docs/griptape-tools/official-tools/google-gmail-tool.md +++ b/docs/griptape-tools/official-tools/google-gmail-tool.md @@ -5,6 +5,7 @@ The [GoogleGmailTool](../../reference/griptape/tools/google_gmail/tool.md) tool ```python --8<-- "docs/griptape-tools/official-tools/src/google_gmail_tool_1.py" ``` + ``` [10/05/23 13:24:05] INFO ToolkitTask 
1f190f823d584053bfe9942f41b6cb2d Input: Create a draft email in Gmail to diff --git a/docs/griptape-tools/official-tools/prompt-summary-tool.md b/docs/griptape-tools/official-tools/prompt-summary-tool.md index 7afecf57b..23c35c367 100644 --- a/docs/griptape-tools/official-tools/prompt-summary-tool.md +++ b/docs/griptape-tools/official-tools/prompt-summary-tool.md @@ -1,9 +1,10 @@ -The [PromptSummaryTool](../../reference/griptape/tools/prompt_summary/tool.md) enables LLMs summarize text data. +The [PromptSummaryTool](../../reference/griptape/tools/prompt_summary/tool.md) enables LLMs summarize text data. ```python --8<-- "docs/griptape-tools/official-tools/src/prompt_summary_tool_1.py" ``` -``` + +```` [08/12/24 15:54:46] INFO ToolkitTask 8be73eb542c44418ba880399044c017a Input: How can I build Neovim from source for MacOS according to this https://github.com/neovim/neovim/blob/master/BUILD.md [08/12/24 15:54:47] INFO Subtask cd362a149e1d400997be93c1342d1663 @@ -102,4 +103,4 @@ The [PromptSummaryTool](../../reference/griptape/tools/prompt_summary/tool.md) e By following these steps, you should be able to build and install Neovim from source on macOS. For more detailed instructions and troubleshooting tips, refer to the [BUILD.md](https://github.com/neovim/neovim/blob/master/BUILD.md) file in the Neovim repository. -``` +```` diff --git a/docs/griptape-tools/official-tools/query-tool.md b/docs/griptape-tools/official-tools/query-tool.md index 4a4f2bf33..8b1b0a50e 100644 --- a/docs/griptape-tools/official-tools/query-tool.md +++ b/docs/griptape-tools/official-tools/query-tool.md @@ -3,6 +3,7 @@ The [QueryTool](../../reference/griptape/tools/query/tool.md) enables Agents to ```python --8<-- "docs/griptape-tools/official-tools/src/query_tool_1.py" ``` + ``` [08/12/24 15:49:23] INFO ToolkitTask a88abda2e5324bdf81a3e2b99c26b9df Input: Tell me about the architecture as described here: https://neovim.io/doc/user/vim_diff.html diff --git a/docs/griptape-tools/official-tools/rag-tool.md b/docs/griptape-tools/official-tools/rag-tool.md index 71613beab..f96f9c7fe 100644 --- a/docs/griptape-tools/official-tools/rag-tool.md +++ b/docs/griptape-tools/official-tools/rag-tool.md @@ -7,6 +7,7 @@ Here is an example of how it can be used with a local vector store driver: ```python --8<-- "docs/griptape-tools/official-tools/src/rag_tool_1.py" ``` + ``` [07/11/24 13:30:43] INFO ToolkitTask a6d057d5c71d4e9cb6863a2adb64b76c Input: what is Griptape? diff --git a/docs/griptape-tools/official-tools/rest-api-tool.md b/docs/griptape-tools/official-tools/rest-api-tool.md index 345f0589b..b122fa51b 100644 --- a/docs/griptape-tools/official-tools/rest-api-tool.md +++ b/docs/griptape-tools/official-tools/rest-api-tool.md @@ -2,9 +2,10 @@ This tool enables LLMs to call REST APIs. -The [RestApiTool](../../reference/griptape/tools/rest_api/tool.md) tool uses the following parameters: +The [RestApiTool](../../reference/griptape/tools/rest_api/tool.md) tool uses the following parameters: ### Example + The following example is built using [https://jsonplaceholder.typicode.com/guide/](https://jsonplaceholder.typicode.com/guide/). 
```python diff --git a/docs/griptape-tools/official-tools/sql-tool.md b/docs/griptape-tools/official-tools/sql-tool.md index ed8aae2a0..a0b023038 100644 --- a/docs/griptape-tools/official-tools/sql-tool.md +++ b/docs/griptape-tools/official-tools/sql-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to execute SQL statements via [SQLAlchemy](https://www.sq ```python --8<-- "docs/griptape-tools/official-tools/src/sql_tool_1.py" ``` + ``` [08/12/24 14:59:31] INFO ToolkitTask e302f7315d1a4f939e0125103ff4f09f Input: SELECT * FROM people; diff --git a/docs/griptape-tools/official-tools/structure-run-tool.md b/docs/griptape-tools/official-tools/structure-run-tool.md index 7b73e5b52..37366b586 100644 --- a/docs/griptape-tools/official-tools/structure-run-tool.md +++ b/docs/griptape-tools/official-tools/structure-run-tool.md @@ -6,6 +6,7 @@ It requires you to provide a [Structure Run Driver](../../griptape-framework/dri ```python --8<-- "docs/griptape-tools/official-tools/src/structure_run_tool_1.py" ``` + ``` [05/02/24 13:50:03] INFO ToolkitTask 4e9458375bda4fbcadb77a94624ed64c Input: what is modular RAG? diff --git a/docs/griptape-tools/official-tools/variation-image-generation-tool.md b/docs/griptape-tools/official-tools/variation-image-generation-tool.md index bcc8c3f61..523dcf9f0 100644 --- a/docs/griptape-tools/official-tools/variation-image-generation-tool.md +++ b/docs/griptape-tools/official-tools/variation-image-generation-tool.md @@ -1,6 +1,6 @@ # Variation Image Generation Engine Tool -This Tool allows LLMs to generate variations of an input image from a text prompt. The input image can be provided either by its file path or by its [Task Memory](../../griptape-framework/structures/task-memory.md) reference. +This Tool allows LLMs to generate variations of an input image from a text prompt. The input image can be provided either by its file path or by its [Task Memory](../../griptape-framework/structures/task-memory.md) reference. ## Referencing an Image by File Path diff --git a/docs/griptape-tools/official-tools/web-scraper-tool.md b/docs/griptape-tools/official-tools/web-scraper-tool.md index 26b83f6e3..7001494ff 100644 --- a/docs/griptape-tools/official-tools/web-scraper-tool.md +++ b/docs/griptape-tools/official-tools/web-scraper-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to scrape web pages for full text, summaries, authors, ti ```python --8<-- "docs/griptape-tools/official-tools/src/web_scraper_tool_1.py" ``` + ``` [08/12/24 15:32:08] INFO ToolkitTask b14a4305365f4b17a4dcf235f84397e2 Input: Based on https://www.griptape.ai/, tell me what griptape is diff --git a/docs/griptape-tools/official-tools/web-search-tool.md b/docs/griptape-tools/official-tools/web-search-tool.md index 3f31fd4fd..86632a557 100644 --- a/docs/griptape-tools/official-tools/web-search-tool.md +++ b/docs/griptape-tools/official-tools/web-search-tool.md @@ -5,6 +5,7 @@ This tool enables LLMs to search the web. 
```python --8<-- "docs/griptape-tools/official-tools/src/web_search_tool_1.py" ``` + ``` [09/08/23 15:37:25] INFO Task 2cf557f7f7cd4a20a7fa2f0c46af2f71 Input: Tell me how photosynthesis works diff --git a/docs/griptape-tools/src/index_1.py b/docs/griptape-tools/src/index_1.py index 7929574d4..618592b24 100644 --- a/docs/griptape-tools/src/index_1.py +++ b/docs/griptape-tools/src/index_1.py @@ -1,4 +1,7 @@ +from __future__ import annotations + import random +import typing from schema import Literal, Optional, Schema @@ -19,5 +22,38 @@ class RandomNumberGenerator(BaseTool): def generate(self, params: dict) -> TextArtifact: return TextArtifact(str(round(random.random(), params["values"].get("decimals")))) + @activity( + config={ + "description": "Can be used to generate random numbers", + "schema": Schema( + {Optional(Literal("decimals", description="Number of decimals to round the random number to")): int} + ), + } + ) + def generate_with_decimals(self, decimals: typing.Optional[int]) -> TextArtifact: + return TextArtifact(str(round(random.random(), decimals))) + + @activity( + config={ + "description": "Can be used to generate random numbers", + "schema": Schema( + {Optional(Literal("decimals", description="Number of decimals to round the random number to")): int} + ), + } + ) + def generate_with_values(self, values: dict) -> TextArtifact: + return TextArtifact(str(round(random.random(), values.get("decimals")))) + + @activity( + config={ + "description": "Can be used to generate random numbers", + "schema": Schema( + {Optional(Literal("decimals", description="Number of decimals to round the random number to")): int} + ), + } + ) + def generate_with_kwargs(self, **kwargs) -> TextArtifact: + return TextArtifact(str(round(random.random(), kwargs.get("decimals")))) + RandomNumberGenerator() diff --git a/docs/index.md b/docs/index.md index 2c6ee5d50..9f6128f35 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,6 +1,6 @@ # Griptape Docs -Welcome to Griptape Docs! This documentation is organized into the following sections: +Welcome to Griptape Docs! This documentation is organized into the following sections: ## Griptape Topic Guides diff --git a/griptape/artifacts/base_artifact.py b/griptape/artifacts/base_artifact.py index 61989ab54..4eb908251 100644 --- a/griptape/artifacts/base_artifact.py +++ b/griptape/artifacts/base_artifact.py @@ -25,7 +25,7 @@ class BaseArtifact(SerializableMixin, ABC): name: The name of the Artifact. Defaults to the id. value: The value of the Artifact. encoding: The encoding to use when encoding/decoding the value. - encoding_error_handler: The error handler to use when encoding/decoding the value. + encoding_error_handler: The error on_event to use when encoding/decoding the value. 
""" id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={"serializable": True}) diff --git a/griptape/common/__init__.py b/griptape/common/__init__.py index 6ac434649..b05ca8b2e 100644 --- a/griptape/common/__init__.py +++ b/griptape/common/__init__.py @@ -19,7 +19,9 @@ from .reference import Reference -from .observable import observable, Observable +from .observable import Observable + +from .decorators import observable __all__ = [ "BaseMessage", diff --git a/griptape/common/decorators.py b/griptape/common/decorators.py new file mode 100644 index 000000000..022093361 --- /dev/null +++ b/griptape/common/decorators.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from typing import Any, Callable + +import wrapt + + +def observable(*dargs: Any, **dkwargs: Any) -> Any: + @wrapt.decorator + def decorator(wrapped: Callable, instance: Any, args: Any, kwargs: Any) -> Any: + from griptape.common.observable import Observable + from griptape.observability.observability import Observability + + return Observability.observe( + Observable.Call( + func=wrapped, + instance=instance, + args=args, + kwargs=kwargs, + decorator_args=dargs, + decorator_kwargs=dkwargs, + ) + ) + + # Check if it's being called as @observable or @observable(...) + if len(dargs) == 1 and callable(dargs[0]) and not dkwargs: # pyright: ignore[reportArgumentType] + # Case when decorator is used without arguments + func = dargs[0] + dargs = () + dkwargs = {} + return decorator(func) # pyright: ignore[reportCallIssue] + else: + # Case when decorator is used with arguments + return decorator diff --git a/griptape/common/observable.py b/griptape/common/observable.py index aa675dfbe..76baa8a4b 100644 --- a/griptape/common/observable.py +++ b/griptape/common/observable.py @@ -1,17 +1,9 @@ from __future__ import annotations -import functools -from inspect import isfunction -from typing import Any, Callable, Optional, TypeVar, cast +from typing import Any, Callable, Optional from attrs import Factory, define, field -T = TypeVar("T", bound=Callable) - - -def observable(*args: T | Any, **kwargs: Any) -> T: - return cast(T, Observable(*args, **kwargs)) - class Observable: @define @@ -24,54 +16,8 @@ class Call: decorator_kwargs: dict[str, Any] = field(default=Factory(dict), kw_only=True) def __call__(self) -> Any: - # If self.func has a __self__ attribute, it is a bound method and we do not need to pass the instance. - args = (self.instance, *self.args) if self.instance and not hasattr(self.func, "__self__") else self.args - return self.func(*args, **self.kwargs) + return self.func(*self.args, **self.kwargs) @property def tags(self) -> Optional[list[str]]: return self.decorator_kwargs.get("tags") - - def __init__(self, *args, **kwargs) -> None: - self._instance = None - if len(args) == 1 and len(kwargs) == 0 and isfunction(args[0]): - # Parameterless call. In otherwords, the `@observable` annotation - # was not followed by parentheses. - self._func = args[0] - functools.update_wrapper(self, self._func) - self.decorator_args = () - self.decorator_kwargs = {} - else: - # Parameterized call. In otherwords, the `@observable` annotation - # was followed by parentheses, for example `@observable()`, - # `@observable("x")` or `@observable(y="y")`. 
- self._func = None - self.decorator_args = args - self.decorator_kwargs = kwargs - - def __get__(self, obj: Any, objtype: Any = None) -> Observable: - self._instance = obj - return self - - def __call__(self, *args, **kwargs) -> Any: - if self._func: - # Parameterless call (self._func was a set in __init__) - from griptape.observability.observability import Observability - - return Observability.observe( - Observable.Call( - func=self._func, - instance=self._instance, - args=args, - kwargs=kwargs, - decorator_args=self.decorator_args, - decorator_kwargs=self.decorator_kwargs, - ) - ) - else: - # Parameterized call, create and return the "real" observable decorator - func = args[0] - decorated_func = Observable(func) - decorated_func.decorator_args = self.decorator_args - decorated_func.decorator_kwargs = self.decorator_kwargs - return decorated_func diff --git a/griptape/configs/defaults_config.py b/griptape/configs/defaults_config.py index b81f50cdc..71c0bceae 100644 --- a/griptape/configs/defaults_config.py +++ b/griptape/configs/defaults_config.py @@ -2,12 +2,12 @@ from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define, field from griptape.mixins.singleton_mixin import SingletonMixin +from griptape.utils.decorators import lazy_property from .base_config import BaseConfig -from .drivers.openai_drivers_config import OpenAiDriversConfig from .logging.logging_config import LoggingConfig if TYPE_CHECKING: @@ -16,8 +16,18 @@ @define(kw_only=True) class _DefaultsConfig(BaseConfig, SingletonMixin): - logging_config: LoggingConfig = field(default=Factory(lambda: LoggingConfig())) - drivers_config: BaseDriversConfig = field(default=Factory(lambda: OpenAiDriversConfig())) + _logging_config: LoggingConfig = field(default=None) + _drivers_config: BaseDriversConfig = field(default=None) + + @lazy_property() + def logging_config(self) -> LoggingConfig: + return LoggingConfig() + + @lazy_property() + def drivers_config(self) -> BaseDriversConfig: + from griptape.configs.drivers.openai_drivers_config import OpenAiDriversConfig + + return OpenAiDriversConfig() Defaults = _DefaultsConfig() diff --git a/griptape/configs/drivers/anthropic_drivers_config.py b/griptape/configs/drivers/anthropic_drivers_config.py index e5a1f2719..6a0fa52d4 100644 --- a/griptape/configs/drivers/anthropic_drivers_config.py +++ b/griptape/configs/drivers/anthropic_drivers_config.py @@ -4,8 +4,6 @@ from griptape.drivers import ( AnthropicImageQueryDriver, AnthropicPromptDriver, - LocalVectorStoreDriver, - VoyageAiEmbeddingDriver, ) from griptape.utils.decorators import lazy_property @@ -16,14 +14,6 @@ class AnthropicDriversConfig(DriversConfig): def prompt_driver(self) -> AnthropicPromptDriver: return AnthropicPromptDriver(model="claude-3-5-sonnet-20240620") - @lazy_property() - def embedding_driver(self) -> VoyageAiEmbeddingDriver: - return VoyageAiEmbeddingDriver(model="voyage-large-2") - - @lazy_property() - def vector_store_driver(self) -> LocalVectorStoreDriver: - return LocalVectorStoreDriver(embedding_driver=VoyageAiEmbeddingDriver(model="voyage-large-2")) - @lazy_property() def image_query_driver(self) -> AnthropicImageQueryDriver: return AnthropicImageQueryDriver(model="claude-3-5-sonnet-20240620") diff --git a/griptape/configs/logging/__init__.py b/griptape/configs/logging/__init__.py index de7726060..418708d75 100644 --- a/griptape/configs/logging/__init__.py +++ b/griptape/configs/logging/__init__.py @@ -1,5 +1,6 @@ from .logging_config import LoggingConfig from 
.truncate_logging_filter import TruncateLoggingFilter from .newline_logging_filter import NewlineLoggingFilter +from .json_formatter import JsonFormatter -__all__ = ["LoggingConfig", "TruncateLoggingFilter", "NewlineLoggingFilter"] +__all__ = ["LoggingConfig", "TruncateLoggingFilter", "NewlineLoggingFilter", "JsonFormatter"] diff --git a/griptape/configs/logging/json_formatter.py b/griptape/configs/logging/json_formatter.py new file mode 100644 index 000000000..3477112a5 --- /dev/null +++ b/griptape/configs/logging/json_formatter.py @@ -0,0 +1,19 @@ +import json +import logging +from typing import Any + +from attrs import define, field + + +@define +class JsonFormatter(logging.Formatter): + indent: int = field(default=2, kw_only=True) + + def __attrs_pre_init__(self) -> None: + super().__init__() + + def format(self, record: Any) -> str: + if isinstance(record.msg, dict): + record.msg = json.dumps(record.msg, indent=self.indent) + + return super().format(record) diff --git a/griptape/drivers/__init__.py b/griptape/drivers/__init__.py index a4806fc72..4acbc9a19 100644 --- a/griptape/drivers/__init__.py +++ b/griptape/drivers/__init__.py @@ -112,6 +112,7 @@ from .file_manager.base_file_manager_driver import BaseFileManagerDriver from .file_manager.local_file_manager_driver import LocalFileManagerDriver from .file_manager.amazon_s3_file_manager_driver import AmazonS3FileManagerDriver +from .file_manager.griptape_cloud_file_manager_driver import GriptapeCloudFileManagerDriver from .rerank.base_rerank_driver import BaseRerankDriver from .rerank.cohere_rerank_driver import CohereRerankDriver @@ -230,6 +231,7 @@ "BaseFileManagerDriver", "LocalFileManagerDriver", "AmazonS3FileManagerDriver", + "GriptapeCloudFileManagerDriver", "BaseRerankDriver", "CohereRerankDriver", "BaseRulesetDriver", diff --git a/griptape/drivers/embedding/voyageai_embedding_driver.py b/griptape/drivers/embedding/voyageai_embedding_driver.py index bc4e78bf1..ebc92c491 100644 --- a/griptape/drivers/embedding/voyageai_embedding_driver.py +++ b/griptape/drivers/embedding/voyageai_embedding_driver.py @@ -10,7 +10,7 @@ from griptape.utils.decorators import lazy_property if TYPE_CHECKING: - import voyageai + from voyageai.client import Client @define @@ -34,7 +34,7 @@ class VoyageAiEmbeddingDriver(BaseEmbeddingDriver): kw_only=True, ) input_type: str = field(default="document", kw_only=True, metadata={"serializable": True}) - _client: voyageai.Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + _client: Client = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) @lazy_property() def client(self) -> Any: diff --git a/griptape/drivers/event_listener/base_event_listener_driver.py b/griptape/drivers/event_listener/base_event_listener_driver.py index f9cb55dc9..56d1d8c5e 100644 --- a/griptape/drivers/event_listener/base_event_listener_driver.py +++ b/griptape/drivers/event_listener/base_event_listener_driver.py @@ -1,13 +1,14 @@ from __future__ import annotations import logging -import threading from abc import ABC, abstractmethod from typing import TYPE_CHECKING from attrs import Factory, define, field +from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin +from griptape.utils import with_contextvars if TYPE_CHECKING: from griptape.events import BaseEvent @@ -16,10 +17,9 @@ @define -class BaseEventListenerDriver(FuturesExecutorMixin, ABC): +class 
BaseEventListenerDriver(FuturesExecutorMixin, ExponentialBackoffMixin, ABC): batched: bool = field(default=True, kw_only=True) batch_size: int = field(default=10, kw_only=True) - thread_lock: threading.Lock = field(default=Factory(lambda: threading.Lock())) _batch: list[dict] = field(default=Factory(list), kw_only=True) @@ -27,8 +27,21 @@ class BaseEventListenerDriver(FuturesExecutorMixin, ABC): def batch(self) -> list[dict]: return self._batch - def publish_event(self, event: BaseEvent | dict, *, flush: bool = False) -> None: - self.futures_executor.submit(self._safe_try_publish_event, event, flush=flush) + def publish_event(self, event: BaseEvent | dict) -> None: + event_payload = event if isinstance(event, dict) else event.to_dict() + + if self.batched: + self._batch.append(event_payload) + if len(self.batch) >= self.batch_size: + self.futures_executor.submit(with_contextvars(self._safe_publish_event_payload_batch), self.batch) + self._batch = [] + else: + self.futures_executor.submit(with_contextvars(self._safe_publish_event_payload), event_payload) + + def flush_events(self) -> None: + if self.batch: + self.futures_executor.submit(with_contextvars(self._safe_publish_event_payload_batch), self.batch) + self._batch = [] @abstractmethod def try_publish_event_payload(self, event_payload: dict) -> None: ... @@ -36,18 +49,18 @@ def try_publish_event_payload(self, event_payload: dict) -> None: ... @abstractmethod def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None: ... - def _safe_try_publish_event(self, event: BaseEvent | dict, *, flush: bool) -> None: + def _safe_publish_event_payload(self, event_payload: dict) -> None: + try: + for attempt in self.retrying(): + with attempt: + self.try_publish_event_payload(event_payload) + except Exception: + logger.warning("Failed to publish event after %s attempts", self.max_attempts, exc_info=True) + + def _safe_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None: try: - event_payload = event if isinstance(event, dict) else event.to_dict() - - if self.batched: - with self.thread_lock: - self._batch.append(event_payload) - if len(self.batch) >= self.batch_size or flush: - self.try_publish_event_payload_batch(self.batch) - self._batch = [] - return - else: - self.try_publish_event_payload(event_payload) - except Exception as e: - logger.error(e) + for attempt in self.retrying(): + with attempt: + self.try_publish_event_payload_batch(event_payload_batch) + except Exception: + logger.warning("Failed to publish event batch after %s attempts", self.max_attempts, exc_info=True) diff --git a/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py b/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py index 3e06eaa88..f48d469fa 100644 --- a/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py +++ b/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py @@ -42,7 +42,7 @@ def validate_run_id(self, _: Attribute, structure_run_id: str) -> None: "structure_run_id must be set either in the constructor or as an environment variable (GT_CLOUD_STRUCTURE_RUN_ID).", ) - def publish_event(self, event: BaseEvent | dict, *, flush: bool = False) -> None: + def publish_event(self, event: BaseEvent | dict) -> None: from griptape.observability.observability import Observability event_payload = event.to_dict() if isinstance(event, BaseEvent) else event @@ -51,7 +51,7 @@ def publish_event(self, event: BaseEvent | dict, *, flush: bool = False) -> None if 
span_id is not None: event_payload["span_id"] = span_id - super().publish_event(event_payload, flush=flush) + super().publish_event(event_payload) def try_publish_event_payload(self, event_payload: dict) -> None: self._post_event(self._get_event_request(event_payload)) diff --git a/griptape/drivers/event_listener/pusher_event_listener_driver.py b/griptape/drivers/event_listener/pusher_event_listener_driver.py index 33d160b46..263876777 100644 --- a/griptape/drivers/event_listener/pusher_event_listener_driver.py +++ b/griptape/drivers/event_listener/pusher_event_listener_driver.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional from attrs import define, field @@ -21,7 +21,7 @@ class PusherEventListenerDriver(BaseEventListenerDriver): channel: str = field(kw_only=True, metadata={"serializable": True}) event_name: str = field(kw_only=True, metadata={"serializable": True}) ssl: bool = field(default=True, kw_only=True, metadata={"serializable": True}) - _client: Pusher = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) + _client: Optional[Pusher] = field(default=None, kw_only=True, alias="client", metadata={"serializable": False}) @lazy_property() def client(self) -> Pusher: diff --git a/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py b/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py index 1e841866a..ec9037fd8 100644 --- a/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py +++ b/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py @@ -64,11 +64,12 @@ def try_load_file(self, path: str) -> bytes: raise FileNotFoundError from e raise e - def try_save_file(self, path: str, value: bytes) -> None: + def try_save_file(self, path: str, value: bytes) -> str: full_key = self._to_full_key(path) if self._is_a_directory(full_key): raise IsADirectoryError self.client.put_object(Bucket=self.bucket, Key=full_key, Body=value) + return f"s3://{self.bucket}/{full_key}" def _to_full_key(self, path: str) -> str: path = path.lstrip("/") diff --git a/griptape/drivers/file_manager/base_file_manager_driver.py b/griptape/drivers/file_manager/base_file_manager_driver.py index c904f1532..3c8a680da 100644 --- a/griptape/drivers/file_manager/base_file_manager_driver.py +++ b/griptape/drivers/file_manager/base_file_manager_driver.py @@ -5,7 +5,7 @@ from attrs import define, field -from griptape.artifacts import BlobArtifact, InfoArtifact, TextArtifact +from griptape.artifacts import BaseArtifact, BlobArtifact, InfoArtifact, TextArtifact @define @@ -42,9 +42,23 @@ def save_file(self, path: str, value: bytes | str) -> InfoArtifact: elif isinstance(value, (bytearray, memoryview)): raise ValueError(f"Unsupported type: {type(value)}") - self.try_save_file(path, value) + location = self.try_save_file(path, value) - return InfoArtifact("Successfully saved file") + return InfoArtifact(f"Successfully saved file at: {location}") @abstractmethod - def try_save_file(self, path: str, value: bytes) -> None: ... + def try_save_file(self, path: str, value: bytes) -> str: ... 
+ + def load_artifact(self, path: str) -> BaseArtifact: + response = self.try_load_file(path) + return BaseArtifact.from_json( + response.decode() if self.encoding is None else response.decode(encoding=self.encoding) + ) + + def save_artifact(self, path: str, artifact: BaseArtifact) -> InfoArtifact: + artifact_json = artifact.to_json() + value = artifact_json.encode() if self.encoding is None else artifact_json.encode(encoding=self.encoding) + + location = self.try_save_file(path, value) + + return InfoArtifact(f"Successfully saved artifact at: {location}") diff --git a/griptape/drivers/file_manager/griptape_cloud_file_manager_driver.py b/griptape/drivers/file_manager/griptape_cloud_file_manager_driver.py new file mode 100644 index 000000000..5138a1fe4 --- /dev/null +++ b/griptape/drivers/file_manager/griptape_cloud_file_manager_driver.py @@ -0,0 +1,153 @@ +from __future__ import annotations + +import logging +import os +from typing import TYPE_CHECKING, Optional +from urllib.parse import urljoin + +import requests +from attrs import Attribute, Factory, define, field + +from griptape.drivers import BaseFileManagerDriver +from griptape.utils import import_optional_dependency + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from azure.storage.blob import BlobClient + + +@define +class GriptapeCloudFileManagerDriver(BaseFileManagerDriver): + """GriptapeCloudFileManagerDriver can be used to list, load, and save files as Assets in Griptape Cloud Buckets. + + Attributes: + bucket_id: The ID of the Bucket to list, load, and save Assets in. If not provided, the driver will attempt to + retrieve the ID from the environment variable `GT_CLOUD_BUCKET_ID`. + workdir: The working directory. List, load, and save operations will be performed relative to this directory. + base_url: The base URL of the Griptape Cloud API. Defaults to the value of the environment variable + `GT_CLOUD_BASE_URL` or `https://cloud.griptape.ai`. + api_key: The API key to use for authenticating with the Griptape Cloud API. If not provided, the driver will + attempt to retrieve the API key from the environment variable `GT_CLOUD_API_KEY`. + + Raises: + ValueError: If `api_key` is not provided, if `workdir` does not start with "/"", or invalid `bucket_id` and/or `bucket_name` value(s) are provided. 
+ """ + + bucket_id: Optional[str] = field(default=Factory(lambda: os.getenv("GT_CLOUD_BUCKET_ID")), kw_only=True) + workdir: str = field(default="/", kw_only=True) + base_url: str = field( + default=Factory(lambda: os.getenv("GT_CLOUD_BASE_URL", "https://cloud.griptape.ai")), + ) + api_key: Optional[str] = field(default=Factory(lambda: os.getenv("GT_CLOUD_API_KEY"))) + headers: dict = field( + default=Factory(lambda self: {"Authorization": f"Bearer {self.api_key}"}, takes_self=True), + init=False, + ) + + @workdir.validator # pyright: ignore[reportAttributeAccessIssue] + def validate_workdir(self, _: Attribute, workdir: str) -> None: + if not workdir.startswith("/"): + raise ValueError(f"{self.__class__.__name__} requires 'workdir' to be an absolute path, starting with `/`") + + @api_key.validator # pyright: ignore[reportAttributeAccessIssue] + def validate_api_key(self, _: Attribute, value: Optional[str]) -> str: + if value is None: + raise ValueError(f"{self.__class__.__name__} requires an API key") + return value + + @bucket_id.validator # pyright: ignore[reportAttributeAccessIssue] + def validate_bucket_id(self, _: Attribute, value: Optional[str]) -> str: + if value is None: + raise ValueError(f"{self.__class__.__name__} requires an Bucket ID") + return value + + def __attrs_post_init__(self) -> None: + try: + self._call_api(method="get", path=f"/buckets/{self.bucket_id}").json() + except requests.exceptions.HTTPError as e: + if e.response.status_code == 404: + raise ValueError(f"No Bucket found with ID: {self.bucket_id}") from e + raise ValueError(f"Unexpected error when retrieving Bucket with ID: {self.bucket_id}") from e + + def try_list_files(self, path: str, postfix: str = "") -> list[str]: + full_key = self._to_full_key(path) + + if not self._is_a_directory(full_key): + raise NotADirectoryError + + data = {"prefix": full_key} + if postfix: + data["postfix"] = postfix + # TODO: GTC SDK: Pagination + list_assets_response = self._call_api( + method="list", path=f"/buckets/{self.bucket_id}/assets", json=data, raise_for_status=False + ).json() + + return [asset["name"] for asset in list_assets_response.get("assets", [])] + + def try_load_file(self, path: str) -> bytes: + full_key = self._to_full_key(path) + + if self._is_a_directory(full_key): + raise IsADirectoryError + + try: + blob_client = self._get_blob_client(full_key=full_key) + except requests.exceptions.HTTPError as e: + if e.response.status_code == 404: + raise FileNotFoundError from e + raise e + + try: + return blob_client.download_blob().readall() + except import_optional_dependency("azure.core.exceptions").ResourceNotFoundError as e: + raise FileNotFoundError from e + + def try_save_file(self, path: str, value: bytes) -> str: + full_key = self._to_full_key(path) + + if self._is_a_directory(full_key): + raise IsADirectoryError + + try: + self._call_api(method="get", path=f"/buckets/{self.bucket_id}/assets/{full_key}", raise_for_status=True) + except requests.exceptions.HTTPError as e: + if e.response.status_code == 404: + logger.info("Asset '%s' not found, attempting to create", full_key) + data = {"name": full_key} + self._call_api(method="put", path=f"/buckets/{self.bucket_id}/assets", json=data, raise_for_status=True) + else: + raise e + + blob_client = self._get_blob_client(full_key=full_key) + + blob_client.upload_blob(data=value, overwrite=True) + return f"buckets/{self.bucket_id}/assets/{full_key}" + + def _get_blob_client(self, full_key: str) -> BlobClient: + url_response = self._call_api( + method="post", 
path=f"/buckets/{self.bucket_id}/asset-urls/{full_key}", raise_for_status=True + ).json() + sas_url = url_response["url"] + return import_optional_dependency("azure.storage.blob").BlobClient.from_blob_url(blob_url=sas_url) + + def _get_url(self, path: str) -> str: + path = path.lstrip("/") + return urljoin(self.base_url, f"/api/{path}") + + def _call_api( + self, method: str, path: str, json: Optional[dict] = None, *, raise_for_status: bool = True + ) -> requests.Response: + res = requests.request(method, self._get_url(path), json=json, headers=self.headers) + if raise_for_status: + res.raise_for_status() + return res + + def _is_a_directory(self, path: str) -> bool: + return path == "" or path.endswith("/") + + def _to_full_key(self, path: str) -> str: + path = path.lstrip("/") + full_key = f"{self.workdir}/{path}" + return full_key.lstrip("/") diff --git a/griptape/drivers/file_manager/local_file_manager_driver.py b/griptape/drivers/file_manager/local_file_manager_driver.py index b383ff7d7..69ef3ae1f 100644 --- a/griptape/drivers/file_manager/local_file_manager_driver.py +++ b/griptape/drivers/file_manager/local_file_manager_driver.py @@ -34,12 +34,13 @@ def try_load_file(self, path: str) -> bytes: raise IsADirectoryError return Path(full_path).read_bytes() - def try_save_file(self, path: str, value: bytes) -> None: + def try_save_file(self, path: str, value: bytes) -> str: full_path = self._full_path(path) if self._is_dir(full_path): raise IsADirectoryError os.makedirs(os.path.dirname(full_path), exist_ok=True) Path(full_path).write_bytes(value) + return full_path def _full_path(self, path: str) -> str: full_path = path if self.workdir is None else os.path.join(self.workdir, path.lstrip("/")) diff --git a/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py index 6c1783519..fed70fc87 100644 --- a/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py @@ -128,13 +128,14 @@ def load(self) -> tuple[list[Run], dict[str, Any]]: runs = [ Run( - id=m["metadata"].pop("run_id"), - meta=m["metadata"], - input=BaseArtifact.from_json(m["input"]), - output=BaseArtifact.from_json(m["output"]), + **({"id": message["metadata"].pop("run_id", None)} if "run_id" in message.get("metadata") else {}), + meta=message["metadata"], + input=BaseArtifact.from_json(message["input"]), + output=BaseArtifact.from_json(message["output"]), ) - for m in messages_response.get("messages", []) + for message in messages_response.get("messages", []) ] + return runs, thread_response.get("metadata", {}) def _get_url(self, path: str) -> str: diff --git a/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py b/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py index be34d2a8c..34459e1c5 100644 --- a/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py +++ b/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py @@ -1,5 +1,6 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING, Any from attrs import Factory, define, field @@ -28,6 +29,7 @@ ToolAction, observable, ) +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import AmazonBedrockTokenizer, BaseTokenizer from griptape.utils import import_optional_dependency @@ -41,6 +43,8 @@ from griptape.common import PromptStack from griptape.tools import BaseTool 
+logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class AmazonBedrockPromptDriver(BasePromptDriver): @@ -60,7 +64,10 @@ def client(self) -> Any: @observable def try_run(self, prompt_stack: PromptStack) -> Message: - response = self.client.converse(**self._base_params(prompt_stack)) + params = self._base_params(prompt_stack) + logger.debug(params) + response = self.client.converse(**params) + logger.debug(response) usage = response["usage"] output_message = response["output"]["message"] @@ -73,11 +80,14 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: - response = self.client.converse_stream(**self._base_params(prompt_stack)) + params = self._base_params(prompt_stack) + logger.debug(params) + response = self.client.converse_stream(**params) stream = response.get("stream") if stream is not None: for event in stream: + logger.debug(event) if "contentBlockDelta" in event or "contentBlockStart" in event: yield DeltaMessage(content=self.__to_prompt_stack_delta_message_content(event)) elif "metadata" in event: @@ -107,6 +117,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: if prompt_stack.tools and self.use_native_tools else {} ), + **self.extra_params, } def __to_bedrock_messages(self, messages: list[Message]) -> list[dict]: diff --git a/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py b/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py index 2dcf55307..d98ac9fd4 100644 --- a/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py +++ b/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.py @@ -1,12 +1,14 @@ from __future__ import annotations import json +import logging from typing import TYPE_CHECKING, Any, Optional from attrs import Attribute, Factory, define, field from griptape.artifacts import TextArtifact from griptape.common import DeltaMessage, Message, PromptStack, TextMessageContent, observable +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import HuggingFaceTokenizer from griptape.utils import import_optional_dependency @@ -19,6 +21,8 @@ from griptape.common import PromptStack +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class AmazonSageMakerJumpstartPromptDriver(BasePromptDriver): @@ -52,6 +56,7 @@ def try_run(self, prompt_stack: PromptStack) -> Message: "inputs": self.prompt_stack_to_string(prompt_stack), "parameters": {**self._base_params(prompt_stack)}, } + logger.debug(payload) response = self.client.invoke_endpoint( EndpointName=self.endpoint, @@ -66,6 +71,7 @@ def try_run(self, prompt_stack: PromptStack) -> Message: ) decoded_body = json.loads(response["Body"].read().decode("utf8")) + logger.debug(decoded_body) if isinstance(decoded_body, list): if decoded_body: @@ -99,6 +105,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: "eos_token_id": self.tokenizer.tokenizer.eos_token_id, "stop_strings": self.tokenizer.stop_sequences, "return_full_text": False, + **self.extra_params, } def _prompt_stack_to_messages(self, prompt_stack: PromptStack) -> list[dict]: diff --git a/griptape/drivers/prompt/anthropic_prompt_driver.py b/griptape/drivers/prompt/anthropic_prompt_driver.py index 1c7b376f8..3341006a1 100644 --- a/griptape/drivers/prompt/anthropic_prompt_driver.py +++ b/griptape/drivers/prompt/anthropic_prompt_driver.py @@ -1,5 +1,6 @@ from __future__ import annotations +import 
logging from typing import TYPE_CHECKING, Optional from attrs import Factory, define, field @@ -29,6 +30,7 @@ ToolAction, observable, ) +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import AnthropicTokenizer, BaseTokenizer from griptape.utils import import_optional_dependency @@ -43,6 +45,9 @@ from griptape.tools.base_tool import BaseTool +logger = logging.getLogger(Defaults.logging_config.logger_name) + + @define class AnthropicPromptDriver(BasePromptDriver): """Anthropic Prompt Driver. @@ -72,7 +77,11 @@ def client(self) -> Client: @observable def try_run(self, prompt_stack: PromptStack) -> Message: - response = self.client.messages.create(**self._base_params(prompt_stack)) + params = self._base_params(prompt_stack) + logger.debug(params) + response = self.client.messages.create(**params) + + logger.debug(response.model_dump()) return Message( content=[self.__to_prompt_stack_message_content(content) for content in response.content], @@ -82,9 +91,12 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: - events = self.client.messages.create(**self._base_params(prompt_stack), stream=True) + params = {**self._base_params(prompt_stack), "stream": True} + logger.debug(params) + events = self.client.messages.create(**params) for event in events: + logger.debug(event) if event.type == "content_block_delta" or event.type == "content_block_start": yield DeltaMessage(content=self.__to_prompt_stack_delta_message_content(event)) elif event.type == "message_start": @@ -112,6 +124,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: else {} ), **({"system": system_message} if system_message else {}), + **self.extra_params, } def __to_anthropic_messages(self, messages: list[Message]) -> list[dict]: diff --git a/griptape/drivers/prompt/base_prompt_driver.py b/griptape/drivers/prompt/base_prompt_driver.py index 778b6f474..1524d7ed9 100644 --- a/griptape/drivers/prompt/base_prompt_driver.py +++ b/griptape/drivers/prompt/base_prompt_driver.py @@ -16,7 +16,13 @@ TextMessageContent, observable, ) -from griptape.events import CompletionChunkEvent, EventBus, FinishPromptEvent, StartPromptEvent +from griptape.events import ( + ActionChunkEvent, + EventBus, + FinishPromptEvent, + StartPromptEvent, + TextChunkEvent, +) from griptape.mixins.exponential_backoff_mixin import ExponentialBackoffMixin from griptape.mixins.serializable_mixin import SerializableMixin @@ -39,6 +45,7 @@ class BasePromptDriver(SerializableMixin, ExponentialBackoffMixin, ABC): tokenizer: An instance of `BaseTokenizer` to when calculating tokens. stream: Whether to stream the completion or not. `CompletionChunkEvent`s will be published to the `Structure` if one is provided. use_native_tools: Whether to use LLM's native function calling capabilities. Must be supported by the model. + extra_params: Extra parameters to pass to the model. 
""" temperature: float = field(default=0.1, metadata={"serializable": True}) @@ -48,6 +55,7 @@ class BasePromptDriver(SerializableMixin, ExponentialBackoffMixin, ABC): tokenizer: BaseTokenizer stream: bool = field(default=False, kw_only=True, metadata={"serializable": True}) use_native_tools: bool = field(default=False, kw_only=True, metadata={"serializable": True}) + extra_params: dict = field(factory=dict, kw_only=True, metadata={"serializable": True}) def before_run(self, prompt_stack: PromptStack) -> None: EventBus.publish_event(StartPromptEvent(model=self.model, prompt_stack=prompt_stack)) @@ -127,12 +135,17 @@ def __process_stream(self, prompt_stack: PromptStack) -> Message: else: delta_contents[content.index] = [content] if isinstance(content, TextDeltaMessageContent): - EventBus.publish_event(CompletionChunkEvent(token=content.text)) + EventBus.publish_event(TextChunkEvent(token=content.text, index=content.index)) elif isinstance(content, ActionCallDeltaMessageContent): - if content.tag is not None and content.name is not None and content.path is not None: - EventBus.publish_event(CompletionChunkEvent(token=str(content))) - elif content.partial_input is not None: - EventBus.publish_event(CompletionChunkEvent(token=content.partial_input)) + EventBus.publish_event( + ActionChunkEvent( + partial_input=content.partial_input, + tag=content.tag, + name=content.name, + path=content.path, + index=content.index, + ), + ) # Build a complete content from the content deltas return self.__build_message(list(delta_contents.values()), usage) diff --git a/griptape/drivers/prompt/cohere_prompt_driver.py b/griptape/drivers/prompt/cohere_prompt_driver.py index b31c78ea3..8b42b4083 100644 --- a/griptape/drivers/prompt/cohere_prompt_driver.py +++ b/griptape/drivers/prompt/cohere_prompt_driver.py @@ -1,5 +1,6 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING, Any from attrs import Factory, define, field @@ -20,6 +21,7 @@ observable, ) from griptape.common.prompt_stack.contents.action_call_delta_message_content import ActionCallDeltaMessageContent +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import BaseTokenizer, CohereTokenizer from griptape.utils import import_optional_dependency @@ -33,6 +35,8 @@ from griptape.tools import BaseTool +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define(kw_only=True) class CoherePromptDriver(BasePromptDriver): @@ -59,7 +63,11 @@ def client(self) -> Client: @observable def try_run(self, prompt_stack: PromptStack) -> Message: - result = self.client.chat(**self._base_params(prompt_stack)) + params = self._base_params(prompt_stack) + logger.debug(params) + + result = self.client.chat(**params) + logger.debug(result.model_dump()) usage = result.meta.tokens return Message( @@ -70,9 +78,12 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: - result = self.client.chat_stream(**self._base_params(prompt_stack)) + params = self._base_params(prompt_stack) + logger.debug(params) + result = self.client.chat_stream(**params) for event in result: + logger.debug(event.model_dump()) if event.event_type == "stream-end": usage = event.response.meta.tokens @@ -117,6 +128,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: else {} ), **({"preamble": preamble} if preamble else {}), + **self.extra_params, } def __to_cohere_messages(self, messages: list[Message]) -> 
list[dict]: diff --git a/griptape/drivers/prompt/google_prompt_driver.py b/griptape/drivers/prompt/google_prompt_driver.py index 4afdad5c6..2a6bdbf6d 100644 --- a/griptape/drivers/prompt/google_prompt_driver.py +++ b/griptape/drivers/prompt/google_prompt_driver.py @@ -1,6 +1,7 @@ from __future__ import annotations import json +import logging from typing import TYPE_CHECKING, Optional from attrs import Factory, define, field @@ -23,6 +24,7 @@ ToolAction, observable, ) +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import BaseTokenizer, GoogleTokenizer from griptape.utils import import_optional_dependency, remove_key_in_dict_recursively @@ -37,6 +39,8 @@ from griptape.tools import BaseTool +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class GooglePromptDriver(BasePromptDriver): @@ -72,10 +76,10 @@ def client(self) -> GenerativeModel: @observable def try_run(self, prompt_stack: PromptStack) -> Message: messages = self.__to_google_messages(prompt_stack) - response: GenerateContentResponse = self.client.generate_content( - messages, - **self._base_params(prompt_stack), - ) + params = self._base_params(prompt_stack) + logging.debug((messages, params)) + response: GenerateContentResponse = self.client.generate_content(messages, **params) + logging.debug(response.to_dict()) usage_metadata = response.usage_metadata @@ -91,14 +95,16 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: messages = self.__to_google_messages(prompt_stack) + params = {**self._base_params(prompt_stack), "stream": True} + logging.debug((messages, params)) response: GenerateContentResponse = self.client.generate_content( messages, - **self._base_params(prompt_stack), - stream=True, + **params, ) prompt_token_count = None for chunk in response: + logger.debug(chunk.to_dict()) usage_metadata = chunk.usage_metadata content = self.__to_prompt_stack_delta_message_content(chunk.parts[0]) if chunk.parts else None @@ -139,6 +145,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: "temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, + **self.extra_params, }, ), **( diff --git a/griptape/drivers/prompt/huggingface_hub_prompt_driver.py b/griptape/drivers/prompt/huggingface_hub_prompt_driver.py index 68267f755..c2c45c3ae 100644 --- a/griptape/drivers/prompt/huggingface_hub_prompt_driver.py +++ b/griptape/drivers/prompt/huggingface_hub_prompt_driver.py @@ -1,10 +1,12 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING from attrs import Factory, define, field from griptape.common import DeltaMessage, Message, PromptStack, TextDeltaMessageContent, observable +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import HuggingFaceTokenizer from griptape.utils import import_optional_dependency @@ -15,6 +17,8 @@ from huggingface_hub import InferenceClient +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class HuggingFaceHubPromptDriver(BasePromptDriver): @@ -23,7 +27,6 @@ class HuggingFaceHubPromptDriver(BasePromptDriver): Attributes: api_token: Hugging Face Hub API token. use_gpu: Use GPU during model run. - params: Custom model run parameters. model: Hugging Face Hub model name. client: Custom `InferenceApi`. tokenizer: Custom `HuggingFaceTokenizer`. 
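The `params` attribute being removed above is superseded by the base prompt driver's new `extra_params` field; a hedged sketch of the replacement usage (the model id and generation arguments are illustrative):

```python
from griptape.drivers import HuggingFaceHubPromptDriver

# Previously: HuggingFaceHubPromptDriver(..., params={"temperature": 0.7})
# Now the same values flow through the shared `extra_params` field, which
# _base_params() merges into the text_generation call.
driver = HuggingFaceHubPromptDriver(
    model="HuggingFaceH4/zephyr-7b-beta",  # illustrative model id
    api_token="<your Hugging Face Hub API token>",
    extra_params={"temperature": 0.7, "repetition_penalty": 1.2},
)
```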
@@ -31,7 +34,6 @@ class HuggingFaceHubPromptDriver(BasePromptDriver): api_token: str = field(kw_only=True, metadata={"serializable": True}) max_tokens: int = field(default=250, kw_only=True, metadata={"serializable": True}) - params: dict = field(factory=dict, kw_only=True, metadata={"serializable": True}) model: str = field(kw_only=True, metadata={"serializable": True}) tokenizer: HuggingFaceTokenizer = field( default=Factory( @@ -52,13 +54,14 @@ def client(self) -> InferenceClient: @observable def try_run(self, prompt_stack: PromptStack) -> Message: prompt = self.prompt_stack_to_string(prompt_stack) + full_params = self._base_params(prompt_stack) + logger.debug((prompt, full_params)) response = self.client.text_generation( prompt, - return_full_text=False, - max_new_tokens=self.max_tokens, - **self.params, + **full_params, ) + logger.debug(response) input_tokens = len(self.__prompt_stack_to_tokens(prompt_stack)) output_tokens = len(self.tokenizer.tokenizer.encode(response)) @@ -71,19 +74,16 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: prompt = self.prompt_stack_to_string(prompt_stack) + full_params = {**self._base_params(prompt_stack), "stream": True} + logger.debug((prompt, full_params)) - response = self.client.text_generation( - prompt, - return_full_text=False, - max_new_tokens=self.max_tokens, - stream=True, - **self.params, - ) + response = self.client.text_generation(prompt, **full_params) input_tokens = len(self.__prompt_stack_to_tokens(prompt_stack)) full_text = "" for token in response: + logger.debug(token) full_text += token yield DeltaMessage(content=TextDeltaMessageContent(token, index=0)) @@ -93,6 +93,13 @@ def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: def prompt_stack_to_string(self, prompt_stack: PromptStack) -> str: return self.tokenizer.tokenizer.decode(self.__prompt_stack_to_tokens(prompt_stack)) + def _base_params(self, prompt_stack: PromptStack) -> dict: + return { + "return_full_text": False, + "max_new_tokens": self.max_tokens, + **self.extra_params, + } + def _prompt_stack_to_messages(self, prompt_stack: PromptStack) -> list[dict]: messages = [] for message in prompt_stack.messages: diff --git a/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py b/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py index 1978b339a..a197523df 100644 --- a/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py +++ b/griptape/drivers/prompt/huggingface_pipeline_prompt_driver.py @@ -1,11 +1,13 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING from attrs import Factory, define, field from griptape.artifacts import TextArtifact from griptape.common import DeltaMessage, Message, PromptStack, TextMessageContent, observable +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import HuggingFaceTokenizer from griptape.utils import import_optional_dependency @@ -16,19 +18,19 @@ from transformers import TextGenerationPipeline +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class HuggingFacePipelinePromptDriver(BasePromptDriver): """Hugging Face Pipeline Prompt Driver. Attributes: - params: Custom model run parameters. model: Hugging Face Hub model name. 
""" max_tokens: int = field(default=250, kw_only=True, metadata={"serializable": True}) model: str = field(kw_only=True, metadata={"serializable": True}) - params: dict = field(factory=dict, kw_only=True, metadata={"serializable": True}) tokenizer: HuggingFaceTokenizer = field( default=Factory( lambda self: HuggingFaceTokenizer(model=self.model, max_output_tokens=self.max_tokens), @@ -52,15 +54,17 @@ def pipeline(self) -> TextGenerationPipeline: @observable def try_run(self, prompt_stack: PromptStack) -> Message: messages = self._prompt_stack_to_messages(prompt_stack) - - result = self.pipeline( - messages, - max_new_tokens=self.max_tokens, - temperature=self.temperature, - do_sample=True, - **self.params, + full_params = self._base_params(prompt_stack) + logger.debug( + ( + messages, + full_params, + ) ) + result = self.pipeline(messages, **full_params) + logger.debug(result) + if isinstance(result, list): if len(result) == 1: generated_text = result[0]["generated_text"][-1]["content"] @@ -85,6 +89,14 @@ def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: def prompt_stack_to_string(self, prompt_stack: PromptStack) -> str: return self.tokenizer.tokenizer.decode(self.__prompt_stack_to_tokens(prompt_stack)) + def _base_params(self, prompt_stack: PromptStack) -> dict: + return { + "max_new_tokens": self.max_tokens, + "temperature": self.temperature, + "do_sample": True, + **self.extra_params, + } + def _prompt_stack_to_messages(self, prompt_stack: PromptStack) -> list[dict]: messages = [] diff --git a/griptape/drivers/prompt/ollama_prompt_driver.py b/griptape/drivers/prompt/ollama_prompt_driver.py index 5f9e32e2f..ca6813c23 100644 --- a/griptape/drivers/prompt/ollama_prompt_driver.py +++ b/griptape/drivers/prompt/ollama_prompt_driver.py @@ -1,5 +1,6 @@ from __future__ import annotations +import logging from collections.abc import Iterator from typing import TYPE_CHECKING, Any, Optional @@ -19,11 +20,14 @@ ToolAction, observable, ) +from griptape.configs import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import SimpleTokenizer from griptape.utils import import_optional_dependency from griptape.utils.decorators import lazy_property +logger = logging.getLogger(Defaults.logging_config.logger_name) + if TYPE_CHECKING: from ollama import Client @@ -72,7 +76,10 @@ def client(self) -> Client: @observable def try_run(self, prompt_stack: PromptStack) -> Message: - response = self.client.chat(**self._base_params(prompt_stack)) + params = self._base_params(prompt_stack) + logger.debug(params) + response = self.client.chat(**params) + logger.debug(response) if isinstance(response, dict): return Message( @@ -84,10 +91,13 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: - stream = self.client.chat(**self._base_params(prompt_stack), stream=True) + params = {**self._base_params(prompt_stack), "stream": True} + logger.debug(params) + stream = self.client.chat(**params) if isinstance(stream, Iterator): for chunk in stream: + logger.debug(chunk) yield DeltaMessage(content=TextDeltaMessageContent(chunk["message"]["content"])) else: raise Exception("invalid model response") @@ -106,6 +116,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: and not self.stream # Tool calling is only supported when not streaming else {} ), + **self.extra_params, } def _prompt_stack_to_messages(self, prompt_stack: PromptStack) -> list[dict]: diff --git 
a/griptape/drivers/prompt/openai_chat_prompt_driver.py b/griptape/drivers/prompt/openai_chat_prompt_driver.py index ec10ab72e..8a1098b4a 100644 --- a/griptape/drivers/prompt/openai_chat_prompt_driver.py +++ b/griptape/drivers/prompt/openai_chat_prompt_driver.py @@ -1,6 +1,7 @@ from __future__ import annotations import json +import logging from typing import TYPE_CHECKING, Optional import openai @@ -23,6 +24,7 @@ ToolAction, observable, ) +from griptape.configs.defaults_config import Defaults from griptape.drivers import BasePromptDriver from griptape.tokenizers import BaseTokenizer, OpenAiTokenizer from griptape.utils.decorators import lazy_property @@ -36,6 +38,9 @@ from griptape.tools import BaseTool +logger = logging.getLogger(Defaults.logging_config.logger_name) + + @define class OpenAiChatPromptDriver(BasePromptDriver): """OpenAI Chat Prompt Driver. @@ -95,8 +100,11 @@ def client(self) -> openai.OpenAI: @observable def try_run(self, prompt_stack: PromptStack) -> Message: - result = self.client.chat.completions.create(**self._base_params(prompt_stack)) + params = self._base_params(prompt_stack) + logger.debug(params) + result = self.client.chat.completions.create(**params) + logger.debug(result.model_dump()) if len(result.choices) == 1: message = result.choices[0].message @@ -113,9 +121,12 @@ def try_run(self, prompt_stack: PromptStack) -> Message: @observable def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: - result = self.client.chat.completions.create(**self._base_params(prompt_stack), stream=True) + params = self._base_params(prompt_stack) + logger.debug({"stream": True, **params}) + result = self.client.chat.completions.create(**params, stream=True) for chunk in result: + logger.debug(chunk.model_dump()) if chunk.usage is not None: yield DeltaMessage( usage=DeltaMessage.Usage( @@ -143,6 +154,7 @@ def _base_params(self, prompt_stack: PromptStack) -> dict: **({"stop": self.tokenizer.stop_sequences} if self.tokenizer.stop_sequences else {}), **({"max_tokens": self.max_tokens} if self.max_tokens is not None else {}), **({"stream_options": {"include_usage": True}} if self.stream else {}), + **self.extra_params, } if self.response_format is not None: @@ -189,6 +201,9 @@ def __to_openai_messages(self, messages: list[Message]) -> list[dict]: ] ], } + # Some OpenAi-compatible services don't accept an empty array for content + if not openai_message["content"]: + openai_message["content"] = "" # Action calls must be attached to the message, not sent as content. 
action_call_content = [ diff --git a/griptape/drivers/sql/snowflake_sql_driver.py b/griptape/drivers/sql/snowflake_sql_driver.py index d1b4310b5..82d6a525c 100644 --- a/griptape/drivers/sql/snowflake_sql_driver.py +++ b/griptape/drivers/sql/snowflake_sql_driver.py @@ -15,16 +15,16 @@ @define class SnowflakeSqlDriver(BaseSqlDriver): - connection_func: Callable[[], SnowflakeConnection] = field(kw_only=True) + get_connection: Callable[[], SnowflakeConnection] = field(kw_only=True) _engine: Engine = field(default=None, kw_only=True, alias="engine", metadata={"serializable": False}) - @connection_func.validator # pyright: ignore[reportFunctionMemberAccess] - def validate_connection_func(self, _: Attribute, connection_func: Callable[[], SnowflakeConnection]) -> None: - snowflake_connection = connection_func() + @get_connection.validator # pyright: ignore[reportFunctionMemberAccess] + def validate_get_connection(self, _: Attribute, get_connection: Callable[[], SnowflakeConnection]) -> None: + snowflake_connection = get_connection() snowflake = import_optional_dependency("snowflake") if not isinstance(snowflake_connection, snowflake.connector.SnowflakeConnection): - raise ValueError("The connection_func must return a SnowflakeConnection") + raise ValueError("The get_connection function must return a SnowflakeConnection") if not snowflake_connection.schema or not snowflake_connection.database: raise ValueError("Provide a schema and database for the Snowflake connection") @@ -32,7 +32,7 @@ def validate_connection_func(self, _: Attribute, connection_func: Callable[[], S def engine(self) -> Engine: return import_optional_dependency("sqlalchemy").create_engine( "snowflake://not@used/db", - creator=self.connection_func, + creator=self.get_connection, ) def execute_query(self, query: str) -> Optional[list[BaseSqlDriver.RowResult]]: diff --git a/griptape/drivers/structure_run/local_structure_run_driver.py b/griptape/drivers/structure_run/local_structure_run_driver.py index c0049b29a..b2335e3c3 100644 --- a/griptape/drivers/structure_run/local_structure_run_driver.py +++ b/griptape/drivers/structure_run/local_structure_run_driver.py @@ -14,18 +14,18 @@ @define class LocalStructureRunDriver(BaseStructureRunDriver): - structure_factory_fn: Callable[[], Structure] = field(kw_only=True) + create_structure: Callable[[], Structure] = field(kw_only=True) def try_run(self, *args: BaseArtifact) -> BaseArtifact: old_env = os.environ.copy() try: os.environ.update(self.env) - structure_factory_fn = self.structure_factory_fn().run(*[arg.value for arg in args]) + structure = self.create_structure().run(*[arg.value for arg in args]) finally: os.environ.clear() os.environ.update(old_env) - if structure_factory_fn.output_task.output is not None: - return structure_factory_fn.output_task.output + if structure.output_task.output is not None: + return structure.output_task.output else: return InfoArtifact("No output found in response") diff --git a/griptape/drivers/vector/base_vector_store_driver.py b/griptape/drivers/vector/base_vector_store_driver.py index e2a394bf4..13aa3f193 100644 --- a/griptape/drivers/vector/base_vector_store_driver.py +++ b/griptape/drivers/vector/base_vector_store_driver.py @@ -11,6 +11,7 @@ from griptape.artifacts import BaseArtifact, ListArtifact, TextArtifact from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin from griptape.mixins.serializable_mixin import SerializableMixin +from griptape.utils import with_contextvars if TYPE_CHECKING: from griptape.drivers import 
BaseEmbeddingDriver @@ -47,7 +48,9 @@ def upsert_text_artifacts( if isinstance(artifacts, list): return utils.execute_futures_list( [ - self.futures_executor.submit(self.upsert_text_artifact, a, namespace=None, meta=meta, **kwargs) + self.futures_executor.submit( + with_contextvars(self.upsert_text_artifact), a, namespace=None, meta=meta, **kwargs + ) for a in artifacts ], ) @@ -61,7 +64,7 @@ def upsert_text_artifacts( futures_dict[namespace].append( self.futures_executor.submit( - self.upsert_text_artifact, a, namespace=namespace, meta=meta, **kwargs + with_contextvars(self.upsert_text_artifact), a, namespace=namespace, meta=meta, **kwargs ) ) diff --git a/griptape/drivers/vector/local_vector_store_driver.py b/griptape/drivers/vector/local_vector_store_driver.py index 36203d540..557937431 100644 --- a/griptape/drivers/vector/local_vector_store_driver.py +++ b/griptape/drivers/vector/local_vector_store_driver.py @@ -19,7 +19,7 @@ class LocalVectorStoreDriver(BaseVectorStoreDriver): entries: dict[str, BaseVectorStoreDriver.Entry] = field(factory=dict) persist_file: Optional[str] = field(default=None) - relatedness_fn: Callable = field(default=lambda x, y: dot(x, y) / (norm(x) * norm(y))) + calculate_relatedness: Callable = field(default=lambda x, y: dot(x, y) / (norm(x) * norm(y))) thread_lock: threading.Lock = field(default=Factory(lambda: threading.Lock())) def __attrs_post_init__(self) -> None: @@ -95,7 +95,7 @@ def query( entries = self.entries entries_and_relatednesses = [ - (entry, self.relatedness_fn(query_embedding, entry.vector)) for entry in list(entries.values()) + (entry, self.calculate_relatedness(query_embedding, entry.vector)) for entry in list(entries.values()) ] entries_and_relatednesses.sort(key=operator.itemgetter(1), reverse=True) diff --git a/griptape/engines/extraction/csv_extraction_engine.py b/griptape/engines/extraction/csv_extraction_engine.py index 7fb2a164b..7f4647d65 100644 --- a/griptape/engines/extraction/csv_extraction_engine.py +++ b/griptape/engines/extraction/csv_extraction_engine.py @@ -18,9 +18,9 @@ @define class CsvExtractionEngine(BaseExtractionEngine): column_names: list[str] = field(kw_only=True) - system_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/system.j2")), kw_only=True) - user_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/user.j2")), kw_only=True) - formatter_fn: Callable[[dict], str] = field( + generate_system_template: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/system.j2")), kw_only=True) + generate_user_template: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/user.j2")), kw_only=True) + format_row: Callable[[dict], str] = field( default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True ) @@ -45,7 +45,7 @@ def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[TextArtif with io.StringIO(text) as f: for row in csv.reader(f): - rows.append(TextArtifact(self.formatter_fn(dict(zip(column_names, [x.strip() for x in row]))))) + rows.append(TextArtifact(self.format_row(dict(zip(column_names, [x.strip() for x in row]))))) return rows @@ -57,11 +57,11 @@ def _extract_rec( rulesets: Optional[list[Ruleset]] = None, ) -> list[TextArtifact]: artifacts_text = self.chunk_joiner.join([a.value for a in artifacts]) - system_prompt = self.system_template_generator.render( + system_prompt = self.generate_system_template.render( column_names=self.column_names, 
rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets), ) - user_prompt = self.user_template_generator.render( + user_prompt = self.generate_user_template.render( text=artifacts_text, ) @@ -86,7 +86,7 @@ def _extract_rec( return rows else: chunks = self.chunker.chunk(artifacts_text) - partial_text = self.user_template_generator.render( + partial_text = self.generate_user_template.render( text=chunks[0].value, ) diff --git a/griptape/engines/extraction/json_extraction_engine.py b/griptape/engines/extraction/json_extraction_engine.py index c817efd5f..f2c56a62b 100644 --- a/griptape/engines/extraction/json_extraction_engine.py +++ b/griptape/engines/extraction/json_extraction_engine.py @@ -21,10 +21,8 @@ class JsonExtractionEngine(BaseExtractionEngine): JSON_PATTERN = r"(?s)[^\[]*(\[.*\])" template_schema: dict = field(kw_only=True) - system_template_generator: J2 = field( - default=Factory(lambda: J2("engines/extraction/json/system.j2")), kw_only=True - ) - user_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/json/user.j2")), kw_only=True) + generate_system_template: J2 = field(default=Factory(lambda: J2("engines/extraction/json/system.j2")), kw_only=True) + generate_user_template: J2 = field(default=Factory(lambda: J2("engines/extraction/json/user.j2")), kw_only=True) def extract_artifacts( self, @@ -54,11 +52,11 @@ def _extract_rec( rulesets: Optional[list[Ruleset]] = None, ) -> list[JsonArtifact]: artifacts_text = self.chunk_joiner.join([a.value for a in artifacts]) - system_prompt = self.system_template_generator.render( + system_prompt = self.generate_system_template.render( json_template_schema=json.dumps(self.template_schema), rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets), ) - user_prompt = self.user_template_generator.render( + user_prompt = self.generate_user_template.render( text=artifacts_text, ) @@ -82,7 +80,7 @@ def _extract_rec( return extractions else: chunks = self.chunker.chunk(artifacts_text) - partial_text = self.user_template_generator.render( + partial_text = self.generate_user_template.render( text=chunks[0].value, ) diff --git a/griptape/engines/rag/modules/query/translate_query_rag_module.py b/griptape/engines/rag/modules/query/translate_query_rag_module.py index f1f9ca0ec..e92d95e2b 100644 --- a/griptape/engines/rag/modules/query/translate_query_rag_module.py +++ b/griptape/engines/rag/modules/query/translate_query_rag_module.py @@ -17,7 +17,7 @@ class TranslateQueryRagModule(BaseQueryRagModule): prompt_driver: BasePromptDriver = field() language: str = field() generate_user_template: Callable[[str, str], str] = field( - default=Factory(lambda self: self.default_user_template_generator, takes_self=True), + default=Factory(lambda self: self.default_generate_user_template, takes_self=True), ) def run(self, context: RagContext) -> RagContext: @@ -28,5 +28,5 @@ def run(self, context: RagContext) -> RagContext: return context - def default_user_template_generator(self, query: str, language: str) -> str: + def default_generate_user_template(self, query: str, language: str) -> str: return J2("engines/rag/modules/query/translate/user.j2").render(query=query, language=language) diff --git a/griptape/engines/rag/modules/response/footnote_prompt_response_rag_module.py b/griptape/engines/rag/modules/response/footnote_prompt_response_rag_module.py index 3687d1942..ea07c5007 100644 --- a/griptape/engines/rag/modules/response/footnote_prompt_response_rag_module.py +++ 
b/griptape/engines/rag/modules/response/footnote_prompt_response_rag_module.py @@ -15,7 +15,7 @@ @define(kw_only=True) class FootnotePromptResponseRagModule(PromptResponseRagModule): - def default_system_template_generator(self, context: RagContext, artifacts: list[TextArtifact]) -> str: + def default_generate_system_template(self, context: RagContext, artifacts: list[TextArtifact]) -> str: return J2("engines/rag/modules/response/footnote_prompt/system.j2").render( text_chunk_artifacts=artifacts, references=utils.references_from_artifacts(artifacts), diff --git a/griptape/engines/rag/modules/response/prompt_response_rag_module.py b/griptape/engines/rag/modules/response/prompt_response_rag_module.py index b62a0eba3..2e4f39947 100644 --- a/griptape/engines/rag/modules/response/prompt_response_rag_module.py +++ b/griptape/engines/rag/modules/response/prompt_response_rag_module.py @@ -22,7 +22,7 @@ class PromptResponseRagModule(BaseResponseRagModule, RuleMixin): answer_token_offset: int = field(default=400) metadata: Optional[str] = field(default=None) generate_system_template: Callable[[RagContext, list[TextArtifact]], str] = field( - default=Factory(lambda self: self.default_system_template_generator, takes_self=True), + default=Factory(lambda self: self.default_generate_system_template, takes_self=True), ) def run(self, context: RagContext) -> BaseArtifact: @@ -53,7 +53,7 @@ def run(self, context: RagContext) -> BaseArtifact: else: raise ValueError("Prompt driver did not return a TextArtifact") - def default_system_template_generator(self, context: RagContext, artifacts: list[TextArtifact]) -> str: + def default_generate_system_template(self, context: RagContext, artifacts: list[TextArtifact]) -> str: params: dict[str, Any] = {"text_chunks": [c.to_text() for c in artifacts]} if len(self.rulesets) > 0: diff --git a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py index 0348a2094..46128c9f6 100644 --- a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py +++ b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py @@ -25,7 +25,7 @@ class TextLoaderRetrievalRagModule(BaseRetrievalRagModule): vector_store_driver: BaseVectorStoreDriver = field() source: Any = field() query_params: dict[str, Any] = field(factory=dict) - process_query_output_fn: Callable[[list[BaseVectorStoreDriver.Entry]], Sequence[TextArtifact]] = field( + process_query_output: Callable[[list[BaseVectorStoreDriver.Entry]], Sequence[TextArtifact]] = field( default=Factory(lambda: lambda es: [e.to_artifact() for e in es]), ) @@ -43,4 +43,4 @@ def run(self, context: RagContext) -> Sequence[TextArtifact]: self.vector_store_driver.upsert_text_artifacts({namespace: chunks}) - return self.process_query_output_fn(self.vector_store_driver.query(context.query, **query_params)) + return self.process_query_output(self.vector_store_driver.query(context.query, **query_params)) diff --git a/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py b/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py index ddff2549c..42ae5876f 100644 --- a/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py +++ b/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py @@ -22,11 +22,11 @@ class VectorStoreRetrievalRagModule(BaseRetrievalRagModule): default=Factory(lambda: Defaults.drivers_config.vector_store_driver) ) query_params: 
dict[str, Any] = field(factory=dict) - process_query_output_fn: Callable[[list[BaseVectorStoreDriver.Entry]], Sequence[TextArtifact]] = field( + process_query_output: Callable[[list[BaseVectorStoreDriver.Entry]], Sequence[TextArtifact]] = field( default=Factory(lambda: lambda es: [e.to_artifact() for e in es]), ) def run(self, context: RagContext) -> Sequence[TextArtifact]: query_params = utils.dict_merge(self.query_params, self.get_context_param(context, "query_params")) - return self.process_query_output_fn(self.vector_store_driver.query(context.query, **query_params)) + return self.process_query_output(self.vector_store_driver.query(context.query, **query_params)) diff --git a/griptape/engines/rag/stages/response_rag_stage.py b/griptape/engines/rag/stages/response_rag_stage.py index de286317c..06d163944 100644 --- a/griptape/engines/rag/stages/response_rag_stage.py +++ b/griptape/engines/rag/stages/response_rag_stage.py @@ -7,6 +7,7 @@ from griptape import utils from griptape.engines.rag.stages import BaseRagStage +from griptape.utils import with_contextvars if TYPE_CHECKING: from griptape.engines.rag import RagContext @@ -32,7 +33,7 @@ def run(self, context: RagContext) -> RagContext: logging.info("ResponseRagStage: running %s retrieval modules in parallel", len(self.response_modules)) results = utils.execute_futures_list( - [self.futures_executor.submit(r.run, context) for r in self.response_modules] + [self.futures_executor.submit(with_contextvars(r.run), context) for r in self.response_modules] ) context.outputs = results diff --git a/griptape/engines/rag/stages/retrieval_rag_stage.py b/griptape/engines/rag/stages/retrieval_rag_stage.py index 6ce9fb19f..3e2e78b6d 100644 --- a/griptape/engines/rag/stages/retrieval_rag_stage.py +++ b/griptape/engines/rag/stages/retrieval_rag_stage.py @@ -9,6 +9,7 @@ from griptape import utils from griptape.artifacts import TextArtifact from griptape.engines.rag.stages import BaseRagStage +from griptape.utils import with_contextvars if TYPE_CHECKING: from griptape.engines.rag import RagContext @@ -36,7 +37,7 @@ def run(self, context: RagContext) -> RagContext: logging.info("RetrievalRagStage: running %s retrieval modules in parallel", len(self.retrieval_modules)) results = utils.execute_futures_list( - [self.futures_executor.submit(r.run, context) for r in self.retrieval_modules] + [self.futures_executor.submit(with_contextvars(r.run), context) for r in self.retrieval_modules] ) # flatten the list of lists diff --git a/griptape/engines/summary/prompt_summary_engine.py b/griptape/engines/summary/prompt_summary_engine.py index 3cc3dd470..29b7e97af 100644 --- a/griptape/engines/summary/prompt_summary_engine.py +++ b/griptape/engines/summary/prompt_summary_engine.py @@ -20,8 +20,8 @@ class PromptSummaryEngine(BaseSummaryEngine): chunk_joiner: str = field(default="\n\n", kw_only=True) max_token_multiplier: float = field(default=0.5, kw_only=True) - system_template_generator: J2 = field(default=Factory(lambda: J2("engines/summary/system.j2")), kw_only=True) - user_template_generator: J2 = field(default=Factory(lambda: J2("engines/summary/user.j2")), kw_only=True) + generate_system_template: J2 = field(default=Factory(lambda: J2("engines/summary/system.j2")), kw_only=True) + generate_user_template: J2 = field(default=Factory(lambda: J2("engines/summary/user.j2")), kw_only=True) prompt_driver: BasePromptDriver = field( default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True ) @@ -67,12 +67,12 @@ def summarize_artifacts_rec( artifacts_text 
= self.chunk_joiner.join([a.to_text() for a in artifacts]) - system_prompt = self.system_template_generator.render( + system_prompt = self.generate_system_template.render( summary=summary, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets), ) - user_prompt = self.user_template_generator.render(text=artifacts_text) + user_prompt = self.generate_user_template.render(text=artifacts_text) if ( self.prompt_driver.tokenizer.count_input_tokens_left(user_prompt + system_prompt) @@ -94,7 +94,7 @@ def summarize_artifacts_rec( else: chunks = self.chunker.chunk(artifacts_text) - partial_text = self.user_template_generator.render(text=chunks[0].value) + partial_text = self.generate_user_template.render(text=chunks[0].value) return self.summarize_artifacts_rec( chunks[1:], diff --git a/griptape/events/__init__.py b/griptape/events/__init__.py index b3e2f3a79..e8a14d750 100644 --- a/griptape/events/__init__.py +++ b/griptape/events/__init__.py @@ -10,7 +10,9 @@ from .finish_prompt_event import FinishPromptEvent from .start_structure_run_event import StartStructureRunEvent from .finish_structure_run_event import FinishStructureRunEvent -from .completion_chunk_event import CompletionChunkEvent +from .base_chunk_event import BaseChunkEvent +from .text_chunk_event import TextChunkEvent +from .action_chunk_event import ActionChunkEvent from .event_listener import EventListener from .start_image_generation_event import StartImageGenerationEvent from .finish_image_generation_event import FinishImageGenerationEvent @@ -37,7 +39,9 @@ "FinishPromptEvent", "StartStructureRunEvent", "FinishStructureRunEvent", - "CompletionChunkEvent", + "BaseChunkEvent", + "TextChunkEvent", + "ActionChunkEvent", "EventListener", "StartImageGenerationEvent", "FinishImageGenerationEvent", diff --git a/griptape/events/action_chunk_event.py b/griptape/events/action_chunk_event.py new file mode 100644 index 000000000..d51bc017f --- /dev/null +++ b/griptape/events/action_chunk_event.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from typing import Optional + +from attrs import define, field + +from griptape.events.base_chunk_event import BaseChunkEvent + + +@define +class ActionChunkEvent(BaseChunkEvent): + partial_input: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) + tag: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) + name: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) + path: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) + + def __str__(self) -> str: + parts = [] + + if self.name: + parts.append(self.name) + if self.path: + parts.append(f".{self.path}") + if self.tag: + parts.append(f" ({self.tag})") + + if self.partial_input: + if parts: + parts.append(f"\n{self.partial_input}") + else: + parts.append(self.partial_input) + + return "".join(parts) diff --git a/griptape/events/base_chunk_event.py b/griptape/events/base_chunk_event.py new file mode 100644 index 000000000..c94fc9e2d --- /dev/null +++ b/griptape/events/base_chunk_event.py @@ -0,0 +1,13 @@ +from abc import abstractmethod + +from attrs import define, field + +from griptape.events.base_event import BaseEvent + + +@define +class BaseChunkEvent(BaseEvent): + index: int = field(default=0, metadata={"serializable": True}) + + @abstractmethod + def __str__(self) -> str: ... 
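As a rough illustration of the chunk-event hierarchy introduced above (this snippet is not part of the patch), a consumer might subscribe to `BaseChunkEvent` and rely on each subclass's `__str__`. It uses only the names exported in the updated `griptape/events/__init__.py` and the `isinstance`-based `event_types` matching shown in the new `EventListener`; treat it as a sketch rather than code from the repository.

# Sketch: subscribe to all chunk events via the shared base class.
from griptape.events import (
    ActionChunkEvent,
    BaseChunkEvent,
    EventBus,
    EventListener,
    TextChunkEvent,
)

# event_types matching is isinstance-based after this change, so listening on
# BaseChunkEvent covers both TextChunkEvent and ActionChunkEvent.
chunk_listener = EventListener(
    on_event=lambda event: print(str(event), end="", flush=True),
    event_types=[BaseChunkEvent],
)
EventBus.add_event_listener(chunk_listener)

# Both concrete chunk types render through their __str__ implementations.
EventBus.publish_event(TextChunkEvent(token="Hello"))
EventBus.publish_event(ActionChunkEvent(name="Calculator", path="calculate", tag="call-1"))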
diff --git a/griptape/events/completion_chunk_event.py b/griptape/events/completion_chunk_event.py deleted file mode 100644 index 48b479625..000000000 --- a/griptape/events/completion_chunk_event.py +++ /dev/null @@ -1,8 +0,0 @@ -from attrs import define, field - -from griptape.events.base_event import BaseEvent - - -@define -class CompletionChunkEvent(BaseEvent): - token: str = field(kw_only=True, metadata={"serializable": True}) diff --git a/griptape/events/event_bus.py b/griptape/events/event_bus.py index b7954480e..f658b9390 100644 --- a/griptape/events/event_bus.py +++ b/griptape/events/event_bus.py @@ -1,9 +1,9 @@ from __future__ import annotations -import threading +from contextvars import ContextVar from typing import TYPE_CHECKING -from attrs import Factory, define, field +from attrs import define from griptape.mixins.singleton_mixin import SingletonMixin @@ -11,14 +11,21 @@ from griptape.events import BaseEvent, EventListener +# Context Vars must be declared at the top module level. +# Also, in-place modifications do not trigger the context var's `set` method +# so we must reassign the context var with the new value when adding or removing event listeners. +_event_listeners: ContextVar[list[EventListener]] = ContextVar("event_listeners", default=[]) + + @define class _EventBus(SingletonMixin): - _event_listeners: list[EventListener] = field(factory=list, kw_only=True, alias="_event_listeners") - _thread_lock: threading.Lock = field(default=Factory(lambda: threading.Lock()), alias="_thread_lock") - @property def event_listeners(self) -> list[EventListener]: - return self._event_listeners + return _event_listeners.get() + + @event_listeners.setter + def event_listeners(self, event_listeners: list[EventListener]) -> None: + _event_listeners.set(event_listeners) def add_event_listeners(self, event_listeners: list[EventListener]) -> list[EventListener]: return [self.add_event_listener(event_listener) for event_listener in event_listeners] @@ -28,24 +35,21 @@ def remove_event_listeners(self, event_listeners: list[EventListener]) -> None: self.remove_event_listener(event_listener) def add_event_listener(self, event_listener: EventListener) -> EventListener: - with self._thread_lock: - if event_listener not in self._event_listeners: - self._event_listeners.append(event_listener) + if event_listener not in self.event_listeners: + self.event_listeners = self.event_listeners + [event_listener] return event_listener def remove_event_listener(self, event_listener: EventListener) -> None: - with self._thread_lock: - if event_listener in self._event_listeners: - self._event_listeners.remove(event_listener) + if event_listener in self.event_listeners: + self.event_listeners = [listener for listener in self.event_listeners if listener != event_listener] def publish_event(self, event: BaseEvent, *, flush: bool = False) -> None: - for event_listener in self._event_listeners: + for event_listener in self.event_listeners: event_listener.publish_event(event, flush=flush) def clear_event_listeners(self) -> None: - with self._thread_lock: - self._event_listeners.clear() + self.event_listeners = [] EventBus = _EventBus() diff --git a/griptape/events/event_listener.py b/griptape/events/event_listener.py index 1fad4a1de..a7eaf3ab1 100644 --- a/griptape/events/event_listener.py +++ b/griptape/events/event_listener.py @@ -1,22 +1,34 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable, Generic, Optional, TypeVar -from attrs 
import Factory, define, field +from attrs import define, field + +from .base_event import BaseEvent if TYPE_CHECKING: from griptape.drivers import BaseEventListenerDriver - from .base_event import BaseEvent +T = TypeVar("T", bound=BaseEvent) -@define -class EventListener: - handler: Callable[[BaseEvent], Optional[dict]] = field(default=Factory(lambda: lambda event: event.to_dict())) - event_types: Optional[list[type[BaseEvent]]] = field(default=None, kw_only=True) - driver: Optional[BaseEventListenerDriver] = field(default=None, kw_only=True) - _last_event_listeners: Optional[list[EventListener]] = field(default=None) +@define +class EventListener(Generic[T]): + """An event listener that listens for events and handles them. + + Attributes: + on_event: The on_event function that will be called when an event is published. + The on_event function should accept an event and return either the event or a dictionary. + If the on_event returns None, the event will not be published. + event_types: A list of event types that the event listener should listen for. + If not provided, the event listener will listen for all event types. + event_listener_driver: The driver that will be used to publish events. + """ + + on_event: Optional[Callable[[T], Optional[BaseEvent | dict]]] = field(default=None) + event_types: Optional[list[type[T]]] = field(default=None, kw_only=True) + event_listener_driver: Optional[BaseEventListenerDriver] = field(default=None, kw_only=True) def __enter__(self) -> EventListener: from griptape.events import EventBus @@ -30,15 +42,16 @@ def __exit__(self, type, value, traceback) -> None: # noqa: ANN001, A002 EventBus.remove_event_listener(self) - self._last_event_listeners = None - - def publish_event(self, event: BaseEvent, *, flush: bool = False) -> None: + def publish_event(self, event: T, *, flush: bool = False) -> None: event_types = self.event_types - if event_types is None or type(event) in event_types: - event_payload = self.handler(event) - if self.driver is not None: - if event_payload is not None and isinstance(event_payload, dict): - self.driver.publish_event(event_payload, flush=flush) - else: - self.driver.publish_event(event, flush=flush) + if event_types is None or any(isinstance(event, event_type) for event_type in event_types): + handled_event = event + if self.on_event is not None: + handled_event = self.on_event(event) + + if self.event_listener_driver is not None and handled_event is not None: + self.event_listener_driver.publish_event(handled_event) + + if self.event_listener_driver is not None and flush: + self.event_listener_driver.flush_events() diff --git a/griptape/events/text_chunk_event.py b/griptape/events/text_chunk_event.py new file mode 100644 index 000000000..7d3880bf2 --- /dev/null +++ b/griptape/events/text_chunk_event.py @@ -0,0 +1,11 @@ +from attrs import define, field + +from griptape.events.base_chunk_event import BaseChunkEvent + + +@define +class TextChunkEvent(BaseChunkEvent): + token: str = field(kw_only=True, metadata={"serializable": True}) + + def __str__(self) -> str: + return self.token diff --git a/griptape/loaders/base_loader.py b/griptape/loaders/base_loader.py index f7340283b..63324e10c 100644 --- a/griptape/loaders/base_loader.py +++ b/griptape/loaders/base_loader.py @@ -7,6 +7,7 @@ from griptape.artifacts import BaseArtifact from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin +from griptape.utils import with_contextvars from griptape.utils.futures import execute_futures_dict from griptape.utils.hash import 
bytes_to_hash, str_to_hash @@ -61,7 +62,10 @@ def load_collection( sources_by_key = {self.to_key(source): source for source in sources} return execute_futures_dict( - {key: self.futures_executor.submit(self.load, source) for key, source in sources_by_key.items()}, + { + key: self.futures_executor.submit(with_contextvars(self.load), source) + for key, source in sources_by_key.items() + }, ) def to_key(self, source: S) -> str: diff --git a/griptape/loaders/csv_loader.py b/griptape/loaders/csv_loader.py index 4487d7aec..c7e3a139e 100644 --- a/griptape/loaders/csv_loader.py +++ b/griptape/loaders/csv_loader.py @@ -14,7 +14,7 @@ class CsvLoader(BaseFileLoader[ListArtifact[TextArtifact]]): delimiter: str = field(default=",", kw_only=True) encoding: str = field(default="utf-8", kw_only=True) - formatter_fn: Callable[[dict], str] = field( + format_row: Callable[[dict], str] = field( default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True ) @@ -22,5 +22,5 @@ def parse(self, data: bytes) -> ListArtifact[TextArtifact]: reader = csv.DictReader(StringIO(data.decode(self.encoding)), delimiter=self.delimiter) return ListArtifact( - [TextArtifact(self.formatter_fn(row), meta={"row_num": row_num}) for row_num, row in enumerate(reader)] + [TextArtifact(self.format_row(row), meta={"row_num": row_num}) for row_num, row in enumerate(reader)] ) diff --git a/griptape/loaders/sql_loader.py b/griptape/loaders/sql_loader.py index 0c6e8bdf9..e63f7af81 100644 --- a/griptape/loaders/sql_loader.py +++ b/griptape/loaders/sql_loader.py @@ -12,7 +12,7 @@ @define class SqlLoader(BaseLoader[str, list[BaseSqlDriver.RowResult], ListArtifact[TextArtifact]]): sql_driver: BaseSqlDriver = field(kw_only=True) - formatter_fn: Callable[[dict], str] = field( + format_row: Callable[[dict], str] = field( default=lambda value: "\n".join(f"{key}: {val}" for key, val in value.items()), kw_only=True ) @@ -20,4 +20,4 @@ def fetch(self, source: str) -> list[BaseSqlDriver.RowResult]: return self.sql_driver.execute_query(source) or [] def parse(self, data: list[BaseSqlDriver.RowResult]) -> ListArtifact[TextArtifact]: - return ListArtifact([TextArtifact(self.formatter_fn(row.cells)) for row in data]) + return ListArtifact([TextArtifact(self.format_row(row.cells)) for row in data]) diff --git a/griptape/memory/structure/base_conversation_memory.py b/griptape/memory/structure/base_conversation_memory.py index e2095b460..448f3fed0 100644 --- a/griptape/memory/structure/base_conversation_memory.py +++ b/griptape/memory/structure/base_conversation_memory.py @@ -28,9 +28,7 @@ class BaseConversationMemory(SerializableMixin, ABC): def __attrs_post_init__(self) -> None: if self.autoload: - runs, meta = self.conversation_memory_driver.load() - self.runs.extend(runs) - self.meta = dict_merge(self.meta, meta) + self.load_runs() def before_add_run(self) -> None: pass @@ -43,6 +41,9 @@ def add_run(self, run: Run) -> BaseConversationMemory: return self def after_add_run(self) -> None: + if self.max_runs: + while len(self.runs) > self.max_runs: + self.runs.pop(0) self.conversation_memory_driver.store(self.runs, self.meta) @abstractmethod @@ -51,6 +52,13 @@ def try_add_run(self, run: Run) -> None: ... @abstractmethod def to_prompt_stack(self, last_n: Optional[int] = None) -> PromptStack: ... 
+ def load_runs(self) -> list[Run]: + runs, meta = self.conversation_memory_driver.load() + self.runs.extend(runs) + self.meta = dict_merge(self.meta, meta) + + return self.runs + def add_to_prompt_stack( self, prompt_driver: BasePromptDriver, prompt_stack: PromptStack, index: Optional[int] = None ) -> PromptStack: diff --git a/griptape/memory/structure/conversation_memory.py b/griptape/memory/structure/conversation_memory.py index 34f96e414..8bd519726 100644 --- a/griptape/memory/structure/conversation_memory.py +++ b/griptape/memory/structure/conversation_memory.py @@ -13,10 +13,6 @@ class ConversationMemory(BaseConversationMemory): def try_add_run(self, run: Run) -> None: self.runs.append(run) - if self.max_runs: - while len(self.runs) > self.max_runs: - self.runs.pop(0) - def to_prompt_stack(self, last_n: Optional[int] = None) -> PromptStack: prompt_stack = PromptStack() runs = self.runs[-last_n:] if last_n else self.runs diff --git a/griptape/memory/structure/summary_conversation_memory.py b/griptape/memory/structure/summary_conversation_memory.py index 055057d34..a8aa7fa34 100644 --- a/griptape/memory/structure/summary_conversation_memory.py +++ b/griptape/memory/structure/summary_conversation_memory.py @@ -7,7 +7,7 @@ from griptape.common import Message, PromptStack from griptape.configs import Defaults -from griptape.memory.structure import ConversationMemory +from griptape.memory.structure.base_conversation_memory import BaseConversationMemory from griptape.utils import J2 if TYPE_CHECKING: @@ -16,15 +16,15 @@ @define -class SummaryConversationMemory(ConversationMemory): +class SummaryConversationMemory(BaseConversationMemory): offset: int = field(default=1, kw_only=True, metadata={"serializable": True}) prompt_driver: BasePromptDriver = field( kw_only=True, default=Factory(lambda: Defaults.drivers_config.prompt_driver) ) summary: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) summary_index: int = field(default=0, kw_only=True, metadata={"serializable": True}) - summary_template_generator: J2 = field(default=Factory(lambda: J2("memory/conversation/summary.j2")), kw_only=True) - summarize_conversation_template_generator: J2 = field( + summary_get_template: J2 = field(default=Factory(lambda: J2("memory/conversation/summary.j2")), kw_only=True) + summarize_conversation_get_template: J2 = field( default=Factory(lambda: J2("memory/conversation/summarize_conversation.j2")), kw_only=True, ) @@ -32,7 +32,7 @@ class SummaryConversationMemory(ConversationMemory): def to_prompt_stack(self, last_n: Optional[int] = None) -> PromptStack: stack = PromptStack() if self.summary: - stack.add_user_message(self.summary_template_generator.render(summary=self.summary)) + stack.add_user_message(self.summary_get_template.render(summary=self.summary)) for r in self.unsummarized_runs(last_n): stack.add_user_message(r.input) @@ -54,8 +54,7 @@ def unsummarized_runs(self, last_n: Optional[int] = None) -> list[Run]: return summary_index_runs def try_add_run(self, run: Run) -> None: - super().try_add_run(run) - + self.runs.append(run) unsummarized_runs = self.unsummarized_runs() runs_to_summarize = unsummarized_runs[: max(0, len(unsummarized_runs) - self.offset)] @@ -66,7 +65,7 @@ def try_add_run(self, run: Run) -> None: def summarize_runs(self, previous_summary: str | None, runs: list[Run]) -> str | None: try: if len(runs) > 0: - summary = self.summarize_conversation_template_generator.render(summary=previous_summary, runs=runs) + summary = 
self.summarize_conversation_get_template.render(summary=previous_summary, runs=runs) return self.prompt_driver.run( prompt_stack=PromptStack(messages=[Message(summary, role=Message.USER_ROLE)]), ).to_text() diff --git a/griptape/memory/task/task_memory.py b/griptape/memory/task/task_memory.py index 1aa60dba3..b5bc35378 100644 --- a/griptape/memory/task/task_memory.py +++ b/griptape/memory/task/task_memory.py @@ -8,6 +8,7 @@ from griptape.memory.meta import ActionSubtaskMetaEntry from griptape.memory.task.storage import BlobArtifactStorage, TextArtifactStorage from griptape.mixins.activity_mixin import ActivityMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.memory.task.storage import BaseArtifactStorage @@ -15,8 +16,12 @@ @define -class TaskMemory(ActivityMixin): - name: str = field(default=Factory(lambda self: self.__class__.__name__, takes_self=True), kw_only=True) +class TaskMemory(ActivityMixin, SerializableMixin): + name: str = field( + default=Factory(lambda self: self.__class__.__name__, takes_self=True), + kw_only=True, + metadata={"serializable": True}, + ) artifact_storages: dict[type, BaseArtifactStorage] = field( default=Factory( lambda: { @@ -26,8 +31,10 @@ class TaskMemory(ActivityMixin): ), kw_only=True, ) - namespace_storage: dict[str, BaseArtifactStorage] = field(factory=dict, kw_only=True) - namespace_metadata: dict[str, Any] = field(factory=dict, kw_only=True) + namespace_storage: dict[str, BaseArtifactStorage] = field( + factory=dict, kw_only=True, metadata={"serializable": True} + ) + namespace_metadata: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True}) @artifact_storages.validator # pyright: ignore[reportAttributeAccessIssue] def validate_artifact_storages(self, _: Attribute, artifact_storage: dict[type, BaseArtifactStorage]) -> None: diff --git a/griptape/mixins/futures_executor_mixin.py b/griptape/mixins/futures_executor_mixin.py index 84a3b6f25..5f3eb5324 100644 --- a/griptape/mixins/futures_executor_mixin.py +++ b/griptape/mixins/futures_executor_mixin.py @@ -1,5 +1,6 @@ from __future__ import annotations +import contextlib from abc import ABC from concurrent import futures from typing import Callable, Optional @@ -9,12 +10,12 @@ @define(slots=False, kw_only=True) class FuturesExecutorMixin(ABC): - futures_executor_fn: Callable[[], futures.Executor] = field( + create_futures_executor: Callable[[], futures.Executor] = field( default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), ) futures_executor: Optional[futures.Executor] = field( - default=Factory(lambda self: self.futures_executor_fn(), takes_self=True) + default=Factory(lambda self: self.create_futures_executor(), takes_self=True) ) def __del__(self) -> None: @@ -23,4 +24,6 @@ def __del__(self) -> None: if executor is not None: self.futures_executor = None - executor.shutdown(wait=True) + with contextlib.suppress(Exception): + # don't raise exceptions in __del__ + executor.shutdown(wait=True) diff --git a/griptape/mixins/runnable_mixin.py b/griptape/mixins/runnable_mixin.py new file mode 100644 index 000000000..4571e2108 --- /dev/null +++ b/griptape/mixins/runnable_mixin.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Any, Callable, Generic, Optional, TypeVar, cast + +from attrs import define, field + +# Generics magic that allows us to reference the type of the class that is implementing the mixin +T = TypeVar("T", bound="RunnableMixin") + + 
+@define() +class RunnableMixin(ABC, Generic[T]): + """Mixin for classes that can be "run". + + Implementing classes should pass themselves as the generic type to ensure that the correct type is used in the callbacks. + + Attributes: + on_before_run: Optional callback that is called at the very beginning of the `run` method. + on_after_run: Optional callback that is called at the very end of the `run` method. + """ + + on_before_run: Optional[Callable[[T], None]] = field(kw_only=True, default=None) + on_after_run: Optional[Callable[[T], None]] = field(kw_only=True, default=None) + + def before_run(self, *args, **kwargs) -> Any: + if self.on_before_run is not None: + self.on_before_run(cast(T, self)) + + @abstractmethod + def run(self, *args, **kwargs) -> Any: ... + + def after_run(self, *args, **kwargs) -> Any: + if self.on_after_run is not None: + self.on_after_run(cast(T, self)) diff --git a/griptape/mixins/serializable_mixin.py b/griptape/mixins/serializable_mixin.py index e8f772cab..35269b36e 100644 --- a/griptape/mixins/serializable_mixin.py +++ b/griptape/mixins/serializable_mixin.py @@ -22,19 +22,26 @@ class SerializableMixin(Generic[T]): kw_only=True, metadata={"serializable": True}, ) + module_name: str = field( + default=Factory(lambda self: self.__class__.__module__, takes_self=True), + kw_only=True, + metadata={"serializable": False}, + ) @classmethod - def get_schema(cls: type[T], subclass_name: Optional[str] = None) -> Schema: + def get_schema(cls: type[T], subclass_name: Optional[str] = None, *, module_name: Optional[str] = None) -> Schema: """Generates a Marshmallow schema for the class. Args: subclass_name: An optional subclass name. Required if the class is abstract. + module_name: An optional module name. Defaults to the class's module. 
""" if ABC in cls.__bases__: if subclass_name is None: raise ValueError(f"Type field is required for abstract class: {cls.__name__}") - subclass_cls = cls._import_cls_rec(cls.__module__, subclass_name) + module_name = module_name or cls.__module__ + subclass_cls = cls._import_cls_rec(module_name, subclass_name) schema_class = BaseSchema.from_attrs_cls(subclass_cls) else: @@ -44,7 +51,7 @@ def get_schema(cls: type[T], subclass_name: Optional[str] = None) -> Schema: @classmethod def from_dict(cls: type[T], data: dict) -> T: - return cast(T, cls.get_schema(subclass_name=data.get("type")).load(data)) + return cast(T, cls.get_schema(subclass_name=data.get("type"), module_name=data.get("module_name")).load(data)) @classmethod def from_json(cls: type[T], data: str) -> T: diff --git a/griptape/rules/json_schema_rule.py b/griptape/rules/json_schema_rule.py index 1bd418464..ce41f26db 100644 --- a/griptape/rules/json_schema_rule.py +++ b/griptape/rules/json_schema_rule.py @@ -2,7 +2,7 @@ import json -from attrs import define, field +from attrs import Factory, define, field from griptape.rules import BaseRule from griptape.utils import J2 @@ -11,7 +11,7 @@ @define(frozen=True) class JsonSchemaRule(BaseRule): value: dict = field() - template_generator: J2 = field(default=J2("rules/json_schema.j2")) + generate_template: J2 = field(default=Factory(lambda: J2("rules/json_schema.j2"))) def to_text(self) -> str: - return self.template_generator.render(json_schema=json.dumps(self.value)) + return self.generate_template.render(json_schema=json.dumps(self.value)) diff --git a/griptape/schemas/__init__.py b/griptape/schemas/__init__.py index 81d2f4bee..e4d3d5974 100644 --- a/griptape/schemas/__init__.py +++ b/griptape/schemas/__init__.py @@ -4,5 +4,7 @@ from .bytes_field import Bytes +from .union_field import Union -__all__ = ["BaseSchema", "PolymorphicSchema", "Bytes"] + +__all__ = ["BaseSchema", "PolymorphicSchema", "Bytes", "Union"] diff --git a/griptape/schemas/base_schema.py b/griptape/schemas/base_schema.py index a078af76f..9762bf83d 100644 --- a/griptape/schemas/base_schema.py +++ b/griptape/schemas/base_schema.py @@ -2,12 +2,14 @@ from abc import ABC from collections.abc import Sequence +from enum import Enum from typing import Any, Literal, TypeVar, Union, _SpecialForm, get_args, get_origin import attrs from marshmallow import INCLUDE, Schema, fields from griptape.schemas.bytes_field import Bytes +from griptape.schemas.union_field import Union as UnionField class BaseSchema(Schema): @@ -59,21 +61,63 @@ def _get_field_for_type(cls, field_type: type) -> fields.Field | fields.Nested: # Resolve TypeVars to their bound type if isinstance(field_class, TypeVar): field_class = field_class.__bound__ - - if attrs.has(field_class): - if ABC in field_class.__bases__: - return fields.Nested(PolymorphicSchema(inner_class=field_class), allow_none=optional) - else: - return fields.Nested(cls.from_attrs_cls(field_class), allow_none=optional) - elif cls.is_list_sequence(field_class): + if field_class is None: + return fields.Constant(None, allow_none=True) + if cls._is_union(field_type): + return cls._handle_union(field_type, optional=optional) + elif attrs.has(field_class): + schema = PolymorphicSchema if ABC in field_class.__bases__ else cls.from_attrs_cls + return fields.Nested(schema(field_class), allow_none=optional) + elif cls._is_enum(field_type): + return fields.String(allow_none=optional) + elif cls._is_list_sequence(field_class): if args: - return fields.List(cls_or_instance=cls._get_field_for_type(args[0]), 
allow_none=optional) + return cls._handle_list(args[0], optional=optional) else: raise ValueError(f"Missing type for list field: {field_type}") - else: - field_class = cls.DATACLASS_TYPE_MAPPING[field_class] + field_class = cls.DATACLASS_TYPE_MAPPING.get(field_class) + if field_class is None: + raise ValueError(f"Unsupported field type: {field_type}") + return field_class(allow_none=optional) + + @classmethod + def _handle_list(cls, list_type: type, *, optional: bool) -> fields.Field: + """Handle List Fields, including Union Types. - return field_class(allow_none=optional) + Args: + list_type: The List type to handle. + optional: Whether the List can be none. + + Returns: + A marshmallow List field. + """ + if cls._is_union(list_type): + union_field = cls._handle_union(list_type, optional=optional) + return fields.List(cls_or_instance=union_field, allow_none=optional) + list_field = cls._get_field_for_type(list_type) + if isinstance(list_field, fields.Constant) and list_field.constant is None: + raise ValueError(f"List elements cannot be None: {list_type}") + return fields.List(cls_or_instance=list_field, allow_none=optional) + + @classmethod + def _handle_union(cls, union_type: type, *, optional: bool) -> fields.Field: + """Handle Union Fields, including Unions with List Types. + + Args: + union_type: The Union Type to handle. + optional: Whether the Union can be None. + + Returns: + A marshmallow Union field. + """ + candidate_fields = [cls._get_field_for_type(arg) for arg in get_args(union_type) if arg is not type(None)] + optional_args = [arg is None for arg in get_args(union_type)] + if optional_args: + optional = True + if not candidate_fields: + raise ValueError(f"Unsupported UnionType field: {union_type}") + + return UnionField(fields=candidate_fields, allow_none=optional) @classmethod def _get_field_type_info(cls, field_type: type) -> tuple[type, tuple[type, ...], bool]: @@ -129,8 +173,11 @@ def _resolve_types(cls, attrs_cls: type) -> None: BaseVectorStoreDriver, ) from griptape.events import EventListener - from griptape.memory.structure import Run + from griptape.memory import TaskMemory + from griptape.memory.structure import BaseConversationMemory, Run + from griptape.memory.task.storage import BaseArtifactStorage from griptape.structures import Structure + from griptape.tasks import BaseTask from griptape.tokenizers import BaseTokenizer from griptape.tools import BaseTool from griptape.utils import import_optional_dependency, is_dependency_installed @@ -154,6 +201,7 @@ def _resolve_types(cls, attrs_cls: type) -> None: "BaseMessageContent": BaseMessageContent, "BaseDeltaMessageContent": BaseDeltaMessageContent, "BaseTool": BaseTool, + "BaseTask": BaseTask, "Usage": Message.Usage, "Structure": Structure, "BaseTokenizer": BaseTokenizer, @@ -161,6 +209,10 @@ def _resolve_types(cls, attrs_cls: type) -> None: "Reference": Reference, "Run": Run, "Sequence": Sequence, + "TaskMemory": TaskMemory, + "State": BaseTask.State, + "BaseConversationMemory": BaseConversationMemory, + "BaseArtifactStorage": BaseArtifactStorage, # Third party modules "Client": import_optional_dependency("cohere").Client if is_dependency_installed("cohere") else Any, "GenerativeModel": import_optional_dependency("google.generativeai").GenerativeModel @@ -178,7 +230,7 @@ def _resolve_types(cls, attrs_cls: type) -> None: ) @classmethod - def is_list_sequence(cls, field_type: type | _SpecialForm) -> bool: + def _is_list_sequence(cls, field_type: type | _SpecialForm) -> bool: if isinstance(field_type, type): if 
issubclass(field_type, str) or issubclass(field_type, bytes) or issubclass(field_type, tuple): return False @@ -186,3 +238,11 @@ def is_list_sequence(cls, field_type: type | _SpecialForm) -> bool: return issubclass(field_type, Sequence) else: return False + + @classmethod + def _is_union(cls, field_type: type) -> bool: + return field_type is Union or get_origin(field_type) is Union + + @classmethod + def _is_enum(cls, field_type: type) -> bool: + return isinstance(field_type, type) and issubclass(field_type, Enum) diff --git a/griptape/schemas/polymorphic_schema.py b/griptape/schemas/polymorphic_schema.py index 2e556b2c7..39749a431 100644 --- a/griptape/schemas/polymorphic_schema.py +++ b/griptape/schemas/polymorphic_schema.py @@ -116,7 +116,7 @@ def _load(self, data: Any, *, partial: Any = None, unknown: Any = None, **kwargs if data_type is None: raise ValidationError({self.type_field: ["Missing data for required field."]}) - type_schema = self.inner_class.get_schema(data_type) + type_schema = self.inner_class.get_schema(data_type, module_name=data.get("module_name")) if not type_schema: raise ValidationError({self.type_field: [f"Unsupported value: {data_type}"]}) diff --git a/griptape/schemas/union_field.py b/griptape/schemas/union_field.py new file mode 100644 index 000000000..95d54991f --- /dev/null +++ b/griptape/schemas/union_field.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +from typing import Any + +import marshmallow +import marshmallow.error_store +import marshmallow.exceptions + + +class MarshmallowUnionError(Exception): + """Base error for marshmallow_union.""" + + +class ExceptionGroupError(MarshmallowUnionError): + """Collection of possibly multiple exceptions.""" + + def __init__(self, msg: str, errors: Any) -> None: + self.msg = msg + self.errors = errors + super().__init__(msg, errors) + + +class Union(marshmallow.fields.Field): + """Field that accepts any one of multiple fields. + + Source: https://github.com/adamboche/python-marshmallow-union + Each argument will be tried until one succeeds. + + Args: + fields: The list of candidate fields to try. + reverse_serialize_candidates: Whether to try the candidates in reverse order when serializing. + """ + + def __init__( + self, + fields: list[marshmallow.fields.Field], + *, + reverse_serialize_candidates: bool = False, + **kwargs: Any, + ) -> None: + self._candidate_fields = fields + self._reverse_serialize_candidates = reverse_serialize_candidates + super().__init__(**kwargs) + + def _serialize(self, value: Any, attr: str | None, obj: str, **kwargs: Any) -> Any: + """Pulls the value for the given key from the object, applies the field's formatting and returns the result. + + Args: + value: The value to be serialized. + attr: The attribute or key to get from the object. + obj: The object to pull the key from. + kwargs: Field-specific keyword arguments. 
+ + Raises: + marshmallow.exceptions.ValidationError: In case of formatting problem + """ + error_store = kwargs.pop("error_store", marshmallow.error_store.ErrorStore()) + fields = ( + list(reversed(self._candidate_fields)) if self._reverse_serialize_candidates else self._candidate_fields + ) + + for candidate_field in fields: + try: + # pylint: disable=protected-access + return candidate_field._serialize(value, attr, obj, error_store=error_store, **kwargs) + except (TypeError, ValueError) as e: + error_store.store_error({attr: str(e)}) + + raise ExceptionGroupError("All serializers raised exceptions.", error_store.errors) + + def _deserialize(self, value: Any, attr: str | None = None, data: Any = None, **kwargs: Any) -> Any: + """Deserialize ``value``. + + Args: + value: The value to be deserialized. + attr: The attribute/key in `data` to be deserialized. + data: The raw input data passed to the `Schema.load`. + kwargs: Field-specific keyword arguments. + + Raises: + ValidationError: If an invalid value is passed or if a required value is missing. + """ + errors = [] + for candidate_field in self._candidate_fields: + try: + return candidate_field.deserialize(value, attr, data, **kwargs) + except marshmallow.exceptions.ValidationError as exc: + errors.append(exc.messages) + + raise marshmallow.exceptions.ValidationError(message=errors, field_name=attr or "") diff --git a/griptape/structures/agent.py b/griptape/structures/agent.py index 77f3e0618..be1b73e34 100644 --- a/griptape/structures/agent.py +++ b/griptape/structures/agent.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable, Optional, Union from attrs import Attribute, Factory, define, field @@ -19,7 +19,7 @@ @define class Agent(Structure): - input: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact] = field( + input: Union[str, list, tuple, BaseArtifact, Callable[[BaseTask], BaseArtifact]] = field( default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""), ) stream: bool = field(default=Factory(lambda: Defaults.drivers_config.prompt_driver.stream), kw_only=True) @@ -74,6 +74,6 @@ def add_tasks(self, *tasks: BaseTask | list[BaseTask]) -> list[BaseTask]: @observable def try_run(self, *args) -> Agent: - self.task.execute() + self.task.run() return self diff --git a/griptape/structures/pipeline.py b/griptape/structures/pipeline.py index a5134a964..dcf8503f7 100644 --- a/griptape/structures/pipeline.py +++ b/griptape/structures/pipeline.py @@ -59,7 +59,8 @@ def context(self, task: BaseTask) -> dict[str, Any]: context.update( { - "parent_output": task.parents[0].output.to_text() if task.parents and task.parents[0].output else None, + "parent_output": task.parents[0].output if task.parents else None, + "task_outputs": self.task_outputs, "parent": task.parents[0] if task.parents else None, "child": task.children[0] if task.children else None, }, @@ -71,7 +72,7 @@ def __run_from_task(self, task: Optional[BaseTask]) -> None: if task is None: return else: - if isinstance(task.execute(), ErrorArtifact) and self.fail_fast: + if isinstance(task.run(), ErrorArtifact) and self.fail_fast: return else: self.__run_from_task(next(iter(task.children), None)) diff --git a/griptape/structures/structure.py b/griptape/structures/structure.py index d2e6d2f3f..9ccd04497 100644 --- a/griptape/structures/structure.py +++ b/griptape/structures/structure.py @@ -2,7 +2,7 @@ import uuid from abc 
import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Optional, Union from attrs import Factory, define, field @@ -12,6 +12,8 @@ from griptape.memory.meta import MetaMemory from griptape.memory.structure import ConversationMemory, Run from griptape.mixins.rule_mixin import RuleMixin +from griptape.mixins.runnable_mixin import RunnableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.artifacts import BaseArtifact @@ -20,19 +22,22 @@ @define -class Structure(ABC, RuleMixin): - id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True) - _tasks: list[BaseTask | list[BaseTask]] = field(factory=list, kw_only=True, alias="tasks") +class Structure(RuleMixin, SerializableMixin, RunnableMixin["Structure"], ABC): + id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={"serializable": True}) + _tasks: list[Union[BaseTask, list[BaseTask]]] = field( + factory=list, kw_only=True, alias="tasks", metadata={"serializable": True} + ) conversation_memory: Optional[BaseConversationMemory] = field( default=Factory(lambda: ConversationMemory()), kw_only=True, + metadata={"serializable": True}, ) task_memory: TaskMemory = field( default=Factory(lambda self: TaskMemory(), takes_self=True), kw_only=True, ) meta_memory: MetaMemory = field(default=Factory(lambda: MetaMemory()), kw_only=True) - fail_fast: bool = field(default=True, kw_only=True) + fail_fast: bool = field(default=True, kw_only=True, metadata={"serializable": True}) _execution_args: tuple = () def __attrs_post_init__(self) -> None: @@ -72,6 +77,10 @@ def output(self) -> BaseArtifact: raise ValueError("Structure's output Task has no output. Run the Structure to generate output.") return self.output_task.output + @property + def task_outputs(self) -> dict[str, Optional[BaseArtifact]]: + return {task.id: task.output for task in self.tasks} + @property def finished_tasks(self) -> list[BaseTask]: return [s for s in self.tasks if s.is_finished()] @@ -131,6 +140,7 @@ def resolve_relationships(self) -> None: @observable def before_run(self, args: Any) -> None: + super().before_run(args) self._execution_args = args [task.reset() for task in self.tasks] @@ -147,6 +157,7 @@ def before_run(self, args: Any) -> None: @observable def after_run(self) -> None: + super().after_run() if self.conversation_memory and self.output_task.output is not None: run = Run(input=self.input_task.input, output=self.output_task.output) diff --git a/griptape/structures/workflow.py b/griptape/structures/workflow.py index 1c65c59c4..e5e346044 100644 --- a/griptape/structures/workflow.py +++ b/griptape/structures/workflow.py @@ -10,6 +10,7 @@ from griptape.common import observable from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin from griptape.structures import Structure +from griptape.utils import with_contextvars if TYPE_CHECKING: from griptape.artifacts import BaseArtifact @@ -107,8 +108,8 @@ def try_run(self, *args) -> Workflow: ordered_tasks = self.order_tasks() for task in ordered_tasks: - if task.can_execute(): - future = self.futures_executor.submit(task.execute) + if task.can_run(): + future = self.futures_executor.submit(with_contextvars(task.run)) futures_list[future] = task # Wait for all tasks to complete @@ -125,6 +126,7 @@ def context(self, task: BaseTask) -> dict[str, Any]: context.update( { + "task_outputs": self.task_outputs, "parent_outputs": task.parent_outputs, "parents_output_text": 
task.parents_output_text, "parents": {parent.id: parent for parent in task.parents}, diff --git a/griptape/tasks/actions_subtask.py b/griptape/tasks/actions_subtask.py index 9057fc127..700ba9960 100644 --- a/griptape/tasks/actions_subtask.py +++ b/griptape/tasks/actions_subtask.py @@ -3,7 +3,7 @@ import json import logging import re -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable, Optional, Union import schema from attrs import define, field @@ -15,7 +15,7 @@ from griptape.events import EventBus, FinishActionsSubtaskEvent, StartActionsSubtaskEvent from griptape.mixins.actions_subtask_origin_mixin import ActionsSubtaskOriginMixin from griptape.tasks import BaseTask -from griptape.utils import remove_null_values_in_dict_recursively +from griptape.utils import remove_null_values_in_dict_recursively, with_contextvars if TYPE_CHECKING: from griptape.memory import TaskMemory @@ -33,7 +33,7 @@ class ActionsSubtask(BaseTask): thought: Optional[str] = field(default=None, kw_only=True) actions: list[ToolAction] = field(factory=list, kw_only=True) output: Optional[BaseArtifact] = field(default=None, init=False) - _input: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact] = field( + _input: Union[str, list, tuple, BaseArtifact, Callable[[BaseTask], BaseArtifact]] = field( default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""), alias="input", ) @@ -113,14 +113,14 @@ def before_run(self) -> None: ] logger.info("".join(parts)) - def run(self) -> BaseArtifact: + def try_run(self) -> BaseArtifact: try: if any(isinstance(a.output, ErrorArtifact) for a in self.actions): errors = [a.output.value for a in self.actions if isinstance(a.output, ErrorArtifact)] self.output = ErrorArtifact("\n\n".join(errors)) else: - results = self.execute_actions(self.actions) + results = self.run_actions(self.actions) actions_output = [] for result in results: @@ -138,13 +138,15 @@ def run(self) -> BaseArtifact: else: return ErrorArtifact("no tool output") - def execute_actions(self, actions: list[ToolAction]) -> list[tuple[str, BaseArtifact]]: - return utils.execute_futures_list([self.futures_executor.submit(self.execute_action, a) for a in actions]) + def run_actions(self, actions: list[ToolAction]) -> list[tuple[str, BaseArtifact]]: + return utils.execute_futures_list( + [self.futures_executor.submit(with_contextvars(self.run_action), a) for a in actions] + ) - def execute_action(self, action: ToolAction) -> tuple[str, BaseArtifact]: + def run_action(self, action: ToolAction) -> tuple[str, BaseArtifact]: if action.tool is not None: if action.path is not None: - output = action.tool.execute(getattr(action.tool, action.path), self, action) + output = action.tool.run(getattr(action.tool, action.path), self, action) else: output = ErrorArtifact("action path not found") else: @@ -197,8 +199,8 @@ def actions_to_json(self) -> str: def _process_task_input( self, - task_input: str | tuple | list | BaseArtifact | Callable[[BaseTask], BaseArtifact], - ) -> TextArtifact | ListArtifact: + task_input: Union[str, tuple, list, BaseArtifact, Callable[[BaseTask], BaseArtifact]], + ) -> Union[TextArtifact, ListArtifact]: if isinstance(task_input, (TextArtifact, ListArtifact)): return task_input elif isinstance(task_input, ActionArtifact): @@ -217,14 +219,18 @@ def __init_from_prompt(self, value: str) -> None: actions_matches = re.findall(self.ACTIONS_PATTERN, value, re.DOTALL) answer_matches = re.findall(self.ANSWER_PATTERN, 
value, re.MULTILINE) - if self.thought is None and thought_matches: - self.thought = thought_matches[-1] + self.actions = self.__parse_actions(actions_matches) - self.__parse_actions(actions_matches) + if thought_matches: + self.thought = thought_matches[-1] - # If there are no actions to take but an answer is provided, set the answer as the output. - if len(self.actions) == 0 and self.output is None and answer_matches: - self.output = TextArtifact(answer_matches[-1]) + if not self.actions and self.output is None: + if answer_matches: + # A direct answer is provided, set it as the output. + self.output = TextArtifact(answer_matches[-1]) + else: + # The LLM failed to follow the ReAct prompt, set the LLM's raw response as the output. + self.output = TextArtifact(value) def __init_from_artifacts(self, artifacts: ListArtifact) -> None: """Parses the input Artifacts to extract the thought and actions. @@ -243,23 +249,30 @@ def __init_from_artifacts(self, artifacts: ListArtifact) -> None: if isinstance(artifact, ActionArtifact) ] - thoughts = [artifact.value for artifact in artifacts.value if isinstance(artifact, TextArtifact)] - if thoughts: - self.thought = thoughts[0] + # When parsing from Artifacts we can't determine the thought unless there are also Actions + if self.actions: + thoughts = [artifact.value for artifact in artifacts.value if isinstance(artifact, TextArtifact)] + if thoughts: + self.thought = thoughts[0] + else: + if self.output is None: + self.output = TextArtifact(artifacts.to_text()) - def __parse_actions(self, actions_matches: list[str]) -> None: + def __parse_actions(self, actions_matches: list[str]) -> list[ToolAction]: if len(actions_matches) == 0: - return + return [] try: data = actions_matches[-1] actions_list: list[dict] = json.loads(data, strict=False) - self.actions = [self.__process_action_object(action_object) for action_object in actions_list] + return [self.__process_action_object(action_object) for action_object in actions_list] except json.JSONDecodeError as e: logger.exception("Subtask %s\nInvalid actions JSON: %s", self.origin_task.id, e) self.output = ErrorArtifact(f"Actions JSON decoding error: {e}", exception=e) + return [] + def __process_action_object(self, action_object: dict) -> ToolAction: # Load action tag; throw exception if the key is not present action_tag = action_object["tag"] diff --git a/griptape/tasks/audio_transcription_task.py b/griptape/tasks/audio_transcription_task.py index 3d83cf7e7..819f166ec 100644 --- a/griptape/tasks/audio_transcription_task.py +++ b/griptape/tasks/audio_transcription_task.py @@ -18,5 +18,5 @@ class AudioTranscriptionTask(BaseAudioInputTask): kw_only=True, ) - def run(self) -> TextArtifact: + def try_run(self) -> TextArtifact: return self.audio_transcription_engine.run(self.input) diff --git a/griptape/tasks/base_audio_input_task.py b/griptape/tasks/base_audio_input_task.py index 8a834db56..0459fed03 100644 --- a/griptape/tasks/base_audio_input_task.py +++ b/griptape/tasks/base_audio_input_task.py @@ -2,7 +2,7 @@ import logging from abc import ABC -from typing import Callable +from typing import Callable, Union from attrs import define, field @@ -16,7 +16,7 @@ @define class BaseAudioInputTask(RuleMixin, BaseTask, ABC): - _input: AudioArtifact | Callable[[BaseTask], AudioArtifact] = field(alias="input") + _input: Union[AudioArtifact, Callable[[BaseTask], AudioArtifact]] = field(alias="input") @property def input(self) -> AudioArtifact: diff --git a/griptape/tasks/base_task.py b/griptape/tasks/base_task.py index 
3fca55c30..9c936caa9 100644 --- a/griptape/tasks/base_task.py +++ b/griptape/tasks/base_task.py @@ -8,13 +8,14 @@ from attrs import Factory, define, field -from griptape.artifacts import ErrorArtifact +from griptape.artifacts import BaseArtifact, ErrorArtifact from griptape.configs import Defaults from griptape.events import EventBus, FinishTaskEvent, StartTaskEvent from griptape.mixins.futures_executor_mixin import FuturesExecutorMixin +from griptape.mixins.runnable_mixin import RunnableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: - from griptape.artifacts import BaseArtifact from griptape.memory.meta import BaseMetaEntry from griptape.structures import Structure @@ -22,21 +23,21 @@ @define -class BaseTask(FuturesExecutorMixin, ABC): +class BaseTask(FuturesExecutorMixin, SerializableMixin, RunnableMixin["BaseTask"], ABC): class State(Enum): PENDING = 1 EXECUTING = 2 FINISHED = 3 - id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True) - state: State = field(default=State.PENDING, kw_only=True) - parent_ids: list[str] = field(factory=list, kw_only=True) - child_ids: list[str] = field(factory=list, kw_only=True) - max_meta_memory_entries: Optional[int] = field(default=20, kw_only=True) + id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={"serializable": True}) + state: State = field(default=State.PENDING, kw_only=True, metadata={"serializable": True}) + parent_ids: list[str] = field(factory=list, kw_only=True, metadata={"serializable": True}) + child_ids: list[str] = field(factory=list, kw_only=True, metadata={"serializable": True}) + max_meta_memory_entries: Optional[int] = field(default=20, kw_only=True, metadata={"serializable": True}) structure: Optional[Structure] = field(default=None, kw_only=True) output: Optional[BaseArtifact] = field(default=None, init=False) - context: dict[str, Any] = field(factory=dict, kw_only=True) + context: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True}) def __rshift__(self, other: BaseTask) -> BaseTask: self.add_child(other) @@ -69,8 +70,8 @@ def children(self) -> list[BaseTask]: raise ValueError("Structure must be set to access children") @property - def parent_outputs(self) -> dict[str, str]: - return {parent.id: parent.output.to_text() if parent.output else "" for parent in self.parents} + def parent_outputs(self) -> dict[str, BaseArtifact]: + return {parent.id: parent.output for parent in self.parents if parent.output} @property def parents_output_text(self) -> str: @@ -100,7 +101,7 @@ def add_parent(self, parent: BaseTask) -> BaseTask: if self.id not in parent.child_ids: parent.child_ids.append(self.id) - if self.structure is not None: + if self.structure is not None and parent not in self.structure.tasks: self.structure.add_task(parent) return self @@ -116,7 +117,7 @@ def add_child(self, child: BaseTask) -> BaseTask: if self.id not in child.parent_ids: child.parent_ids.append(self.id) - if self.structure is not None: + if self.structure is not None and child not in self.structure.tasks: self.structure.add_task(child) return self @@ -136,6 +137,7 @@ def is_executing(self) -> bool: return self.state == BaseTask.State.EXECUTING def before_run(self) -> None: + super().before_run() if self.structure is not None: EventBus.publish_event( StartTaskEvent( @@ -147,25 +149,13 @@ def before_run(self) -> None: ), ) - def after_run(self) -> None: - if self.structure is not None: - EventBus.publish_event( - FinishTaskEvent( - 
task_id=self.id, - task_parent_ids=self.parent_ids, - task_child_ids=self.child_ids, - task_input=self.input, - task_output=self.output, - ), - ) - - def execute(self) -> Optional[BaseArtifact]: + def run(self) -> BaseArtifact: try: self.state = BaseTask.State.EXECUTING self.before_run() - self.output = self.run() + self.output = self.try_run() self.after_run() except Exception as e: @@ -177,7 +167,20 @@ def execute(self) -> Optional[BaseArtifact]: return self.output - def can_execute(self) -> bool: + def after_run(self) -> None: + super().after_run() + if self.structure is not None: + EventBus.publish_event( + FinishTaskEvent( + task_id=self.id, + task_parent_ids=self.parent_ids, + task_child_ids=self.child_ids, + task_input=self.input, + task_output=self.output, + ), + ) + + def can_run(self) -> bool: return self.state == BaseTask.State.PENDING and all(parent.is_finished() for parent in self.parents) def reset(self) -> BaseTask: @@ -187,7 +190,7 @@ def reset(self) -> BaseTask: return self @abstractmethod - def run(self) -> BaseArtifact: ... + def try_run(self) -> BaseArtifact: ... @property def full_context(self) -> dict[str, Any]: diff --git a/griptape/tasks/base_text_input_task.py b/griptape/tasks/base_text_input_task.py index dfed85bcf..b8321b4f4 100644 --- a/griptape/tasks/base_text_input_task.py +++ b/griptape/tasks/base_text_input_task.py @@ -2,7 +2,7 @@ import logging from abc import ABC -from typing import Callable +from typing import Callable, Union from attrs import define, field @@ -19,7 +19,7 @@ class BaseTextInputTask(RuleMixin, BaseTask, ABC): DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}" - _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field( + _input: Union[str, TextArtifact, Callable[[BaseTask], TextArtifact]] = field( default=DEFAULT_INPUT_TEMPLATE, alias="input", ) diff --git a/griptape/tasks/code_execution_task.py b/griptape/tasks/code_execution_task.py index d627382fd..5ce311be7 100644 --- a/griptape/tasks/code_execution_task.py +++ b/griptape/tasks/code_execution_task.py @@ -12,7 +12,7 @@ @define class CodeExecutionTask(BaseTextInputTask): - run_fn: Callable[[CodeExecutionTask], BaseArtifact] = field(kw_only=True) + on_run: Callable[[CodeExecutionTask], BaseArtifact] = field(kw_only=True) - def run(self) -> BaseArtifact: - return self.run_fn(self) + def try_run(self) -> BaseArtifact: + return self.on_run(self) diff --git a/griptape/tasks/extraction_task.py b/griptape/tasks/extraction_task.py index 43096dced..35cc83003 100644 --- a/griptape/tasks/extraction_task.py +++ b/griptape/tasks/extraction_task.py @@ -17,5 +17,5 @@ class ExtractionTask(BaseTextInputTask): extraction_engine: BaseExtractionEngine = field(kw_only=True) args: dict = field(kw_only=True, factory=dict) - def run(self) -> ListArtifact | ErrorArtifact: + def try_run(self) -> ListArtifact | ErrorArtifact: return self.extraction_engine.extract_artifacts(ListArtifact([self.input]), rulesets=self.rulesets, **self.args) diff --git a/griptape/tasks/image_query_task.py b/griptape/tasks/image_query_task.py index 1c77bbc0a..abd14ec56 100644 --- a/griptape/tasks/image_query_task.py +++ b/griptape/tasks/image_query_task.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable +from typing import Callable, Union from attrs import Factory, define, field @@ -25,12 +25,12 @@ class ImageQueryTask(BaseTask): """ image_query_engine: ImageQueryEngine = field(default=Factory(lambda: ImageQueryEngine()), kw_only=True) - _input: ( - tuple[str, list[ImageArtifact]] - | 
tuple[TextArtifact, list[ImageArtifact]] - | Callable[[BaseTask], ListArtifact] - | ListArtifact - ) = field(default=None, alias="input") + _input: Union[ + tuple[str, list[ImageArtifact]], + tuple[TextArtifact, list[ImageArtifact]], + Callable[[BaseTask], ListArtifact], + ListArtifact, + ] = field(default=None, alias="input") @property def input(self) -> ListArtifact: @@ -55,14 +55,16 @@ def input(self) -> ListArtifact: def input( self, value: ( - tuple[str, list[ImageArtifact]] - | tuple[TextArtifact, list[ImageArtifact]] - | Callable[[BaseTask], ListArtifact] + Union[ + tuple[str, list[ImageArtifact]], + tuple[TextArtifact, list[ImageArtifact]], + Callable[[BaseTask], ListArtifact], + ] ), ) -> None: self._input = value - def run(self) -> TextArtifact: + def try_run(self) -> TextArtifact: query = self.input.value[0] if all(isinstance(artifact, ImageArtifact) for artifact in self.input.value[1:]): diff --git a/griptape/tasks/inpainting_image_generation_task.py b/griptape/tasks/inpainting_image_generation_task.py index 649f9e3fb..88f868c72 100644 --- a/griptape/tasks/inpainting_image_generation_task.py +++ b/griptape/tasks/inpainting_image_generation_task.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable +from typing import Callable, Union from attrs import Factory, define, field @@ -32,9 +32,9 @@ class InpaintingImageGenerationTask(BaseImageGenerationTask): default=Factory(lambda: InpaintingImageGenerationEngine()), kw_only=True, ) - _input: ( - tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact] | ListArtifact - ) = field(default=None, alias="input") + _input: Union[ + tuple[Union[str, TextArtifact], ImageArtifact, ImageArtifact], Callable[[BaseTask], ListArtifact], ListArtifact + ] = field(default=None, alias="input") @property def input(self) -> ListArtifact: @@ -59,7 +59,7 @@ def input( ) -> None: self._input = value - def run(self) -> ImageArtifact: + def try_run(self) -> ImageArtifact: prompt_artifact = self.input[0] image_artifact = self.input[1] diff --git a/griptape/tasks/outpainting_image_generation_task.py b/griptape/tasks/outpainting_image_generation_task.py index 019f74fa1..60fbff457 100644 --- a/griptape/tasks/outpainting_image_generation_task.py +++ b/griptape/tasks/outpainting_image_generation_task.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable +from typing import Callable, Union from attrs import Factory, define, field @@ -32,9 +32,9 @@ class OutpaintingImageGenerationTask(BaseImageGenerationTask): default=Factory(lambda: OutpaintingImageGenerationEngine()), kw_only=True, ) - _input: ( - tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact] | ListArtifact - ) = field(default=None, alias="input") + _input: Union[ + tuple[Union[str, TextArtifact], ImageArtifact, ImageArtifact], Callable[[BaseTask], ListArtifact], ListArtifact + ] = field(default=None, alias="input") @property def input(self) -> ListArtifact: @@ -59,7 +59,7 @@ def input( ) -> None: self._input = value - def run(self) -> ImageArtifact: + def try_run(self) -> ImageArtifact: prompt_artifact = self.input[0] image_artifact = self.input[1] diff --git a/griptape/tasks/prompt_image_generation_task.py b/griptape/tasks/prompt_image_generation_task.py index d2ebf79c2..a76c8d0d8 100644 --- a/griptape/tasks/prompt_image_generation_task.py +++ b/griptape/tasks/prompt_image_generation_task.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable 
+from typing import Callable, Union from attrs import Factory, define, field @@ -29,7 +29,7 @@ class PromptImageGenerationTask(BaseImageGenerationTask): DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}" - _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field( + _input: Union[str, TextArtifact, Callable[[BaseTask], TextArtifact]] = field( default=DEFAULT_INPUT_TEMPLATE, alias="input" ) image_generation_engine: PromptImageGenerationEngine = field( @@ -50,7 +50,7 @@ def input(self) -> TextArtifact: def input(self, value: TextArtifact) -> None: self._input = value - def run(self) -> ImageArtifact: + def try_run(self) -> ImageArtifact: image_artifact = self.image_generation_engine.run( prompts=[self.input.to_text()], rulesets=self.rulesets, diff --git a/griptape/tasks/prompt_task.py b/griptape/tasks/prompt_task.py index 127fabf48..598eacf57 100644 --- a/griptape/tasks/prompt_task.py +++ b/griptape/tasks/prompt_task.py @@ -1,7 +1,7 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Callable, Optional, Union from attrs import Factory, define, field @@ -25,10 +25,10 @@ class PromptTask(RuleMixin, BaseTask): default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True ) generate_system_template: Callable[[PromptTask], str] = field( - default=Factory(lambda self: self.default_system_template_generator, takes_self=True), + default=Factory(lambda self: self.default_generate_system_template, takes_self=True), kw_only=True, ) - _input: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact] = field( + _input: Union[str, list, tuple, BaseArtifact, Callable[[BaseTask], BaseArtifact]] = field( default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""), alias="input", ) @@ -79,7 +79,7 @@ def prompt_stack(self) -> PromptStack: return stack - def default_system_template_generator(self, _: PromptTask) -> str: + def default_generate_system_template(self, _: PromptTask) -> str: return J2("tasks/prompt_task/system.j2").render( rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), ) @@ -94,7 +94,7 @@ def after_run(self) -> None: logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) - def run(self) -> BaseArtifact: + def try_run(self) -> BaseArtifact: message = self.prompt_driver.run(self.prompt_stack) return message.to_artifact() diff --git a/griptape/tasks/rag_task.py b/griptape/tasks/rag_task.py index b7ea8d7c7..3244e6c1a 100644 --- a/griptape/tasks/rag_task.py +++ b/griptape/tasks/rag_task.py @@ -11,7 +11,7 @@ class RagTask(BaseTextInputTask): rag_engine: RagEngine = field(kw_only=True, default=Factory(lambda: RagEngine())) - def run(self) -> BaseArtifact: + def try_run(self) -> BaseArtifact: outputs = self.rag_engine.process_query(self.input.to_text()).outputs if len(outputs) > 0: diff --git a/griptape/tasks/structure_run_task.py b/griptape/tasks/structure_run_task.py index 6860958aa..db5f8bc49 100644 --- a/griptape/tasks/structure_run_task.py +++ b/griptape/tasks/structure_run_task.py @@ -22,7 +22,7 @@ class StructureRunTask(PromptTask): driver: BaseStructureRunDriver = field(kw_only=True) - def run(self) -> BaseArtifact: + def try_run(self) -> BaseArtifact: if isinstance(self.input, ListArtifact): return self.driver.run(*self.input.value) else: diff --git a/griptape/tasks/text_summary_task.py b/griptape/tasks/text_summary_task.py index 4861510d6..5cb7510ac 100644 --- 
a/griptape/tasks/text_summary_task.py +++ b/griptape/tasks/text_summary_task.py @@ -16,5 +16,5 @@ class TextSummaryTask(BaseTextInputTask): summary_engine: BaseSummaryEngine = field(default=Factory(lambda: PromptSummaryEngine()), kw_only=True) - def run(self) -> TextArtifact: + def try_run(self) -> TextArtifact: return TextArtifact(self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.rulesets)) diff --git a/griptape/tasks/text_to_speech_task.py b/griptape/tasks/text_to_speech_task.py index c131d69bc..ef67ca44d 100644 --- a/griptape/tasks/text_to_speech_task.py +++ b/griptape/tasks/text_to_speech_task.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING, Callable, Union from attrs import Factory, define, field @@ -18,7 +18,7 @@ class TextToSpeechTask(BaseAudioGenerationTask): DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}" - _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field(default=DEFAULT_INPUT_TEMPLATE) + _input: Union[str, TextArtifact, Callable[[BaseTask], TextArtifact]] = field(default=DEFAULT_INPUT_TEMPLATE) text_to_speech_engine: TextToSpeechEngine = field(default=Factory(lambda: TextToSpeechEngine()), kw_only=True) @property @@ -34,7 +34,7 @@ def input(self) -> TextArtifact: def input(self, value: TextArtifact) -> None: self._input = value - def run(self) -> AudioArtifact: + def try_run(self) -> AudioArtifact: audio_artifact = self.text_to_speech_engine.run(prompts=[self.input.to_text()], rulesets=self.rulesets) if self.output_dir or self.output_file: diff --git a/griptape/tasks/tool_task.py b/griptape/tasks/tool_task.py index 38d6e1512..07b762167 100644 --- a/griptape/tasks/tool_task.py +++ b/griptape/tasks/tool_task.py @@ -25,7 +25,7 @@ class ToolTask(PromptTask, ActionsSubtaskOriginMixin): ACTION_PATTERN = r"(?s)[^{]*({.*})" - tool: BaseTool = field(kw_only=True) + tool: BaseTool = field(kw_only=True, metadata={"serializable": True}) subtask: Optional[ActionsSubtask] = field(default=None, kw_only=True) task_memory: Optional[TaskMemory] = field(default=None, kw_only=True) @@ -49,7 +49,7 @@ def preprocess(self, structure: Structure) -> ToolTask: return self - def default_system_template_generator(self, _: PromptTask) -> str: + def default_generate_system_template(self, _: PromptTask) -> str: return J2("tasks/tool_task/system.j2").render( rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), action_schema=utils.minify_json(json.dumps(self.tool.schema())), @@ -60,7 +60,7 @@ def default_system_template_generator(self, _: PromptTask) -> str: def actions_schema(self) -> Schema: return self._actions_schema_for_tools([self.tool]) - def run(self) -> BaseArtifact: + def try_run(self) -> BaseArtifact: result = self.prompt_driver.run(prompt_stack=self.prompt_stack) if self.prompt_driver.use_native_tools: diff --git a/griptape/tasks/toolkit_task.py b/griptape/tasks/toolkit_task.py index 2a4a926bd..088ccd52d 100644 --- a/griptape/tasks/toolkit_task.py +++ b/griptape/tasks/toolkit_task.py @@ -32,11 +32,11 @@ class ToolkitTask(PromptTask, ActionsSubtaskOriginMixin): task_memory: Optional[TaskMemory] = field(default=None, kw_only=True) subtasks: list[ActionsSubtask] = field(factory=list) generate_assistant_subtask_template: Callable[[ActionsSubtask], str] = field( - default=Factory(lambda self: self.default_assistant_subtask_template_generator, takes_self=True), + default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True), kw_only=True, 
) generate_user_subtask_template: Callable[[ActionsSubtask], str] = field( - default=Factory(lambda self: self.default_user_subtask_template_generator, takes_self=True), + default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True), kw_only=True, ) response_stop_sequence: str = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True) @@ -127,7 +127,7 @@ def preprocess(self, structure: Structure) -> ToolkitTask: return self - def default_system_template_generator(self, _: PromptTask) -> str: + def default_generate_system_template(self, _: PromptTask) -> str: schema = self.actions_schema().json_schema("Actions Schema") schema["minItems"] = 1 # The `schema` library doesn't support `minItems` so we must add it manually. @@ -140,13 +140,13 @@ def default_system_template_generator(self, _: PromptTask) -> str: stop_sequence=self.response_stop_sequence, ) - def default_assistant_subtask_template_generator(self, subtask: ActionsSubtask) -> str: + def default_generate_assistant_subtask_template(self, subtask: ActionsSubtask) -> str: return J2("tasks/toolkit_task/assistant_subtask.j2").render( stop_sequence=self.response_stop_sequence, subtask=subtask, ) - def default_user_subtask_template_generator(self, subtask: ActionsSubtask) -> str: + def default_generate_user_subtask_template(self, subtask: ActionsSubtask) -> str: return J2("tasks/toolkit_task/user_subtask.j2").render( stop_sequence=self.response_stop_sequence, subtask=subtask, @@ -165,7 +165,7 @@ def set_default_tools_memory(self, memory: TaskMemory) -> None: if tool.output_memory is None and tool.off_prompt: tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in tool.activities()} - def run(self) -> BaseArtifact: + def try_run(self) -> BaseArtifact: from griptape.tasks import ActionsSubtask self.subtasks.clear() @@ -180,9 +180,6 @@ def run(self) -> BaseArtifact: if subtask.output is None: if len(self.subtasks) >= self.max_subtasks: subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task") - elif not subtask.actions: - # handle case when the LLM failed to follow the ReAct prompt and didn't return a proper action - subtask.output = subtask.input else: subtask.before_run() subtask.run() diff --git a/griptape/tasks/variation_image_generation_task.py b/griptape/tasks/variation_image_generation_task.py index ddc16178b..c0db1a64b 100644 --- a/griptape/tasks/variation_image_generation_task.py +++ b/griptape/tasks/variation_image_generation_task.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Callable +from typing import Callable, Union from attrs import Factory, define, field @@ -32,8 +32,8 @@ class VariationImageGenerationTask(BaseImageGenerationTask): default=Factory(lambda: VariationImageGenerationEngine()), kw_only=True, ) - _input: tuple[str | TextArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact] | ListArtifact = field( - default=None, alias="input" + _input: Union[tuple[Union[str, TextArtifact], ImageArtifact], Callable[[BaseTask], ListArtifact], ListArtifact] = ( + field(default=None, alias="input") ) @property @@ -56,7 +56,7 @@ def input(self) -> ListArtifact: def input(self, value: tuple[str | TextArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact]) -> None: self._input = value - def run(self) -> ImageArtifact: + def try_run(self) -> ImageArtifact: prompt_artifact = self.input[0] image_artifact = self.input[1] diff --git a/griptape/templates/tasks/toolkit_task/system.j2 
b/griptape/templates/tasks/toolkit_task/system.j2 index 09c4e9af9..e9ec48739 100644 --- a/griptape/templates/tasks/toolkit_task/system.j2 +++ b/griptape/templates/tasks/toolkit_task/system.j2 @@ -6,17 +6,16 @@ You must use the following format when executing actions: Thought: Actions: {{ stop_sequence }}: -...repeat Thought/Actions/{{ stop_sequence }} as many times as you need -"Thought", "Actions", "{{ stop_sequence }}" must always start on a new line. If {{ stop_sequence }} contains an error, you MUST ALWAYS try to fix the error with another Thought/Actions/{{ stop_sequence }}. +"Thought", "Actions", "{{ stop_sequence }}" must always start on a new line. -{% endif %} You must use the following format when providing your final answer: Answer: +{% endif %} +Repeat executing actions as many times as you need. +If an action's output contains an error, you MUST ALWAYS try to fix the error by executing another action. Be truthful. ALWAYS be proactive and NEVER ask the user for more information input. Keep using actions until you have your final answer. NEVER make up actions, action names, or action paths. NEVER make up facts. NEVER reference tags in other action input values. - -Actions might store their output in memory as artifacts (with `memory_name` and `artifact_namespace`). If action output is stored in memory, ALWAYS try to pass it to another action. NEVER make up memory names or artifact namespaces. {% if meta_memory %} {{ meta_memory }} diff --git a/griptape/tokenizers/voyageai_tokenizer.py b/griptape/tokenizers/voyageai_tokenizer.py index d5007cf7d..4b9807eb6 100644 --- a/griptape/tokenizers/voyageai_tokenizer.py +++ b/griptape/tokenizers/voyageai_tokenizer.py @@ -8,7 +8,7 @@ from griptape.utils import import_optional_dependency if TYPE_CHECKING: - from voyageai import Client + from voyageai.client import Client @define() diff --git a/griptape/tools/aws_iam/tool.py b/griptape/tools/aws_iam/tool.py index 6c1bed054..d5b20d56a 100644 --- a/griptape/tools/aws_iam/tool.py +++ b/griptape/tools/aws_iam/tool.py @@ -46,7 +46,7 @@ def get_user_policy(self, params: dict) -> TextArtifact | ErrorArtifact: return ErrorArtifact(f"error returning policy document: {e}") @activity(config={"description": "Can be used to list AWS MFA Devices"}) - def list_mfa_devices(self, _: dict) -> ListArtifact | ErrorArtifact: + def list_mfa_devices(self) -> ListArtifact | ErrorArtifact: try: devices = self.client.list_mfa_devices() return ListArtifact([TextArtifact(str(d)) for d in devices["MFADevices"]]) @@ -76,7 +76,7 @@ def list_user_policies(self, params: dict) -> ListArtifact | ErrorArtifact: return ErrorArtifact(f"error listing iam user policies: {e}") @activity(config={"description": "Can be used to list AWS IAM users."}) - def list_users(self, _: dict) -> ListArtifact | ErrorArtifact: + def list_users(self) -> ListArtifact | ErrorArtifact: try: users = self.client.list_users() return ListArtifact([TextArtifact(str(u)) for u in users["Users"]]) diff --git a/griptape/tools/aws_s3/tool.py b/griptape/tools/aws_s3/tool.py index b352da2d5..fb0c2fdf2 100644 --- a/griptape/tools/aws_s3/tool.py +++ b/griptape/tools/aws_s3/tool.py @@ -79,7 +79,7 @@ def get_object_acl(self, params: dict) -> TextArtifact | ErrorArtifact: return ErrorArtifact(f"error getting object acl: {e}") @activity(config={"description": "Can be used to list all AWS S3 buckets."}) - def list_s3_buckets(self, _: dict) -> ListArtifact | ErrorArtifact: + def list_s3_buckets(self) -> ListArtifact | ErrorArtifact: try: buckets = 
self.client.list_buckets() diff --git a/griptape/tools/base_tool.py b/griptape/tools/base_tool.py index 81f127791..c0c391bbd 100644 --- a/griptape/tools/base_tool.py +++ b/griptape/tools/base_tool.py @@ -7,8 +7,10 @@ import subprocess import sys from abc import ABC +from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Optional +import pkg_resources import schema from attrs import Attribute, Factory, define, field from schema import Literal, Or, Schema @@ -16,6 +18,8 @@ from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact, TextArtifact from griptape.common import observable from griptape.mixins.activity_mixin import ActivityMixin +from griptape.mixins.runnable_mixin import RunnableMixin +from griptape.mixins.serializable_mixin import SerializableMixin if TYPE_CHECKING: from griptape.common import ToolAction @@ -24,7 +28,7 @@ @define -class BaseTool(ActivityMixin, ABC): +class BaseTool(ActivityMixin, SerializableMixin, RunnableMixin["BaseTool"], ABC): """Abstract class for all tools to inherit from for. Attributes: @@ -39,16 +43,26 @@ class BaseTool(ActivityMixin, ABC): REQUIREMENTS_FILE = "requirements.txt" - name: str = field(default=Factory(lambda self: self.__class__.__name__, takes_self=True), kw_only=True) - input_memory: Optional[list[TaskMemory]] = field(default=None, kw_only=True) - output_memory: Optional[dict[str, list[TaskMemory]]] = field(default=None, kw_only=True) - install_dependencies_on_init: bool = field(default=True, kw_only=True) - dependencies_install_directory: Optional[str] = field(default=None, kw_only=True) - verbose: bool = field(default=False, kw_only=True) - off_prompt: bool = field(default=False, kw_only=True) + name: str = field( + default=Factory(lambda self: self.__class__.__name__, takes_self=True), + kw_only=True, + metadata={"serializable": True}, + ) + input_memory: Optional[list[TaskMemory]] = field(default=None, kw_only=True, metadata={"serializable": True}) + output_memory: Optional[dict[str, list[TaskMemory]]] = field( + default=None, kw_only=True, metadata={"serializable": True} + ) + install_dependencies_on_init: bool = field(default=True, kw_only=True, metadata={"serializable": True}) + dependencies_install_directory: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) + verbose: bool = field(default=False, kw_only=True, metadata={"serializable": True}) + off_prompt: bool = field(default=False, kw_only=True, metadata={"serializable": True}) def __attrs_post_init__(self) -> None: - if self.install_dependencies_on_init: + if ( + self.install_dependencies_on_init + and self.has_requirements + and not self.are_requirements_met(self.requirements_path) + ): self.install_dependencies(os.environ.copy()) @output_memory.validator # pyright: ignore[reportAttributeAccessIssue] @@ -77,6 +91,10 @@ def abs_file_path(self) -> str: def abs_dir_path(self) -> str: return os.path.dirname(self.abs_file_path) + @property + def has_requirements(self) -> bool: + return os.path.exists(self.requirements_path) + # This method has to remain a method and can't be decorated with @property because # of the max depth recursion issue in `self.activities`. 
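# Illustrative sketch (not part of the diff): what the reworked @activity calling
# convention (see the tool activity changes above and griptape/utils/decorators.py
# further below) looks like for a custom tool. "GreetingTool" and "greet" are
# hypothetical; activity arguments are now injected from params["values"] by name,
# so the old `params: dict` boilerplate is no longer required.
from schema import Literal, Schema

from griptape.artifacts import TextArtifact
from griptape.tools import BaseTool
from griptape.utils.decorators import activity


class GreetingTool(BaseTool):
    @activity(
        config={
            "description": "Greets a person by name.",
            "schema": Schema({Literal("name", description="Name to greet"): str}),
        }
    )
    def greet(self, name: str) -> TextArtifact:
        # "name" arrives directly as a keyword argument instead of params["values"]["name"].
        return TextArtifact(f"Hello, {name}!")


# GreetingTool().greet({"values": {"name": "Ada"}}).value  ->  "Hello, Ada!"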
def schema(self) -> dict: @@ -105,11 +123,11 @@ def activity_schemas(self) -> list[Schema]: return schemas - def execute(self, activity: Callable, subtask: ActionsSubtask, action: ToolAction) -> BaseArtifact: + def run(self, activity: Callable, subtask: ActionsSubtask, action: ToolAction) -> BaseArtifact: try: output = self.before_run(activity, subtask, action) - output = self.run(activity, subtask, action, output) + output = self.try_run(activity, subtask, action, output) output = self.after_run(activity, subtask, action, output) except Exception as e: @@ -118,10 +136,12 @@ def execute(self, activity: Callable, subtask: ActionsSubtask, action: ToolActio return output def before_run(self, activity: Callable, subtask: ActionsSubtask, action: ToolAction) -> Optional[dict]: + super().before_run() + return action.input @observable(tags=["Tool.run()"]) - def run( + def try_run( self, activity: Callable, subtask: ActionsSubtask, @@ -146,6 +166,8 @@ def after_run( action: ToolAction, value: BaseArtifact, ) -> BaseArtifact: + super().after_run() + if value: if self.output_memory: output_memories = self.output_memory[getattr(activity, "name")] or [] @@ -216,3 +238,13 @@ def to_native_tool_name(self, activity: Callable) -> str: raise ValueError("Activity name can only contain letters, numbers, and underscores.") return f"{tool_name}_{activity_name}" + + def are_requirements_met(self, requirements_path: str) -> bool: + requirements = Path(requirements_path).read_text().splitlines() + + try: + pkg_resources.require(requirements) + + return True + except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict): + return False diff --git a/griptape/tools/date_time/tool.py b/griptape/tools/date_time/tool.py index 5181dbe3e..dfdfcc578 100644 --- a/griptape/tools/date_time/tool.py +++ b/griptape/tools/date_time/tool.py @@ -9,7 +9,7 @@ class DateTimeTool(BaseTool): @activity(config={"description": "Can be used to return current date and time."}) - def get_current_datetime(self, _: dict) -> BaseArtifact: + def get_current_datetime(self) -> BaseArtifact: try: current_datetime = datetime.now() diff --git a/griptape/tools/vector_store/tool.py b/griptape/tools/vector_store/tool.py index 71902b1c7..eee854f6d 100644 --- a/griptape/tools/vector_store/tool.py +++ b/griptape/tools/vector_store/tool.py @@ -21,7 +21,7 @@ class VectorStoreTool(BaseTool): description: LLM-friendly vector DB description. vector_store_driver: `BaseVectorStoreDriver`. query_params: Optional dictionary of vector store driver query parameters. - process_query_output_fn: Optional lambda for processing vector store driver query output `Entry`s. + process_query_output: Optional lambda for processing vector store driver query output `Entry`s. 
""" DEFAULT_TOP_N = 5 @@ -29,7 +29,7 @@ class VectorStoreTool(BaseTool): description: str = field() vector_store_driver: BaseVectorStoreDriver = field() query_params: dict[str, Any] = field(factory=dict) - process_query_output_fn: Callable[[list[BaseVectorStoreDriver.Entry]], BaseArtifact] = field( + process_query_output: Callable[[list[BaseVectorStoreDriver.Entry]], BaseArtifact] = field( default=Factory(lambda: lambda es: ListArtifact([e.to_artifact() for e in es])), ) @@ -50,6 +50,6 @@ def search(self, params: dict) -> BaseArtifact: query = params["values"]["query"] try: - return self.process_query_output_fn(self.vector_store_driver.query(query, **self.query_params)) + return self.process_query_output(self.vector_store_driver.query(query, **self.query_params)) except Exception as e: return ErrorArtifact(f"error querying vector store: {e}") diff --git a/griptape/tools/web_search/tool.py b/griptape/tools/web_search/tool.py index 557c26a52..cbe4dcbf6 100644 --- a/griptape/tools/web_search/tool.py +++ b/griptape/tools/web_search/tool.py @@ -30,12 +30,10 @@ class WebSearchTool(BaseTool): ), }, ) - def search(self, props: dict) -> ListArtifact | ErrorArtifact: - values = props["values"] - query = values["query"] - extra_keys = {k: values[k] for k in values.keys() - {"query"}} + def search(self, values: dict) -> ListArtifact | ErrorArtifact: + query = values.pop("query") try: - return self.web_search_driver.search(query, **extra_keys) + return self.web_search_driver.search(query, **values) except Exception as e: return ErrorArtifact(f"Error searching '{query}' with {self.web_search_driver.__class__.__name__}: {e}") diff --git a/griptape/utils/__init__.py b/griptape/utils/__init__.py index 77e3f3b0a..3f44c8926 100644 --- a/griptape/utils/__init__.py +++ b/griptape/utils/__init__.py @@ -17,6 +17,7 @@ from .structure_visualizer import StructureVisualizer from .reference_utils import references_from_artifacts from .file_utils import get_mime_type +from .contextvars_utils import with_contextvars def minify_json(value: str) -> str: @@ -47,4 +48,5 @@ def minify_json(value: str) -> str: "StructureVisualizer", "references_from_artifacts", "get_mime_type", + "with_contextvars", ] diff --git a/griptape/utils/chat.py b/griptape/utils/chat.py index 21e045db7..8bbf38cd7 100644 --- a/griptape/utils/chat.py +++ b/griptape/utils/chat.py @@ -4,6 +4,8 @@ from typing import TYPE_CHECKING, Callable, Optional from attrs import Factory, define, field +from rich import print as rprint +from rich.prompt import Prompt from griptape.utils.stream import Stream @@ -13,6 +15,24 @@ @define class Chat: + """Utility for running a chat with a Structure. + + Attributes: + structure: The Structure to run. + exit_keywords: Keywords that will exit the chat. + exiting_text: Text to display when exiting the chat. + processing_text: Text to display while processing the user's input. + intro_text: Text to display when the chat starts. + prompt_prefix: Prefix for the user's input. + response_prefix: Prefix for the assistant's response. + handle_input: Function to get the user's input. + handle_output: Function to output text. Takes a `text` argument for the text to output. + Also takes a `stream` argument which will be set to True when streaming Prompt Tasks are present. 
+ """ + + class ChatPrompt(Prompt): + prompt_suffix = "" # We don't want rich's default prompt suffix + structure: Structure = field() exit_keywords: list[str] = field(default=["exit"], kw_only=True) exiting_text: str = field(default="Exiting...", kw_only=True) @@ -20,22 +40,23 @@ class Chat: intro_text: Optional[str] = field(default=None, kw_only=True) prompt_prefix: str = field(default="User: ", kw_only=True) response_prefix: str = field(default="Assistant: ", kw_only=True) - output_fn: Callable[[str], None] = field( - default=Factory(lambda self: self.default_output_fn, takes_self=True), + handle_input: Callable[[str], str] = field( + default=Factory(lambda self: self.default_handle_input, takes_self=True), kw_only=True + ) + handle_output: Callable[..., None] = field( + default=Factory(lambda self: self.default_handle_output, takes_self=True), kw_only=True, ) logger_level: int = field(default=logging.ERROR, kw_only=True) - def default_output_fn(self, text: str) -> None: - from griptape.tasks.prompt_task import PromptTask + def default_handle_input(self, prompt_prefix: str) -> str: + return Chat.ChatPrompt.ask(prompt_prefix) - streaming_tasks = [ - task for task in self.structure.tasks if isinstance(task, PromptTask) and task.prompt_driver.stream - ] - if streaming_tasks: - print(text, end="", flush=True) # noqa: T201 + def default_handle_output(self, text: str, *, stream: bool = False) -> None: + if stream: + rprint(text, end="", flush=True) else: - print(text) # noqa: T201 + rprint(text) def start(self) -> None: from griptape.configs import Defaults @@ -45,24 +66,31 @@ def start(self) -> None: logging.getLogger(Defaults.logging_config.logger_name).setLevel(self.logger_level) if self.intro_text: - self.output_fn(self.intro_text) + self.handle_output(self.intro_text) + + has_streaming_tasks = self._has_streaming_tasks() while True: - question = input(self.prompt_prefix) + question = self.handle_input(self.prompt_prefix) if question.lower() in self.exit_keywords: - self.output_fn(self.exiting_text) + self.handle_output(self.exiting_text) break - if Defaults.drivers_config.prompt_driver.stream: - self.output_fn(self.processing_text + "\n") + if has_streaming_tasks: + self.handle_output(self.processing_text) stream = Stream(self.structure).run(question) first_chunk = next(stream) - self.output_fn(self.response_prefix + first_chunk.value) + self.handle_output(self.response_prefix + first_chunk.value, stream=True) for chunk in stream: - self.output_fn(chunk.value) + self.handle_output(chunk.value, stream=True) else: - self.output_fn(self.processing_text) - self.output_fn(f"{self.response_prefix}{self.structure.run(question).output_task.output.to_text()}") + self.handle_output(self.processing_text) + self.handle_output(f"{self.response_prefix}{self.structure.run(question).output_task.output.to_text()}") # Restore the original logger level logging.getLogger(Defaults.logging_config.logger_name).setLevel(old_logger_level) + + def _has_streaming_tasks(self) -> bool: + from griptape.tasks.prompt_task import PromptTask + + return any(isinstance(task, PromptTask) and task.prompt_driver.stream for task in self.structure.tasks) diff --git a/griptape/utils/contextvars_utils.py b/griptape/utils/contextvars_utils.py new file mode 100644 index 000000000..0d18230a4 --- /dev/null +++ b/griptape/utils/contextvars_utils.py @@ -0,0 +1,9 @@ +import contextvars +import functools +from typing import Callable + + +def with_contextvars(wrapped: Callable) -> Callable: + ctx = contextvars.copy_context() + + return 
functools.partial(ctx.run, wrapped) diff --git a/griptape/utils/decorators.py b/griptape/utils/decorators.py index 356f4eec0..3eef6d8d0 100644 --- a/griptape/utils/decorators.py +++ b/griptape/utils/decorators.py @@ -1,6 +1,7 @@ from __future__ import annotations import functools +import inspect from typing import Any, Callable, Optional import schema @@ -24,8 +25,8 @@ def activity(config: dict) -> Any: def decorator(func: Callable) -> Any: @functools.wraps(func) - def wrapper(self: Any, *args, **kwargs) -> Any: - return func(self, *args, **kwargs) + def wrapper(self: Any, params: dict) -> Any: + return func(self, **_build_kwargs(func, params)) setattr(wrapper, "name", func.__name__) setattr(wrapper, "config", validated_config) @@ -54,3 +55,34 @@ def lazy_attr(self: Any, value: Any) -> None: return lazy_attr return decorator + + +def _build_kwargs(func: Callable, params: dict) -> dict: + func_params = inspect.signature(func).parameters.copy() + func_params.pop("self") + + kwarg_var = None + for param in func_params.values(): + # if there is a **kwargs parameter, we can safely + # pass all the params to the function + if param.kind == inspect.Parameter.VAR_KEYWORD: + kwarg_var = func_params.pop(param.name).name + break + + # only pass the values that are in the function signature + # or if there is a **kwargs parameter, pass all the values + kwargs = {k: v for k, v in params.get("values", {}).items() if k in func_params or kwarg_var is not None} + + # add 'params' and 'values' if they are in the signature + # or if there is a **kwargs parameter + if "params" in func_params or kwarg_var is not None: + kwargs["params"] = params + if "values" in func_params or kwarg_var is not None: + kwargs["values"] = params.get("values") + + # set any missing parameters to None + for param_name in func_params: + if param_name not in kwargs: + kwargs[param_name] = None + + return kwargs diff --git a/griptape/utils/stream.py b/griptape/utils/stream.py index 8a764e85a..af8c65b3b 100644 --- a/griptape/utils/stream.py +++ b/griptape/utils/stream.py @@ -1,5 +1,6 @@ from __future__ import annotations +import json from queue import Queue from threading import Thread from typing import TYPE_CHECKING @@ -7,7 +8,15 @@ from attrs import Attribute, Factory, define, field from griptape.artifacts.text_artifact import TextArtifact -from griptape.events import CompletionChunkEvent, EventBus, EventListener, FinishPromptEvent, FinishStructureRunEvent +from griptape.events import ( + ActionChunkEvent, + BaseChunkEvent, + EventBus, + EventListener, + FinishPromptEvent, + FinishStructureRunEvent, + TextChunkEvent, +) if TYPE_CHECKING: from collections.abc import Iterator @@ -18,7 +27,7 @@ @define class Stream: - """A wrapper for Structures that converts `CompletionChunkEvent`s into an iterator of TextArtifacts. + """A wrapper for Structures that converts `BaseChunkEvent`s into an iterator of TextArtifacts. It achieves this by running the Structure in a separate thread, listening for events from the Structure, and yielding those events. 
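# Illustrative sketch (not part of the diff): consuming the updated Stream wrapper,
# which now listens for BaseChunkEvent (text and action chunks) instead of
# CompletionChunkEvent. Assumes the Agent's prompt driver has streaming enabled
# (e.g. via Agent(stream=True)).
from griptape.structures import Agent
from griptape.utils import Stream

agent = Agent(stream=True)
for artifact in Stream(agent).run("Summarize this change in one sentence."):
    print(artifact.value, end="", flush=True)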
@@ -48,14 +57,25 @@ def run(self, *args) -> Iterator[TextArtifact]: t = Thread(target=self._run_structure, args=args) t.start() + action_str = "" while True: event = self._event_queue.get() if isinstance(event, FinishStructureRunEvent): break elif isinstance(event, FinishPromptEvent): yield TextArtifact(value="\n") - elif isinstance(event, CompletionChunkEvent): + elif isinstance(event, TextChunkEvent): yield TextArtifact(value=event.token) + elif isinstance(event, ActionChunkEvent): + if event.tag is not None and event.name is not None and event.path is not None: + yield TextArtifact(f"{event.name}.{event.tag} ({event.path})") + if event.partial_input is not None: + action_str += event.partial_input + try: + yield TextArtifact(json.dumps(json.loads(action_str), indent=2)) + action_str = "" + except Exception: + pass t.join() def _run_structure(self, *args) -> None: @@ -63,8 +83,8 @@ def event_handler(event: BaseEvent) -> None: self._event_queue.put(event) stream_event_listener = EventListener( - handler=event_handler, - event_types=[CompletionChunkEvent, FinishPromptEvent, FinishStructureRunEvent], + on_event=event_handler, + event_types=[BaseChunkEvent, FinishPromptEvent, FinishStructureRunEvent], ) EventBus.add_event_listener(stream_event_listener) diff --git a/mkdocs.yml b/mkdocs.yml index 0be4ec7e5..f43b9e1f7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -89,6 +89,10 @@ nav: - Structure Config YAML: "griptape-cloud/structures/structure-config.md" - Running Your Structure: "griptape-cloud/structures/run-structure.md" - Structure Run Events: "griptape-cloud/structures/structure-run-events.md" + - Rules: + - Create a Ruleset: "griptape-cloud/rules/rulesets.md" + - Threads: + - Create a Thread: "griptape-cloud/threads/threads.md" - Cloud API: - API Reference: "griptape-cloud/api/api-reference.md" - Framework: @@ -128,6 +132,8 @@ nav: - Audio Transcription Drivers: "griptape-framework/drivers/audio-transcription-drivers.md" - Web Search Drivers: "griptape-framework/drivers/web-search-drivers.md" - Observability Drivers: "griptape-framework/drivers/observability-drivers.md" + - Ruleset Drivers: "griptape-framework/drivers/ruleset-drivers.md" + - File Manager Drivers: "griptape-framework/drivers/file-manager-drivers.md" - Data: - Overview: "griptape-framework/data/index.md" - Artifacts: "griptape-framework/data/artifacts.md" diff --git a/poetry.lock b/poetry.lock index 1ba22054f..b3d49dc6f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -161,13 +161,13 @@ files = [ [[package]] name = "anthropic" -version = "0.35.0" +version = "0.37.1" description = "The official Python library for the anthropic API" optional = true python-versions = ">=3.7" files = [ - {file = "anthropic-0.35.0-py3-none-any.whl", hash = "sha256:777983989ed9e444eb4a6d92dad84027f14a6639cba6f48772c0078d51959828"}, - {file = "anthropic-0.35.0.tar.gz", hash = "sha256:d2f998246413c309a7770d1faa617500f505377a04ab45a13a66f8559daf3742"}, + {file = "anthropic-0.37.1-py3-none-any.whl", hash = "sha256:8f550f88906823752e2abf99fbe491fbc8d40bce4cb26b9663abdf7be990d721"}, + {file = "anthropic-0.37.1.tar.gz", hash = "sha256:99f688265795daa7ba9256ee68eaf2f05d53cd99d7417f4a0c2dc292c106d00a"}, ] [package.dependencies] @@ -219,17 +219,16 @@ files = [ [[package]] name = "astrapy" -version = "1.5.0" +version = "1.5.2" description = "AstraPy is a Pythonic SDK for DataStax Astra and its Data API" optional = true python-versions = "<4.0.0,>=3.8.0" files = [ - {file = "astrapy-1.5.0-py3-none-any.whl", hash = "sha256:eb805202c976f5c3f5a6dcc2bd79f4c566e68b2c0ee25bfa3f56bf9db7b454b1"}, - {file = "astrapy-1.5.0.tar.gz", hash = "sha256:a9d75fade84f67f6fdf8d1286ed0bfb265f44c109f4f26acf50ed4883abef035"}, + {file = "astrapy-1.5.2-py3-none-any.whl", hash = "sha256:598b86de723727a11ec43e1c7fe682ecb42d63d37a94165fb08de41c20103f56"}, + {file = "astrapy-1.5.2.tar.gz", hash = "sha256:eaf703628b0d03891ae7c391ef04ff3aec1005837fdfa47c19f2ed4478c45a4a"}, ] [package.dependencies] -cassio = ">=0.1.4,<0.2.0" deprecation = ">=2.1.0,<2.2.0" httpx = {version = ">=0.25.2,<1", extras = ["http2"]} pymongo = ">=3" @@ -266,6 +265,45 @@ docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphi tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +[[package]] +name = "azure-core" +version = "1.31.0" +description = "Microsoft Azure Core Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_core-1.31.0-py3-none-any.whl", hash = "sha256:22954de3777e0250029360ef31d80448ef1be13b80a459bff80ba7073379e2cd"}, + {file = "azure_core-1.31.0.tar.gz", hash = "sha256:656a0dd61e1869b1506b7c6a3b31d62f15984b1a573d6326f6aa2f3e4123284b"}, +] + +[package.dependencies] +requests = ">=2.21.0" +six = ">=1.11.0" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["aiohttp (>=3.0)"] + +[[package]] +name = "azure-storage-blob" +version = "12.23.1" +description = "Microsoft Azure Blob Storage Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_storage_blob-12.23.1-py3-none-any.whl", hash = "sha256:1c2238aa841d1545f42714a5017c010366137a44a0605da2d45f770174bfc6b4"}, + {file = "azure_storage_blob-12.23.1.tar.gz", hash = "sha256:a587e54d4e39d2a27bd75109db164ffa2058fe194061e5446c5a89bca918272f"}, +] + +[package.dependencies] +azure-core = ">=1.30.0" +cryptography = ">=2.1.4" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["azure-core[aio] (>=1.30.0)"] + [[package]] name = "babel" version = "2.16.0" @@ -318,17 +356,17 @@ lxml = ["lxml"] [[package]] name = "boto3" -version = "1.35.34" +version = "1.35.49" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.35.34-py3-none-any.whl", hash = "sha256:291e7b97a34967ed93297e6171f1bebb8529e64633dd48426760e3fdef1cdea8"}, - 
{file = "boto3-1.35.34.tar.gz", hash = "sha256:57e6ee8504e7929bc094bb2afc879943906064179a1e88c23b4812e2c6f61532"}, + {file = "boto3-1.35.49-py3-none-any.whl", hash = "sha256:b660c649a27a6b47a34f6f858f5bd7c3b0a798a16dec8dda7cbebeee80fd1f60"}, + {file = "boto3-1.35.49.tar.gz", hash = "sha256:ddecb27f5699ca9f97711c52b6c0652c2e63bf6c2bfbc13b819b4f523b4d30ff"}, ] [package.dependencies] -botocore = ">=1.35.34,<1.36.0" +botocore = ">=1.35.49,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -337,13 +375,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.35.34" -description = "Type annotations for boto3 1.35.34 generated with mypy-boto3-builder 8.1.2" +version = "1.35.49" +description = "Type annotations for boto3 1.35.49 generated with mypy-boto3-builder 8.1.4" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs-1.35.34-py3-none-any.whl", hash = "sha256:6a2379d8ce47ca704690dbb058c29b8900e77e6210bf8bcebfe876640522ee1c"}, - {file = "boto3_stubs-1.35.34.tar.gz", hash = "sha256:5e9209b26901f8feba4f6bca47024ad1590f9e7e21423ce4d112928973a5e09c"}, + {file = "boto3_stubs-1.35.49-py3-none-any.whl", hash = "sha256:daad87dcff906f7c09dde4ef3c252e2c47b6e1e8e669f5a8311658ac0d1182c0"}, + {file = "boto3_stubs-1.35.49.tar.gz", hash = "sha256:2a2e08ba2383df6f478127f9754a02a590131249b40c59d7c6ca9fce76906785"}, ] [package.dependencies] @@ -365,7 +403,7 @@ accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)"] account = ["mypy-boto3-account (>=1.35.0,<1.36.0)"] acm = ["mypy-boto3-acm (>=1.35.0,<1.36.0)"] acm-pca = ["mypy-boto3-acm-pca (>=1.35.0,<1.36.0)"] -all = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)", "mypy-boto3-account (>=1.35.0,<1.36.0)", "mypy-boto3-acm (>=1.35.0,<1.36.0)", "mypy-boto3-acm-pca (>=1.35.0,<1.36.0)", "mypy-boto3-amp (>=1.35.0,<1.36.0)", "mypy-boto3-amplify (>=1.35.0,<1.36.0)", "mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)", "mypy-boto3-amplifyuibuilder (>=1.35.0,<1.36.0)", "mypy-boto3-apigateway (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewaymanagementapi (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewayv2 (>=1.35.0,<1.36.0)", "mypy-boto3-appconfig (>=1.35.0,<1.36.0)", "mypy-boto3-appconfigdata (>=1.35.0,<1.36.0)", "mypy-boto3-appfabric (>=1.35.0,<1.36.0)", "mypy-boto3-appflow (>=1.35.0,<1.36.0)", "mypy-boto3-appintegrations (>=1.35.0,<1.36.0)", "mypy-boto3-application-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-application-insights (>=1.35.0,<1.36.0)", "mypy-boto3-application-signals (>=1.35.0,<1.36.0)", "mypy-boto3-applicationcostprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-appmesh (>=1.35.0,<1.36.0)", "mypy-boto3-apprunner (>=1.35.0,<1.36.0)", "mypy-boto3-appstream (>=1.35.0,<1.36.0)", "mypy-boto3-appsync (>=1.35.0,<1.36.0)", "mypy-boto3-apptest (>=1.35.0,<1.36.0)", "mypy-boto3-arc-zonal-shift (>=1.35.0,<1.36.0)", "mypy-boto3-artifact (>=1.35.0,<1.36.0)", "mypy-boto3-athena (>=1.35.0,<1.36.0)", "mypy-boto3-auditmanager (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling-plans (>=1.35.0,<1.36.0)", "mypy-boto3-b2bi (>=1.35.0,<1.36.0)", "mypy-boto3-backup (>=1.35.0,<1.36.0)", "mypy-boto3-backup-gateway (>=1.35.0,<1.36.0)", "mypy-boto3-batch (>=1.35.0,<1.36.0)", "mypy-boto3-bcm-data-exports (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-billingconductor (>=1.35.0,<1.36.0)", "mypy-boto3-braket 
(>=1.35.0,<1.36.0)", "mypy-boto3-budgets (>=1.35.0,<1.36.0)", "mypy-boto3-ce (>=1.35.0,<1.36.0)", "mypy-boto3-chatbot (>=1.35.0,<1.36.0)", "mypy-boto3-chime (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-identity (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-meetings (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-messaging (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-voice (>=1.35.0,<1.36.0)", "mypy-boto3-cleanrooms (>=1.35.0,<1.36.0)", "mypy-boto3-cleanroomsml (>=1.35.0,<1.36.0)", "mypy-boto3-cloud9 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudcontrol (>=1.35.0,<1.36.0)", "mypy-boto3-clouddirectory (>=1.35.0,<1.36.0)", "mypy-boto3-cloudformation (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsm (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsmv2 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearch (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearchdomain (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail-data (>=1.35.0,<1.36.0)", "mypy-boto3-cloudwatch (>=1.35.0,<1.36.0)", "mypy-boto3-codeartifact (>=1.35.0,<1.36.0)", "mypy-boto3-codebuild (>=1.35.0,<1.36.0)", "mypy-boto3-codecatalyst (>=1.35.0,<1.36.0)", "mypy-boto3-codecommit (>=1.35.0,<1.36.0)", "mypy-boto3-codeconnections (>=1.35.0,<1.36.0)", "mypy-boto3-codedeploy (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-reviewer (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-security (>=1.35.0,<1.36.0)", "mypy-boto3-codeguruprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-codepipeline (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-connections (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-notifications (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-identity (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-idp (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-sync (>=1.35.0,<1.36.0)", "mypy-boto3-comprehend (>=1.35.0,<1.36.0)", "mypy-boto3-comprehendmedical (>=1.35.0,<1.36.0)", "mypy-boto3-compute-optimizer (>=1.35.0,<1.36.0)", "mypy-boto3-config (>=1.35.0,<1.36.0)", "mypy-boto3-connect (>=1.35.0,<1.36.0)", "mypy-boto3-connect-contact-lens (>=1.35.0,<1.36.0)", "mypy-boto3-connectcampaigns (>=1.35.0,<1.36.0)", "mypy-boto3-connectcases (>=1.35.0,<1.36.0)", "mypy-boto3-connectparticipant (>=1.35.0,<1.36.0)", "mypy-boto3-controlcatalog (>=1.35.0,<1.36.0)", "mypy-boto3-controltower (>=1.35.0,<1.36.0)", "mypy-boto3-cost-optimization-hub (>=1.35.0,<1.36.0)", "mypy-boto3-cur (>=1.35.0,<1.36.0)", "mypy-boto3-customer-profiles (>=1.35.0,<1.36.0)", "mypy-boto3-databrew (>=1.35.0,<1.36.0)", "mypy-boto3-dataexchange (>=1.35.0,<1.36.0)", "mypy-boto3-datapipeline (>=1.35.0,<1.36.0)", "mypy-boto3-datasync (>=1.35.0,<1.36.0)", "mypy-boto3-datazone (>=1.35.0,<1.36.0)", "mypy-boto3-dax (>=1.35.0,<1.36.0)", "mypy-boto3-deadline (>=1.35.0,<1.36.0)", "mypy-boto3-detective (>=1.35.0,<1.36.0)", "mypy-boto3-devicefarm (>=1.35.0,<1.36.0)", "mypy-boto3-devops-guru (>=1.35.0,<1.36.0)", "mypy-boto3-directconnect (>=1.35.0,<1.36.0)", "mypy-boto3-discovery (>=1.35.0,<1.36.0)", "mypy-boto3-dlm (>=1.35.0,<1.36.0)", "mypy-boto3-dms (>=1.35.0,<1.36.0)", "mypy-boto3-docdb (>=1.35.0,<1.36.0)", "mypy-boto3-docdb-elastic (>=1.35.0,<1.36.0)", "mypy-boto3-drs (>=1.35.0,<1.36.0)", "mypy-boto3-ds (>=1.35.0,<1.36.0)", "mypy-boto3-ds-data (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodb (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodbstreams (>=1.35.0,<1.36.0)", "mypy-boto3-ebs (>=1.35.0,<1.36.0)", "mypy-boto3-ec2 (>=1.35.0,<1.36.0)", "mypy-boto3-ec2-instance-connect (>=1.35.0,<1.36.0)", "mypy-boto3-ecr 
(>=1.35.0,<1.36.0)", "mypy-boto3-ecr-public (>=1.35.0,<1.36.0)", "mypy-boto3-ecs (>=1.35.0,<1.36.0)", "mypy-boto3-efs (>=1.35.0,<1.36.0)", "mypy-boto3-eks (>=1.35.0,<1.36.0)", "mypy-boto3-eks-auth (>=1.35.0,<1.36.0)", "mypy-boto3-elastic-inference (>=1.35.0,<1.36.0)", "mypy-boto3-elasticache (>=1.35.0,<1.36.0)", "mypy-boto3-elasticbeanstalk (>=1.35.0,<1.36.0)", "mypy-boto3-elastictranscoder (>=1.35.0,<1.36.0)", "mypy-boto3-elb (>=1.35.0,<1.36.0)", "mypy-boto3-elbv2 (>=1.35.0,<1.36.0)", "mypy-boto3-emr (>=1.35.0,<1.36.0)", "mypy-boto3-emr-containers (>=1.35.0,<1.36.0)", "mypy-boto3-emr-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-entityresolution (>=1.35.0,<1.36.0)", "mypy-boto3-es (>=1.35.0,<1.36.0)", "mypy-boto3-events (>=1.35.0,<1.36.0)", "mypy-boto3-evidently (>=1.35.0,<1.36.0)", "mypy-boto3-finspace (>=1.35.0,<1.36.0)", "mypy-boto3-finspace-data (>=1.35.0,<1.36.0)", "mypy-boto3-firehose (>=1.35.0,<1.36.0)", "mypy-boto3-fis (>=1.35.0,<1.36.0)", "mypy-boto3-fms (>=1.35.0,<1.36.0)", "mypy-boto3-forecast (>=1.35.0,<1.36.0)", "mypy-boto3-forecastquery (>=1.35.0,<1.36.0)", "mypy-boto3-frauddetector (>=1.35.0,<1.36.0)", "mypy-boto3-freetier (>=1.35.0,<1.36.0)", "mypy-boto3-fsx (>=1.35.0,<1.36.0)", "mypy-boto3-gamelift (>=1.35.0,<1.36.0)", "mypy-boto3-glacier (>=1.35.0,<1.36.0)", "mypy-boto3-globalaccelerator (>=1.35.0,<1.36.0)", "mypy-boto3-glue (>=1.35.0,<1.36.0)", "mypy-boto3-grafana (>=1.35.0,<1.36.0)", "mypy-boto3-greengrass (>=1.35.0,<1.36.0)", "mypy-boto3-greengrassv2 (>=1.35.0,<1.36.0)", "mypy-boto3-groundstation (>=1.35.0,<1.36.0)", "mypy-boto3-guardduty (>=1.35.0,<1.36.0)", "mypy-boto3-health (>=1.35.0,<1.36.0)", "mypy-boto3-healthlake (>=1.35.0,<1.36.0)", "mypy-boto3-iam (>=1.35.0,<1.36.0)", "mypy-boto3-identitystore (>=1.35.0,<1.36.0)", "mypy-boto3-imagebuilder (>=1.35.0,<1.36.0)", "mypy-boto3-importexport (>=1.35.0,<1.36.0)", "mypy-boto3-inspector (>=1.35.0,<1.36.0)", "mypy-boto3-inspector-scan (>=1.35.0,<1.36.0)", "mypy-boto3-inspector2 (>=1.35.0,<1.36.0)", "mypy-boto3-internetmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-iot (>=1.35.0,<1.36.0)", "mypy-boto3-iot-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot-jobs-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-devices (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-projects (>=1.35.0,<1.36.0)", "mypy-boto3-iotanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-iotdeviceadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents-data (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleethub (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleetwise (>=1.35.0,<1.36.0)", "mypy-boto3-iotsecuretunneling (>=1.35.0,<1.36.0)", "mypy-boto3-iotsitewise (>=1.35.0,<1.36.0)", "mypy-boto3-iotthingsgraph (>=1.35.0,<1.36.0)", "mypy-boto3-iottwinmaker (>=1.35.0,<1.36.0)", "mypy-boto3-iotwireless (>=1.35.0,<1.36.0)", "mypy-boto3-ivs (>=1.35.0,<1.36.0)", "mypy-boto3-ivs-realtime (>=1.35.0,<1.36.0)", "mypy-boto3-ivschat (>=1.35.0,<1.36.0)", "mypy-boto3-kafka (>=1.35.0,<1.36.0)", "mypy-boto3-kafkaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-kendra (>=1.35.0,<1.36.0)", "mypy-boto3-kendra-ranking (>=1.35.0,<1.36.0)", "mypy-boto3-keyspaces (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-archived-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-signaling (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisvideo 
(>=1.35.0,<1.36.0)", "mypy-boto3-kms (>=1.35.0,<1.36.0)", "mypy-boto3-lakeformation (>=1.35.0,<1.36.0)", "mypy-boto3-lambda (>=1.35.0,<1.36.0)", "mypy-boto3-launch-wizard (>=1.35.0,<1.36.0)", "mypy-boto3-lex-models (>=1.35.0,<1.36.0)", "mypy-boto3-lex-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-models (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-lightsail (>=1.35.0,<1.36.0)", "mypy-boto3-location (>=1.35.0,<1.36.0)", "mypy-boto3-logs (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutequipment (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutmetrics (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutvision (>=1.35.0,<1.36.0)", "mypy-boto3-m2 (>=1.35.0,<1.36.0)", "mypy-boto3-machinelearning (>=1.35.0,<1.36.0)", "mypy-boto3-macie2 (>=1.35.0,<1.36.0)", "mypy-boto3-mailmanager (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain-query (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-agreement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-catalog (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-deployment (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-entitlement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-reporting (>=1.35.0,<1.36.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconvert (>=1.35.0,<1.36.0)", "mypy-boto3-medialive (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage-vod (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackagev2 (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore-data (>=1.35.0,<1.36.0)", "mypy-boto3-mediatailor (>=1.35.0,<1.36.0)", "mypy-boto3-medical-imaging (>=1.35.0,<1.36.0)", "mypy-boto3-memorydb (>=1.35.0,<1.36.0)", "mypy-boto3-meteringmarketplace (>=1.35.0,<1.36.0)", "mypy-boto3-mgh (>=1.35.0,<1.36.0)", "mypy-boto3-mgn (>=1.35.0,<1.36.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhub-config (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhuborchestrator (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhubstrategy (>=1.35.0,<1.36.0)", "mypy-boto3-mq (>=1.35.0,<1.36.0)", "mypy-boto3-mturk (>=1.35.0,<1.36.0)", "mypy-boto3-mwaa (>=1.35.0,<1.36.0)", "mypy-boto3-neptune (>=1.35.0,<1.36.0)", "mypy-boto3-neptune-graph (>=1.35.0,<1.36.0)", "mypy-boto3-neptunedata (>=1.35.0,<1.36.0)", "mypy-boto3-network-firewall (>=1.35.0,<1.36.0)", "mypy-boto3-networkmanager (>=1.35.0,<1.36.0)", "mypy-boto3-networkmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-nimble (>=1.35.0,<1.36.0)", "mypy-boto3-oam (>=1.35.0,<1.36.0)", "mypy-boto3-omics (>=1.35.0,<1.36.0)", "mypy-boto3-opensearch (>=1.35.0,<1.36.0)", "mypy-boto3-opensearchserverless (>=1.35.0,<1.36.0)", "mypy-boto3-opsworks (>=1.35.0,<1.36.0)", "mypy-boto3-opsworkscm (>=1.35.0,<1.36.0)", "mypy-boto3-organizations (>=1.35.0,<1.36.0)", "mypy-boto3-osis (>=1.35.0,<1.36.0)", "mypy-boto3-outposts (>=1.35.0,<1.36.0)", "mypy-boto3-panorama (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography-data (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-ad (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-scep (>=1.35.0,<1.36.0)", "mypy-boto3-pcs (>=1.35.0,<1.36.0)", "mypy-boto3-personalize (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-events (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-runtime 
(>=1.35.0,<1.36.0)", "mypy-boto3-pi (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-email (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.35.0,<1.36.0)", "mypy-boto3-pipes (>=1.35.0,<1.36.0)", "mypy-boto3-polly (>=1.35.0,<1.36.0)", "mypy-boto3-pricing (>=1.35.0,<1.36.0)", "mypy-boto3-privatenetworks (>=1.35.0,<1.36.0)", "mypy-boto3-proton (>=1.35.0,<1.36.0)", "mypy-boto3-qapps (>=1.35.0,<1.36.0)", "mypy-boto3-qbusiness (>=1.35.0,<1.36.0)", "mypy-boto3-qconnect (>=1.35.0,<1.36.0)", "mypy-boto3-qldb (>=1.35.0,<1.36.0)", "mypy-boto3-qldb-session (>=1.35.0,<1.36.0)", "mypy-boto3-quicksight (>=1.35.0,<1.36.0)", "mypy-boto3-ram (>=1.35.0,<1.36.0)", "mypy-boto3-rbin (>=1.35.0,<1.36.0)", "mypy-boto3-rds (>=1.35.0,<1.36.0)", "mypy-boto3-rds-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-rekognition (>=1.35.0,<1.36.0)", "mypy-boto3-repostspace (>=1.35.0,<1.36.0)", "mypy-boto3-resiliencehub (>=1.35.0,<1.36.0)", "mypy-boto3-resource-explorer-2 (>=1.35.0,<1.36.0)", "mypy-boto3-resource-groups (>=1.35.0,<1.36.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.35.0,<1.36.0)", "mypy-boto3-robomaker (>=1.35.0,<1.36.0)", "mypy-boto3-rolesanywhere (>=1.35.0,<1.36.0)", "mypy-boto3-route53 (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-cluster (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-control-config (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-readiness (>=1.35.0,<1.36.0)", "mypy-boto3-route53domains (>=1.35.0,<1.36.0)", "mypy-boto3-route53profiles (>=1.35.0,<1.36.0)", "mypy-boto3-route53resolver (>=1.35.0,<1.36.0)", "mypy-boto3-rum (>=1.35.0,<1.36.0)", "mypy-boto3-s3 (>=1.35.0,<1.36.0)", "mypy-boto3-s3control (>=1.35.0,<1.36.0)", "mypy-boto3-s3outposts (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-edge (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-geospatial (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-metrics (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-savingsplans (>=1.35.0,<1.36.0)", "mypy-boto3-scheduler (>=1.35.0,<1.36.0)", "mypy-boto3-schemas (>=1.35.0,<1.36.0)", "mypy-boto3-sdb (>=1.35.0,<1.36.0)", "mypy-boto3-secretsmanager (>=1.35.0,<1.36.0)", "mypy-boto3-securityhub (>=1.35.0,<1.36.0)", "mypy-boto3-securitylake (>=1.35.0,<1.36.0)", "mypy-boto3-serverlessrepo (>=1.35.0,<1.36.0)", "mypy-boto3-service-quotas (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog-appregistry (>=1.35.0,<1.36.0)", "mypy-boto3-servicediscovery (>=1.35.0,<1.36.0)", "mypy-boto3-ses (>=1.35.0,<1.36.0)", "mypy-boto3-sesv2 (>=1.35.0,<1.36.0)", "mypy-boto3-shield (>=1.35.0,<1.36.0)", "mypy-boto3-signer (>=1.35.0,<1.36.0)", "mypy-boto3-simspaceweaver (>=1.35.0,<1.36.0)", "mypy-boto3-sms (>=1.35.0,<1.36.0)", "mypy-boto3-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-snow-device-management (>=1.35.0,<1.36.0)", "mypy-boto3-snowball (>=1.35.0,<1.36.0)", "mypy-boto3-sns (>=1.35.0,<1.36.0)", "mypy-boto3-sqs (>=1.35.0,<1.36.0)", "mypy-boto3-ssm (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-contacts (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-incidents (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-quicksetup (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-sap (>=1.35.0,<1.36.0)", "mypy-boto3-sso 
(>=1.35.0,<1.36.0)", "mypy-boto3-sso-admin (>=1.35.0,<1.36.0)", "mypy-boto3-sso-oidc (>=1.35.0,<1.36.0)", "mypy-boto3-stepfunctions (>=1.35.0,<1.36.0)", "mypy-boto3-storagegateway (>=1.35.0,<1.36.0)", "mypy-boto3-sts (>=1.35.0,<1.36.0)", "mypy-boto3-supplychain (>=1.35.0,<1.36.0)", "mypy-boto3-support (>=1.35.0,<1.36.0)", "mypy-boto3-support-app (>=1.35.0,<1.36.0)", "mypy-boto3-swf (>=1.35.0,<1.36.0)", "mypy-boto3-synthetics (>=1.35.0,<1.36.0)", "mypy-boto3-taxsettings (>=1.35.0,<1.36.0)", "mypy-boto3-textract (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-influxdb (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-query (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-write (>=1.35.0,<1.36.0)", "mypy-boto3-tnb (>=1.35.0,<1.36.0)", "mypy-boto3-transcribe (>=1.35.0,<1.36.0)", "mypy-boto3-transfer (>=1.35.0,<1.36.0)", "mypy-boto3-translate (>=1.35.0,<1.36.0)", "mypy-boto3-trustedadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-verifiedpermissions (>=1.35.0,<1.36.0)", "mypy-boto3-voice-id (>=1.35.0,<1.36.0)", "mypy-boto3-vpc-lattice (>=1.35.0,<1.36.0)", "mypy-boto3-waf (>=1.35.0,<1.36.0)", "mypy-boto3-waf-regional (>=1.35.0,<1.36.0)", "mypy-boto3-wafv2 (>=1.35.0,<1.36.0)", "mypy-boto3-wellarchitected (>=1.35.0,<1.36.0)", "mypy-boto3-wisdom (>=1.35.0,<1.36.0)", "mypy-boto3-workdocs (>=1.35.0,<1.36.0)", "mypy-boto3-workmail (>=1.35.0,<1.36.0)", "mypy-boto3-workmailmessageflow (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-thin-client (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-web (>=1.35.0,<1.36.0)", "mypy-boto3-xray (>=1.35.0,<1.36.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.35.0,<1.36.0)", "mypy-boto3-account (>=1.35.0,<1.36.0)", "mypy-boto3-acm (>=1.35.0,<1.36.0)", "mypy-boto3-acm-pca (>=1.35.0,<1.36.0)", "mypy-boto3-amp (>=1.35.0,<1.36.0)", "mypy-boto3-amplify (>=1.35.0,<1.36.0)", "mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)", "mypy-boto3-amplifyuibuilder (>=1.35.0,<1.36.0)", "mypy-boto3-apigateway (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewaymanagementapi (>=1.35.0,<1.36.0)", "mypy-boto3-apigatewayv2 (>=1.35.0,<1.36.0)", "mypy-boto3-appconfig (>=1.35.0,<1.36.0)", "mypy-boto3-appconfigdata (>=1.35.0,<1.36.0)", "mypy-boto3-appfabric (>=1.35.0,<1.36.0)", "mypy-boto3-appflow (>=1.35.0,<1.36.0)", "mypy-boto3-appintegrations (>=1.35.0,<1.36.0)", "mypy-boto3-application-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-application-insights (>=1.35.0,<1.36.0)", "mypy-boto3-application-signals (>=1.35.0,<1.36.0)", "mypy-boto3-applicationcostprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-appmesh (>=1.35.0,<1.36.0)", "mypy-boto3-apprunner (>=1.35.0,<1.36.0)", "mypy-boto3-appstream (>=1.35.0,<1.36.0)", "mypy-boto3-appsync (>=1.35.0,<1.36.0)", "mypy-boto3-apptest (>=1.35.0,<1.36.0)", "mypy-boto3-arc-zonal-shift (>=1.35.0,<1.36.0)", "mypy-boto3-artifact (>=1.35.0,<1.36.0)", "mypy-boto3-athena (>=1.35.0,<1.36.0)", "mypy-boto3-auditmanager (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling (>=1.35.0,<1.36.0)", "mypy-boto3-autoscaling-plans (>=1.35.0,<1.36.0)", "mypy-boto3-b2bi (>=1.35.0,<1.36.0)", "mypy-boto3-backup (>=1.35.0,<1.36.0)", "mypy-boto3-backup-gateway (>=1.35.0,<1.36.0)", "mypy-boto3-batch (>=1.35.0,<1.36.0)", "mypy-boto3-bcm-data-exports (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-billingconductor (>=1.35.0,<1.36.0)", "mypy-boto3-braket (>=1.35.0,<1.36.0)", "mypy-boto3-budgets (>=1.35.0,<1.36.0)", "mypy-boto3-ce 
(>=1.35.0,<1.36.0)", "mypy-boto3-chatbot (>=1.35.0,<1.36.0)", "mypy-boto3-chime (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-identity (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-meetings (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-messaging (>=1.35.0,<1.36.0)", "mypy-boto3-chime-sdk-voice (>=1.35.0,<1.36.0)", "mypy-boto3-cleanrooms (>=1.35.0,<1.36.0)", "mypy-boto3-cleanroomsml (>=1.35.0,<1.36.0)", "mypy-boto3-cloud9 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudcontrol (>=1.35.0,<1.36.0)", "mypy-boto3-clouddirectory (>=1.35.0,<1.36.0)", "mypy-boto3-cloudformation (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront (>=1.35.0,<1.36.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsm (>=1.35.0,<1.36.0)", "mypy-boto3-cloudhsmv2 (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearch (>=1.35.0,<1.36.0)", "mypy-boto3-cloudsearchdomain (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail (>=1.35.0,<1.36.0)", "mypy-boto3-cloudtrail-data (>=1.35.0,<1.36.0)", "mypy-boto3-cloudwatch (>=1.35.0,<1.36.0)", "mypy-boto3-codeartifact (>=1.35.0,<1.36.0)", "mypy-boto3-codebuild (>=1.35.0,<1.36.0)", "mypy-boto3-codecatalyst (>=1.35.0,<1.36.0)", "mypy-boto3-codecommit (>=1.35.0,<1.36.0)", "mypy-boto3-codeconnections (>=1.35.0,<1.36.0)", "mypy-boto3-codedeploy (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-reviewer (>=1.35.0,<1.36.0)", "mypy-boto3-codeguru-security (>=1.35.0,<1.36.0)", "mypy-boto3-codeguruprofiler (>=1.35.0,<1.36.0)", "mypy-boto3-codepipeline (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-connections (>=1.35.0,<1.36.0)", "mypy-boto3-codestar-notifications (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-identity (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-idp (>=1.35.0,<1.36.0)", "mypy-boto3-cognito-sync (>=1.35.0,<1.36.0)", "mypy-boto3-comprehend (>=1.35.0,<1.36.0)", "mypy-boto3-comprehendmedical (>=1.35.0,<1.36.0)", "mypy-boto3-compute-optimizer (>=1.35.0,<1.36.0)", "mypy-boto3-config (>=1.35.0,<1.36.0)", "mypy-boto3-connect (>=1.35.0,<1.36.0)", "mypy-boto3-connect-contact-lens (>=1.35.0,<1.36.0)", "mypy-boto3-connectcampaigns (>=1.35.0,<1.36.0)", "mypy-boto3-connectcases (>=1.35.0,<1.36.0)", "mypy-boto3-connectparticipant (>=1.35.0,<1.36.0)", "mypy-boto3-controlcatalog (>=1.35.0,<1.36.0)", "mypy-boto3-controltower (>=1.35.0,<1.36.0)", "mypy-boto3-cost-optimization-hub (>=1.35.0,<1.36.0)", "mypy-boto3-cur (>=1.35.0,<1.36.0)", "mypy-boto3-customer-profiles (>=1.35.0,<1.36.0)", "mypy-boto3-databrew (>=1.35.0,<1.36.0)", "mypy-boto3-dataexchange (>=1.35.0,<1.36.0)", "mypy-boto3-datapipeline (>=1.35.0,<1.36.0)", "mypy-boto3-datasync (>=1.35.0,<1.36.0)", "mypy-boto3-datazone (>=1.35.0,<1.36.0)", "mypy-boto3-dax (>=1.35.0,<1.36.0)", "mypy-boto3-deadline (>=1.35.0,<1.36.0)", "mypy-boto3-detective (>=1.35.0,<1.36.0)", "mypy-boto3-devicefarm (>=1.35.0,<1.36.0)", "mypy-boto3-devops-guru (>=1.35.0,<1.36.0)", "mypy-boto3-directconnect (>=1.35.0,<1.36.0)", "mypy-boto3-discovery (>=1.35.0,<1.36.0)", "mypy-boto3-dlm (>=1.35.0,<1.36.0)", "mypy-boto3-dms (>=1.35.0,<1.36.0)", "mypy-boto3-docdb (>=1.35.0,<1.36.0)", "mypy-boto3-docdb-elastic (>=1.35.0,<1.36.0)", "mypy-boto3-drs (>=1.35.0,<1.36.0)", "mypy-boto3-ds (>=1.35.0,<1.36.0)", "mypy-boto3-ds-data (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodb (>=1.35.0,<1.36.0)", "mypy-boto3-dynamodbstreams (>=1.35.0,<1.36.0)", "mypy-boto3-ebs (>=1.35.0,<1.36.0)", "mypy-boto3-ec2 (>=1.35.0,<1.36.0)", "mypy-boto3-ec2-instance-connect (>=1.35.0,<1.36.0)", "mypy-boto3-ecr (>=1.35.0,<1.36.0)", "mypy-boto3-ecr-public (>=1.35.0,<1.36.0)", "mypy-boto3-ecs 
(>=1.35.0,<1.36.0)", "mypy-boto3-efs (>=1.35.0,<1.36.0)", "mypy-boto3-eks (>=1.35.0,<1.36.0)", "mypy-boto3-eks-auth (>=1.35.0,<1.36.0)", "mypy-boto3-elastic-inference (>=1.35.0,<1.36.0)", "mypy-boto3-elasticache (>=1.35.0,<1.36.0)", "mypy-boto3-elasticbeanstalk (>=1.35.0,<1.36.0)", "mypy-boto3-elastictranscoder (>=1.35.0,<1.36.0)", "mypy-boto3-elb (>=1.35.0,<1.36.0)", "mypy-boto3-elbv2 (>=1.35.0,<1.36.0)", "mypy-boto3-emr (>=1.35.0,<1.36.0)", "mypy-boto3-emr-containers (>=1.35.0,<1.36.0)", "mypy-boto3-emr-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-entityresolution (>=1.35.0,<1.36.0)", "mypy-boto3-es (>=1.35.0,<1.36.0)", "mypy-boto3-events (>=1.35.0,<1.36.0)", "mypy-boto3-evidently (>=1.35.0,<1.36.0)", "mypy-boto3-finspace (>=1.35.0,<1.36.0)", "mypy-boto3-finspace-data (>=1.35.0,<1.36.0)", "mypy-boto3-firehose (>=1.35.0,<1.36.0)", "mypy-boto3-fis (>=1.35.0,<1.36.0)", "mypy-boto3-fms (>=1.35.0,<1.36.0)", "mypy-boto3-forecast (>=1.35.0,<1.36.0)", "mypy-boto3-forecastquery (>=1.35.0,<1.36.0)", "mypy-boto3-frauddetector (>=1.35.0,<1.36.0)", "mypy-boto3-freetier (>=1.35.0,<1.36.0)", "mypy-boto3-fsx (>=1.35.0,<1.36.0)", "mypy-boto3-gamelift (>=1.35.0,<1.36.0)", "mypy-boto3-glacier (>=1.35.0,<1.36.0)", "mypy-boto3-globalaccelerator (>=1.35.0,<1.36.0)", "mypy-boto3-glue (>=1.35.0,<1.36.0)", "mypy-boto3-grafana (>=1.35.0,<1.36.0)", "mypy-boto3-greengrass (>=1.35.0,<1.36.0)", "mypy-boto3-greengrassv2 (>=1.35.0,<1.36.0)", "mypy-boto3-groundstation (>=1.35.0,<1.36.0)", "mypy-boto3-guardduty (>=1.35.0,<1.36.0)", "mypy-boto3-health (>=1.35.0,<1.36.0)", "mypy-boto3-healthlake (>=1.35.0,<1.36.0)", "mypy-boto3-iam (>=1.35.0,<1.36.0)", "mypy-boto3-identitystore (>=1.35.0,<1.36.0)", "mypy-boto3-imagebuilder (>=1.35.0,<1.36.0)", "mypy-boto3-importexport (>=1.35.0,<1.36.0)", "mypy-boto3-inspector (>=1.35.0,<1.36.0)", "mypy-boto3-inspector-scan (>=1.35.0,<1.36.0)", "mypy-boto3-inspector2 (>=1.35.0,<1.36.0)", "mypy-boto3-internetmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-iot (>=1.35.0,<1.36.0)", "mypy-boto3-iot-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot-jobs-data (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-devices (>=1.35.0,<1.36.0)", "mypy-boto3-iot1click-projects (>=1.35.0,<1.36.0)", "mypy-boto3-iotanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-iotdeviceadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents (>=1.35.0,<1.36.0)", "mypy-boto3-iotevents-data (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleethub (>=1.35.0,<1.36.0)", "mypy-boto3-iotfleetwise (>=1.35.0,<1.36.0)", "mypy-boto3-iotsecuretunneling (>=1.35.0,<1.36.0)", "mypy-boto3-iotsitewise (>=1.35.0,<1.36.0)", "mypy-boto3-iotthingsgraph (>=1.35.0,<1.36.0)", "mypy-boto3-iottwinmaker (>=1.35.0,<1.36.0)", "mypy-boto3-iotwireless (>=1.35.0,<1.36.0)", "mypy-boto3-ivs (>=1.35.0,<1.36.0)", "mypy-boto3-ivs-realtime (>=1.35.0,<1.36.0)", "mypy-boto3-ivschat (>=1.35.0,<1.36.0)", "mypy-boto3-kafka (>=1.35.0,<1.36.0)", "mypy-boto3-kafkaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-kendra (>=1.35.0,<1.36.0)", "mypy-boto3-kendra-ranking (>=1.35.0,<1.36.0)", "mypy-boto3-keyspaces (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-archived-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-media (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-signaling (>=1.35.0,<1.36.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.35.0,<1.36.0)", "mypy-boto3-kinesisvideo (>=1.35.0,<1.36.0)", "mypy-boto3-kms (>=1.35.0,<1.36.0)", "mypy-boto3-lakeformation 
(>=1.35.0,<1.36.0)", "mypy-boto3-lambda (>=1.35.0,<1.36.0)", "mypy-boto3-launch-wizard (>=1.35.0,<1.36.0)", "mypy-boto3-lex-models (>=1.35.0,<1.36.0)", "mypy-boto3-lex-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-models (>=1.35.0,<1.36.0)", "mypy-boto3-lexv2-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.35.0,<1.36.0)", "mypy-boto3-lightsail (>=1.35.0,<1.36.0)", "mypy-boto3-location (>=1.35.0,<1.36.0)", "mypy-boto3-logs (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutequipment (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutmetrics (>=1.35.0,<1.36.0)", "mypy-boto3-lookoutvision (>=1.35.0,<1.36.0)", "mypy-boto3-m2 (>=1.35.0,<1.36.0)", "mypy-boto3-machinelearning (>=1.35.0,<1.36.0)", "mypy-boto3-macie2 (>=1.35.0,<1.36.0)", "mypy-boto3-mailmanager (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain (>=1.35.0,<1.36.0)", "mypy-boto3-managedblockchain-query (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-agreement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-catalog (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-deployment (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-entitlement (>=1.35.0,<1.36.0)", "mypy-boto3-marketplace-reporting (>=1.35.0,<1.36.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconnect (>=1.35.0,<1.36.0)", "mypy-boto3-mediaconvert (>=1.35.0,<1.36.0)", "mypy-boto3-medialive (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackage-vod (>=1.35.0,<1.36.0)", "mypy-boto3-mediapackagev2 (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore (>=1.35.0,<1.36.0)", "mypy-boto3-mediastore-data (>=1.35.0,<1.36.0)", "mypy-boto3-mediatailor (>=1.35.0,<1.36.0)", "mypy-boto3-medical-imaging (>=1.35.0,<1.36.0)", "mypy-boto3-memorydb (>=1.35.0,<1.36.0)", "mypy-boto3-meteringmarketplace (>=1.35.0,<1.36.0)", "mypy-boto3-mgh (>=1.35.0,<1.36.0)", "mypy-boto3-mgn (>=1.35.0,<1.36.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhub-config (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhuborchestrator (>=1.35.0,<1.36.0)", "mypy-boto3-migrationhubstrategy (>=1.35.0,<1.36.0)", "mypy-boto3-mq (>=1.35.0,<1.36.0)", "mypy-boto3-mturk (>=1.35.0,<1.36.0)", "mypy-boto3-mwaa (>=1.35.0,<1.36.0)", "mypy-boto3-neptune (>=1.35.0,<1.36.0)", "mypy-boto3-neptune-graph (>=1.35.0,<1.36.0)", "mypy-boto3-neptunedata (>=1.35.0,<1.36.0)", "mypy-boto3-network-firewall (>=1.35.0,<1.36.0)", "mypy-boto3-networkmanager (>=1.35.0,<1.36.0)", "mypy-boto3-networkmonitor (>=1.35.0,<1.36.0)", "mypy-boto3-oam (>=1.35.0,<1.36.0)", "mypy-boto3-omics (>=1.35.0,<1.36.0)", "mypy-boto3-opensearch (>=1.35.0,<1.36.0)", "mypy-boto3-opensearchserverless (>=1.35.0,<1.36.0)", "mypy-boto3-opsworks (>=1.35.0,<1.36.0)", "mypy-boto3-opsworkscm (>=1.35.0,<1.36.0)", "mypy-boto3-organizations (>=1.35.0,<1.36.0)", "mypy-boto3-osis (>=1.35.0,<1.36.0)", "mypy-boto3-outposts (>=1.35.0,<1.36.0)", "mypy-boto3-panorama (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography (>=1.35.0,<1.36.0)", "mypy-boto3-payment-cryptography-data (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-ad (>=1.35.0,<1.36.0)", "mypy-boto3-pca-connector-scep (>=1.35.0,<1.36.0)", "mypy-boto3-pcs (>=1.35.0,<1.36.0)", "mypy-boto3-personalize (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-events (>=1.35.0,<1.36.0)", "mypy-boto3-personalize-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-pi (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-email 
(>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.35.0,<1.36.0)", "mypy-boto3-pipes (>=1.35.0,<1.36.0)", "mypy-boto3-polly (>=1.35.0,<1.36.0)", "mypy-boto3-pricing (>=1.35.0,<1.36.0)", "mypy-boto3-privatenetworks (>=1.35.0,<1.36.0)", "mypy-boto3-proton (>=1.35.0,<1.36.0)", "mypy-boto3-qapps (>=1.35.0,<1.36.0)", "mypy-boto3-qbusiness (>=1.35.0,<1.36.0)", "mypy-boto3-qconnect (>=1.35.0,<1.36.0)", "mypy-boto3-qldb (>=1.35.0,<1.36.0)", "mypy-boto3-qldb-session (>=1.35.0,<1.36.0)", "mypy-boto3-quicksight (>=1.35.0,<1.36.0)", "mypy-boto3-ram (>=1.35.0,<1.36.0)", "mypy-boto3-rbin (>=1.35.0,<1.36.0)", "mypy-boto3-rds (>=1.35.0,<1.36.0)", "mypy-boto3-rds-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-data (>=1.35.0,<1.36.0)", "mypy-boto3-redshift-serverless (>=1.35.0,<1.36.0)", "mypy-boto3-rekognition (>=1.35.0,<1.36.0)", "mypy-boto3-repostspace (>=1.35.0,<1.36.0)", "mypy-boto3-resiliencehub (>=1.35.0,<1.36.0)", "mypy-boto3-resource-explorer-2 (>=1.35.0,<1.36.0)", "mypy-boto3-resource-groups (>=1.35.0,<1.36.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.35.0,<1.36.0)", "mypy-boto3-robomaker (>=1.35.0,<1.36.0)", "mypy-boto3-rolesanywhere (>=1.35.0,<1.36.0)", "mypy-boto3-route53 (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-cluster (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-control-config (>=1.35.0,<1.36.0)", "mypy-boto3-route53-recovery-readiness (>=1.35.0,<1.36.0)", "mypy-boto3-route53domains (>=1.35.0,<1.36.0)", "mypy-boto3-route53profiles (>=1.35.0,<1.36.0)", "mypy-boto3-route53resolver (>=1.35.0,<1.36.0)", "mypy-boto3-rum (>=1.35.0,<1.36.0)", "mypy-boto3-s3 (>=1.35.0,<1.36.0)", "mypy-boto3-s3control (>=1.35.0,<1.36.0)", "mypy-boto3-s3outposts (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-edge (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-geospatial (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-metrics (>=1.35.0,<1.36.0)", "mypy-boto3-sagemaker-runtime (>=1.35.0,<1.36.0)", "mypy-boto3-savingsplans (>=1.35.0,<1.36.0)", "mypy-boto3-scheduler (>=1.35.0,<1.36.0)", "mypy-boto3-schemas (>=1.35.0,<1.36.0)", "mypy-boto3-sdb (>=1.35.0,<1.36.0)", "mypy-boto3-secretsmanager (>=1.35.0,<1.36.0)", "mypy-boto3-securityhub (>=1.35.0,<1.36.0)", "mypy-boto3-securitylake (>=1.35.0,<1.36.0)", "mypy-boto3-serverlessrepo (>=1.35.0,<1.36.0)", "mypy-boto3-service-quotas (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog (>=1.35.0,<1.36.0)", "mypy-boto3-servicecatalog-appregistry (>=1.35.0,<1.36.0)", "mypy-boto3-servicediscovery (>=1.35.0,<1.36.0)", "mypy-boto3-ses (>=1.35.0,<1.36.0)", "mypy-boto3-sesv2 (>=1.35.0,<1.36.0)", "mypy-boto3-shield (>=1.35.0,<1.36.0)", "mypy-boto3-signer (>=1.35.0,<1.36.0)", "mypy-boto3-simspaceweaver (>=1.35.0,<1.36.0)", "mypy-boto3-sms (>=1.35.0,<1.36.0)", "mypy-boto3-sms-voice (>=1.35.0,<1.36.0)", "mypy-boto3-snow-device-management (>=1.35.0,<1.36.0)", "mypy-boto3-snowball (>=1.35.0,<1.36.0)", "mypy-boto3-sns (>=1.35.0,<1.36.0)", "mypy-boto3-socialmessaging (>=1.35.0,<1.36.0)", "mypy-boto3-sqs (>=1.35.0,<1.36.0)", "mypy-boto3-ssm (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-contacts (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-incidents (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-quicksetup (>=1.35.0,<1.36.0)", "mypy-boto3-ssm-sap (>=1.35.0,<1.36.0)", "mypy-boto3-sso (>=1.35.0,<1.36.0)", "mypy-boto3-sso-admin (>=1.35.0,<1.36.0)", "mypy-boto3-sso-oidc 
(>=1.35.0,<1.36.0)", "mypy-boto3-stepfunctions (>=1.35.0,<1.36.0)", "mypy-boto3-storagegateway (>=1.35.0,<1.36.0)", "mypy-boto3-sts (>=1.35.0,<1.36.0)", "mypy-boto3-supplychain (>=1.35.0,<1.36.0)", "mypy-boto3-support (>=1.35.0,<1.36.0)", "mypy-boto3-support-app (>=1.35.0,<1.36.0)", "mypy-boto3-swf (>=1.35.0,<1.36.0)", "mypy-boto3-synthetics (>=1.35.0,<1.36.0)", "mypy-boto3-taxsettings (>=1.35.0,<1.36.0)", "mypy-boto3-textract (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-influxdb (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-query (>=1.35.0,<1.36.0)", "mypy-boto3-timestream-write (>=1.35.0,<1.36.0)", "mypy-boto3-tnb (>=1.35.0,<1.36.0)", "mypy-boto3-transcribe (>=1.35.0,<1.36.0)", "mypy-boto3-transfer (>=1.35.0,<1.36.0)", "mypy-boto3-translate (>=1.35.0,<1.36.0)", "mypy-boto3-trustedadvisor (>=1.35.0,<1.36.0)", "mypy-boto3-verifiedpermissions (>=1.35.0,<1.36.0)", "mypy-boto3-voice-id (>=1.35.0,<1.36.0)", "mypy-boto3-vpc-lattice (>=1.35.0,<1.36.0)", "mypy-boto3-waf (>=1.35.0,<1.36.0)", "mypy-boto3-waf-regional (>=1.35.0,<1.36.0)", "mypy-boto3-wafv2 (>=1.35.0,<1.36.0)", "mypy-boto3-wellarchitected (>=1.35.0,<1.36.0)", "mypy-boto3-wisdom (>=1.35.0,<1.36.0)", "mypy-boto3-workdocs (>=1.35.0,<1.36.0)", "mypy-boto3-workmail (>=1.35.0,<1.36.0)", "mypy-boto3-workmailmessageflow (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-thin-client (>=1.35.0,<1.36.0)", "mypy-boto3-workspaces-web (>=1.35.0,<1.36.0)", "mypy-boto3-xray (>=1.35.0,<1.36.0)"] amp = ["mypy-boto3-amp (>=1.35.0,<1.36.0)"] amplify = ["mypy-boto3-amplify (>=1.35.0,<1.36.0)"] amplifybackend = ["mypy-boto3-amplifybackend (>=1.35.0,<1.36.0)"] @@ -403,7 +441,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.35.0,<1.36.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.35.0,<1.36.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.35.0,<1.36.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.35.0,<1.36.0)"] -boto3 = ["boto3 (==1.35.34)", "botocore (==1.35.34)"] +boto3 = ["boto3 (==1.35.49)", "botocore (==1.35.49)"] braket = ["mypy-boto3-braket (>=1.35.0,<1.36.0)"] budgets = ["mypy-boto3-budgets (>=1.35.0,<1.36.0)"] ce = ["mypy-boto3-ce (>=1.35.0,<1.36.0)"] @@ -620,7 +658,6 @@ neptunedata = ["mypy-boto3-neptunedata (>=1.35.0,<1.36.0)"] network-firewall = ["mypy-boto3-network-firewall (>=1.35.0,<1.36.0)"] networkmanager = ["mypy-boto3-networkmanager (>=1.35.0,<1.36.0)"] networkmonitor = ["mypy-boto3-networkmonitor (>=1.35.0,<1.36.0)"] -nimble = ["mypy-boto3-nimble (>=1.35.0,<1.36.0)"] oam = ["mypy-boto3-oam (>=1.35.0,<1.36.0)"] omics = ["mypy-boto3-omics (>=1.35.0,<1.36.0)"] opensearch = ["mypy-boto3-opensearch (>=1.35.0,<1.36.0)"] @@ -710,6 +747,7 @@ sms-voice = ["mypy-boto3-sms-voice (>=1.35.0,<1.36.0)"] snow-device-management = ["mypy-boto3-snow-device-management (>=1.35.0,<1.36.0)"] snowball = ["mypy-boto3-snowball (>=1.35.0,<1.36.0)"] sns = ["mypy-boto3-sns (>=1.35.0,<1.36.0)"] +socialmessaging = ["mypy-boto3-socialmessaging (>=1.35.0,<1.36.0)"] sqs = ["mypy-boto3-sqs (>=1.35.0,<1.36.0)"] ssm = ["mypy-boto3-ssm (>=1.35.0,<1.36.0)"] ssm-contacts = ["mypy-boto3-ssm-contacts (>=1.35.0,<1.36.0)"] @@ -755,13 +793,13 @@ xray = ["mypy-boto3-xray (>=1.35.0,<1.36.0)"] [[package]] name = "botocore" -version = "1.35.34" +version = "1.35.50" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.34-py3-none-any.whl", hash = "sha256:ccb0fe397b11b81c9abc0c87029d17298e17bf658d8db5c0c5a551a12a207e7a"}, - {file = "botocore-1.35.34.tar.gz", hash = "sha256:789b6501a3bb4a9591c1fe10da200cc315c1fa5df5ada19c720d8ef06439b3e3"}, + {file = "botocore-1.35.50-py3-none-any.whl", hash = "sha256:965d3b99179ac04aa98e4c4baf4a970ebce77a5e02bb2a0a21cb6304e2bc0955"}, + {file = "botocore-1.35.50.tar.gz", hash = "sha256:136ecef8d5a1088f1ba485c0bbfca40abd42b9f9fe9e11d8cde4e53b4c05b188"}, ] [package.dependencies] @@ -803,69 +841,6 @@ files = [ {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, ] -[[package]] -name = "cassandra-driver" -version = "3.29.1" -description = "DataStax Driver for Apache Cassandra" -optional = true -python-versions = "*" -files = [ - {file = "cassandra-driver-3.29.1.tar.gz", hash = "sha256:38e9c2a2f2a9664bb03f1f852d5fccaeff2163942b5db35dffcf8bf32a51cfe5"}, - {file = "cassandra_driver-3.29.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a8f175c7616a63ca48cb8bd4acc443e2a3d889964d5157cead761f23cc8db7bd"}, - {file = "cassandra_driver-3.29.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7d66398952b9cd21c40edff56e22b6d3bce765edc94b207ddb5896e7bc9aa088"}, - {file = "cassandra_driver-3.29.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bbc6f575ef109ce5d4abfa2033bf36c394032abd83e32ab671159ce68e7e17b"}, - {file = "cassandra_driver-3.29.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78f241af75696adb3e470209e2fbb498804c99e2b197d24d74774eee6784f283"}, - {file = "cassandra_driver-3.29.1-cp310-cp310-win32.whl", hash = "sha256:54d9e651a742d6ca3d874ef8d06a40fa032d2dba97142da2d36f60c5675e39f8"}, - {file = "cassandra_driver-3.29.1-cp310-cp310-win_amd64.whl", hash = "sha256:630dc5423cd40eba0ee9db31065e2238098ff1a25a6b1bd36360f85738f26e4b"}, - {file = "cassandra_driver-3.29.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b841d38c96bb878d31df393954863652d6d3a85f47bcc00fd1d70a5ea73023f"}, - {file = "cassandra_driver-3.29.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:19cc7375f673e215bd4cbbefae2de9f07830be7dabef55284a2d2ff8d8691efe"}, - {file = "cassandra_driver-3.29.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b74b355be3dcafe652fffda8f14f385ccc1a8dae9df28e6080cc660da39b45f"}, - {file = "cassandra_driver-3.29.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e6dac7eddd3f4581859f180383574068a3f113907811b4dad755a8ace4c3fbd"}, - {file = "cassandra_driver-3.29.1-cp311-cp311-win32.whl", hash = "sha256:293a79dba417112b56320ed0013d71fd7520f5fc4a5fd2ac8000c762c6dd5b07"}, - {file = "cassandra_driver-3.29.1-cp311-cp311-win_amd64.whl", hash = "sha256:7c2374fdf1099047a6c9c8329c79d71ad11e61d9cca7de92a0f49655da4bdd8a"}, - {file = "cassandra_driver-3.29.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4431a0c836f33a33c733c84997fbdb6398be005c4d18a8c8525c469fdc29393c"}, - {file = "cassandra_driver-3.29.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d23b08381b171a9e42ace483a82457edcddada9e8367e31677b97538cde2dc34"}, - {file = "cassandra_driver-3.29.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4beb29a0139e63a10a5b9a3c7b72c30a4e6e20c9f0574f9d22c0d4144fe3d348"}, - {file = "cassandra_driver-3.29.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1b206423cc454a78f16b411e7cb641dddc26168ac2e18f2c13665f5f3c89868c"}, - {file = "cassandra_driver-3.29.1-cp312-cp312-win32.whl", hash = "sha256:ac898cca7303a3a2a3070513eee12ef0f1be1a0796935c5b8aa13dae8c0a7f7e"}, - {file = "cassandra_driver-3.29.1-cp312-cp312-win_amd64.whl", hash = "sha256:4ad0c9fb2229048ad6ff8c6ddbf1fdc78b111f2b061c66237c2257fcc4a31b14"}, - {file = "cassandra_driver-3.29.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4282c5deac462e4bb0f6fd0553a33d514dbd5ee99d0812594210080330ddd1a2"}, - {file = "cassandra_driver-3.29.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:41ca7eea069754002418d3bdfbd3dfd150ea12cb9db474ab1a01fa4679a05bcb"}, - {file = "cassandra_driver-3.29.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6639ccb268c4dc754bc45e03551711780d0e02cb298ab26cde1f42b7bcc74f8"}, - {file = "cassandra_driver-3.29.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a9d7d3b1be24a7f113b5404186ccccc977520401303a8fe78ba34134cad2482"}, - {file = "cassandra_driver-3.29.1-cp38-cp38-win32.whl", hash = "sha256:81c8fd556c6e1bb93577e69c1f10a3fadf7ddb93958d226ccbb72389396e9a92"}, - {file = "cassandra_driver-3.29.1-cp38-cp38-win_amd64.whl", hash = "sha256:cfe70ed0f27af949de2767ea9cef4092584e8748759374a55bf23c30746c7b23"}, - {file = "cassandra_driver-3.29.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2c03c1d834ac1a0ae39f9af297a8cd38829003ce910b08b324fb3abe488ce2b"}, - {file = "cassandra_driver-3.29.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9a3e1e2b01f3b7a5cf75c97401bce830071d99c42464352087d7475e0161af93"}, - {file = "cassandra_driver-3.29.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90c42006665a4e490b0766b70f3d637f36a30accbef2da35d6d4081c0e0bafc3"}, - {file = "cassandra_driver-3.29.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c1aca41f45772f9759e8246030907d92bc35fbbdc91525a3cb9b49939b80ad7"}, - {file = "cassandra_driver-3.29.1-cp39-cp39-win32.whl", hash = "sha256:ce4a66245d4a0c8b07fdcb6398698c2c42eb71245fb49cff39435bb702ff7be6"}, - {file = "cassandra_driver-3.29.1-cp39-cp39-win_amd64.whl", hash = "sha256:4cae69ceb1b1d9383e988a1b790115253eacf7867ceb15ed2adb736e3ce981be"}, -] - -[package.dependencies] -geomet = ">=0.1,<0.3" - -[package.extras] -cle = ["cryptography (>=35.0)"] -graph = ["gremlinpython (==3.4.6)"] - -[[package]] -name = "cassio" -version = "0.1.8" -description = "A framework-agnostic Python library to seamlessly integrate Apache Cassandra(R) with ML/LLM/genAI workloads." 
-optional = true -python-versions = "<4.0,>=3.8" -files = [ - {file = "cassio-0.1.8-py3-none-any.whl", hash = "sha256:c09e7c884ba7227ff5277c86f3b0f31c523672ea407f56d093c7227e69c54d94"}, - {file = "cassio-0.1.8.tar.gz", hash = "sha256:4e09929506cb3dd6fad217e89846d0a1a59069afd24b82c72526ef6f2e9271af"}, -] - -[package.dependencies] -cassandra-driver = ">=3.28.0,<4.0.0" -numpy = ">=1.0" -requests = ">=2.31.0,<3.0.0" - [[package]] name = "certifi" version = "2024.7.4" @@ -1082,17 +1057,16 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cohere" -version = "5.10.0" +version = "5.11.2" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "cohere-5.10.0-py3-none-any.whl", hash = "sha256:46e50e3e8514a99cf77b4c022c8077a6205fba948051c33087ddeb66ec706f0a"}, - {file = "cohere-5.10.0.tar.gz", hash = "sha256:21020a7ae4c30f72991ef91566a926a9d7d1485d7abeed7bfa2bd6f35ea34783"}, + {file = "cohere-5.11.2-py3-none-any.whl", hash = "sha256:310adb975817068488ba60d2d39e65b8fd28756df9a4905d5b16a69f79d78db7"}, + {file = "cohere-5.11.2.tar.gz", hash = "sha256:99498e20343947ef1e1e01165312dd2fbf40be4f9eac336f9b71efba55e7ba6e"}, ] [package.dependencies] -boto3 = ">=1.34.0,<2.0.0" fastavro = ">=1.9.4,<2.0.0" httpx = ">=0.21.2" httpx-sse = "0.4.0" @@ -1104,6 +1078,9 @@ tokenizers = ">=0.15,<1" types-requests = ">=2.0.0,<3.0.0" typing_extensions = ">=4.0.0" +[package.extras] +aws = ["boto3 (>=1.34.0,<2.0.0)", "sagemaker (>=2.232.1,<3.0.0)"] + [[package]] name = "colorama" version = "0.4.6" @@ -1325,13 +1302,13 @@ packaging = "*" [[package]] name = "diffusers" -version = "0.30.3" +version = "0.31.0" description = "State-of-the-art diffusion in PyTorch and JAX." optional = true python-versions = ">=3.8.0" files = [ - {file = "diffusers-0.30.3-py3-none-any.whl", hash = "sha256:1b70209e4d2c61223b96a7e13bc4d70869c8b0b68f54a35ce3a67fcf813edeee"}, - {file = "diffusers-0.30.3.tar.gz", hash = "sha256:67c5eb25d5b50bf0742624ef43fe0f6d1e1604f64aad3e8558469cbe89ecf72f"}, + {file = "diffusers-0.31.0-py3-none-any.whl", hash = "sha256:cbc498ae63f4abfc7c3a07649cdcbee229ef2f9a9a1f0d19c9bbaf22f8d30c1f"}, + {file = "diffusers-0.31.0.tar.gz", hash = "sha256:b1d01a73e45d43a0630c299173915dddd69fc50f2ae8f2ab5de4fd245eaed72f"}, ] [package.dependencies] @@ -1345,12 +1322,12 @@ requests = "*" safetensors = ">=0.3.1" [package.extras] -dev = ["GitPython (<3.1.19)", "Jinja2", "accelerate (>=0.31.0)", "compel (==0.1.8)", "datasets", "flax (>=0.4.1)", "hf-doc-builder (>=0.3.0)", "invisible-watermark (>=0.2.0)", "isort (>=5.5.4)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "ruff (==0.1.5)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "torch (>=1.4)", "torchvision", "transformers (>=4.41.2)", "urllib3 (<=2.0.0)"] +dev = ["GitPython (<3.1.19)", "Jinja2", "accelerate (>=0.31.0)", "compel (==0.1.8)", "datasets", "flax (>=0.4.1)", "hf-doc-builder (>=0.3.0)", "invisible-watermark (>=0.2.0)", "isort (>=5.5.4)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "ruff (==0.1.5)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "torch (>=1.4,<2.5.0)", "torchvision", "transformers (>=4.41.2)", 
"urllib3 (<=2.0.0)"] docs = ["hf-doc-builder (>=0.3.0)"] flax = ["flax (>=0.4.1)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)"] quality = ["hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<=2.0.0)"] test = ["GitPython (<3.1.19)", "Jinja2", "compel (==0.1.8)", "datasets", "invisible-watermark (>=0.2.0)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "torchvision", "transformers (>=4.41.2)"] -torch = ["accelerate (>=0.31.0)", "torch (>=1.4)"] +torch = ["accelerate (>=0.31.0)", "torch (>=1.4,<2.5.0)"] training = ["Jinja2", "accelerate (>=0.31.0)", "datasets", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "tensorboard"] [[package]] @@ -1430,18 +1407,18 @@ files = [ [[package]] name = "duckduckgo-search" -version = "6.3.0" +version = "6.3.2" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." optional = true python-versions = ">=3.8" files = [ - {file = "duckduckgo_search-6.3.0-py3-none-any.whl", hash = "sha256:9a231a7b325226811cf7d35a240f3f501e718ae10a1aa0a638cabc80e129dfe7"}, - {file = "duckduckgo_search-6.3.0.tar.gz", hash = "sha256:e9f56955569325a7d9cacda2488ca78bf6629a459e74415892bee560b664f5eb"}, + {file = "duckduckgo_search-6.3.2-py3-none-any.whl", hash = "sha256:cd631275292460d590d1d496995d002bf2fe6db9752713fab17b9e95924ced98"}, + {file = "duckduckgo_search-6.3.2.tar.gz", hash = "sha256:53dbf45f8749bfc67483eb9f281f2e722a5fe644d61c54ed9e551d26cb6bcbf2"}, ] [package.dependencies] click = ">=8.1.7" -primp = ">=0.6.3" +primp = ">=0.6.4" [package.extras] dev = ["mypy (>=1.11.1)", "pytest (>=8.3.1)", "pytest-asyncio (>=0.23.8)", "ruff (>=0.6.1)"] @@ -1449,13 +1426,13 @@ lxml = ["lxml (>=5.2.2)"] [[package]] name = "elevenlabs" -version = "1.9.0" +version = "1.11.0" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "elevenlabs-1.9.0-py3-none-any.whl", hash = "sha256:e8828d154085c717bc5b35c5d8a65d3421655a7670643fc596ba54dc53e17c30"}, - {file = "elevenlabs-1.9.0.tar.gz", hash = "sha256:873baad8f687b865436f2ca6d697a0d75f38796bec1cc0728c9ed589d1d846b2"}, + {file = "elevenlabs-1.11.0-py3-none-any.whl", hash = "sha256:960fd40aa27a12fac300000d8a5c2ff5e54ef71eb63969b216fd12bb18d365d7"}, + {file = "elevenlabs-1.11.0.tar.gz", hash = "sha256:0028f8bc9218adad74b40b5610159f5004e87bc7b268af9c0a361c66a34f4d63"}, ] [package.dependencies] @@ -1466,6 +1443,9 @@ requests = ">=2.20" typing_extensions = ">=4.0.0" websockets = ">=11.0" +[package.extras] +pyaudio = ["pyaudio (>=0.2.14)"] + [[package]] name = "events" version = "0.5" @@ -1478,13 +1458,13 @@ files = [ [[package]] name = "exa-py" -version = "1.4.0" +version = "1.5.0" description = "Python SDK for Exa API." 
optional = true python-versions = "*" files = [ - {file = "exa_py-1.4.0-py3-none-any.whl", hash = "sha256:89e425c44ee8a78e57ca831a2b1c12adfc75be0ac5c1de0c6ab49f91b0c5398b"}, - {file = "exa_py-1.4.0.tar.gz", hash = "sha256:d31e13ab290203c44f7fe66ae34c9ad3329e0d9fb7a346d96b206e1d0f647eed"}, + {file = "exa_py-1.5.0-py3-none-any.whl", hash = "sha256:6b88931cb50350c8c95302a1df262d58345f155a2d6a8b35f20965f1b4474d72"}, + {file = "exa_py-1.5.0.tar.gz", hash = "sha256:f262c72d95204015629a89dd747f6b5a0da9eb1b6bac4d3096a64985103d8e3d"}, ] [package.dependencies] @@ -1732,21 +1712,6 @@ files = [ [package.extras] speedup = ["python-levenshtein (>=0.12)"] -[[package]] -name = "geomet" -version = "0.2.1.post1" -description = "GeoJSON <-> WKT/WKB conversion utilities" -optional = true -python-versions = ">2.6, !=3.3.*, <4" -files = [ - {file = "geomet-0.2.1.post1-py3-none-any.whl", hash = "sha256:a41a1e336b381416d6cbed7f1745c848e91defaa4d4c1bdc1312732e46ffad2b"}, - {file = "geomet-0.2.1.post1.tar.gz", hash = "sha256:91d754f7c298cbfcabd3befdb69c641c27fe75e808b27aa55028605761d17e95"}, -] - -[package.dependencies] -click = "*" -six = "*" - [[package]] name = "ghp-import" version = "2.1.0" @@ -1910,69 +1875,84 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "greenlet" -version = "3.0.3" +version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = true python-versions = ">=3.7" files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = 
"greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = 
"sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = 
"sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = 
"greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] @@ -2261,13 +2241,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.25.1" +version = "0.26.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = true python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.25.1-py3-none-any.whl", hash = "sha256:a5158ded931b3188f54ea9028097312cb0acd50bffaaa2612014c3c526b44972"}, - {file = "huggingface_hub-0.25.1.tar.gz", hash = "sha256:9ff7cb327343211fbd06e2b149b8f362fd1e389454f3f14c6db75a4999ee20ff"}, + {file = "huggingface_hub-0.26.2-py3-none-any.whl", hash = "sha256:98c2a5a8e786c7b2cb6fdeb2740893cba4d53e312572ed3d8afafda65b128c46"}, + {file = "huggingface_hub-0.26.2.tar.gz", hash = "sha256:b100d853465d965733964d123939ba287da60a547087783ddff8a323f340332b"}, ] [package.dependencies] @@ -2280,16 +2260,16 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", 
"types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] -inference = ["aiohttp", "minijinja (>=1.0)"] -quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] +inference = ["aiohttp"] +quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] @@ -2359,6 +2339,17 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "isodate" +version = "0.7.2" +description = "An ISO 8601 date/time/duration parser and formatter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15"}, + {file = "isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6"}, +] + [[package]] name = "jaraco-classes" version = "3.4.0" @@ -2582,6 +2573,26 @@ completion = ["shtab (>=1.1.0)"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] test = ["pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +[[package]] +name = "linkify-it-py" +version = "2.0.3" +description = "Links recognition library with FULL unicode support." +optional = false +python-versions = ">=3.7" +files = [ + {file = "linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048"}, + {file = "linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79"}, +] + +[package.dependencies] +uc-micro-py = "*" + +[package.extras] +benchmark = ["pytest", "pytest-benchmark"] +dev = ["black", "flake8", "isort", "pre-commit", "pyproject-flake8"] +doc = ["myst-parser", "sphinx", "sphinx-book-theme"] +test = ["coverage", "pytest", "pytest-cov"] + [[package]] name = "lxml" version = "5.3.0" @@ -2798,6 +2809,7 @@ files = [ ] [package.dependencies] +linkify-it-py = {version = ">=1,<3", optional = true, markers = "extra == \"linkify\""} mdurl = ">=0.1,<1.0" [package.extras] @@ -2914,22 +2926,22 @@ urllib3 = ">=1.26.0,<2.0.0" [[package]] name = "marshmallow" -version = "3.22.0" +version = "3.23.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, - {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, + {file = "marshmallow-3.23.0-py3-none-any.whl", hash = "sha256:82f20a2397834fe6d9611b241f2f7e7b680ed89c49f84728a1ad937be6b4bdf4"}, + {file = "marshmallow-3.23.0.tar.gz", hash = "sha256:98d8827a9f10c03d44ead298d2e99c6aea8197df18ccfad360dae7f89a50da2e"}, ] [package.dependencies] packaging = ">=17.0" [package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.1.3)", "sphinx-issues (==5.0.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "simplejson"] [[package]] name = "marshmallow-enum" @@ -2945,6 +2957,134 @@ files = [ [package.dependencies] marshmallow = ">=2.0.0" +[[package]] +name = "mdformat" +version = "0.7.18" +description = "CommonMark compliant Markdown formatter" +optional = false +python-versions = ">=3.9" +files = [ + {file = "mdformat-0.7.18-py3-none-any.whl", hash = "sha256:0060cff2a9d53a2c29a4b2be56ff90cc210d2e8506684fa482c9846166f05e22"}, + {file = "mdformat-0.7.18.tar.gz", hash = "sha256:42cba8bc5a6bb12d50bdf7c1e470c1f837a8ab8ce81571d4e53b9e62051f6e4f"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""} +markdown-it-py = ">=1.0.0,<4.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "mdformat-admon" +version = "2.0.6" +description = "An mdformat plugin for admonitions." +optional = false +python-versions = ">=3.8.5" +files = [ + {file = "mdformat_admon-2.0.6-py3-none-any.whl", hash = "sha256:2fda60659d11210d6cb07ee0df11bf68bae84f75bbd471de8b786accdb674ede"}, + {file = "mdformat_admon-2.0.6.tar.gz", hash = "sha256:009aa1c5e171cf03ee65588579e7f5310929a67712127020a0369a645339f0e8"}, +] + +[package.dependencies] +mdformat = ">=0.7.17" +mdit-py-plugins = ">=0.4.1" + +[package.extras] +dev = ["pre-commit"] +test = ["pytest (>=7.4.4)", "pytest-beartype (>=0.0.2)", "pytest-cov (>=4.1.0)"] + +[[package]] +name = "mdformat-footnote" +version = "0.1.1" +description = "An mdformat plugin for parsing/validating footnotes" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdformat_footnote-0.1.1-py3-none-any.whl", hash = "sha256:30063aaa0f74c36257c2e80fa0cf00d7c71a5277f27e98109e8765ae8678a95b"}, + {file = "mdformat_footnote-0.1.1.tar.gz", hash = "sha256:3b85c4c84119f15f0b651df89c99a4f6f119fc46dca6b33f7edf4f09655d1126"}, +] + +[package.dependencies] +mdformat = ">=0.7.8,<0.8.0" +mdit-py-plugins = "*" + +[package.extras] +dev = ["pre-commit"] +test = ["coverage", "pytest (>=6.0,<7.0)", "pytest-cov"] + +[[package]] +name = "mdformat-frontmatter" +version = "2.0.8" +description = "An mdformat plugin for parsing / ignoring frontmatter." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "mdformat_frontmatter-2.0.8-py3-none-any.whl", hash = "sha256:577396695af96ad66dff1ff781284ff3764a10be3ab8659f2ef842ab42264ebb"}, + {file = "mdformat_frontmatter-2.0.8.tar.gz", hash = "sha256:c11190ae3f9c91ada78fbd820f5b221631b520484e0b644715aa0f6ed7f097ed"}, +] + +[package.dependencies] +mdformat = ">=0.7.16,<0.8.0" +mdit-py-plugins = ">=0.4.0" +"ruamel.yaml" = "*" + +[package.extras] +dev = ["flit (>=3.9)", "pre-commit", "python-semantic-release (>=8.3.0)"] +test = ["coverage", "pytest (>=7.3)", "pytest-cov"] + +[[package]] +name = "mdformat-gfm" +version = "0.3.7" +description = "Mdformat plugin for GitHub Flavored Markdown compatibility" +optional = false +python-versions = ">=3.9" +files = [ + {file = "mdformat_gfm-0.3.7-py3-none-any.whl", hash = "sha256:c40966ef26e334226961ab77908dc9697ed63668f6383a18c80cca1cb4bb5c10"}, + {file = "mdformat_gfm-0.3.7.tar.gz", hash = "sha256:7deb2cd1d5334541af5454e52e116639796fc441ddc08e4415f967955950fe10"}, +] + +[package.dependencies] +markdown-it-py = {version = "*", extras = ["linkify"]} +mdformat = ">=0.7.5,<0.8.0" +mdformat-tables = ">=0.4.0" +mdit-py-plugins = ">=0.2.0" + +[[package]] +name = "mdformat-tables" +version = "1.0.0" +description = "An mdformat plugin for rendering tables." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "mdformat_tables-1.0.0-py3-none-any.whl", hash = "sha256:94cd86126141b2adc3b04c08d1441eb1272b36c39146bab078249a41c7240a9a"}, + {file = "mdformat_tables-1.0.0.tar.gz", hash = "sha256:a57db1ac17c4a125da794ef45539904bb8a9592e80557d525e1f169c96daa2c8"}, +] + +[package.dependencies] +mdformat = ">=0.7.5,<0.8.0" +wcwidth = ">=0.2.13" + +[package.extras] +test = ["coverage", "pytest (>=6.0,<7.0)", "pytest-cov"] + +[[package]] +name = "mdit-py-plugins" +version = "0.4.2" +description = "Collection of plugins for markdown-it-py" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636"}, + {file = "mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5"}, +] + +[package.dependencies] +markdown-it-py = ">=1.0.0,<4.0.0" + +[package.extras] +code-style = ["pre-commit"] +rtd = ["myst-parser", "sphinx-book-theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "mdurl" version = "0.1.2" @@ -3072,13 +3212,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.39" +version = "9.5.42" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.39-py3-none-any.whl", hash = "sha256:0f2f68c8db89523cb4a59705cd01b4acd62b2f71218ccb67e1e004e560410d2b"}, - {file = "mkdocs_material-9.5.39.tar.gz", hash = "sha256:25faa06142afa38549d2b781d475a86fb61de93189f532b88e69bf11e5e5c3be"}, + {file = "mkdocs_material-9.5.42-py3-none-any.whl", hash = "sha256:452a7c5d21284b373f36b981a2cbebfff59263feebeede1bc28652e9c5bbe316"}, + {file = "mkdocs_material-9.5.42.tar.gz", hash = "sha256:92779b5e9b5934540c574c11647131d217dc540dce72b05feeda088c8eb1b8f2"}, ] [package.dependencies] @@ -3126,13 +3266,13 @@ mkdocs = ">=1.2" [[package]] name = "mkdocstrings" -version = "0.26.1" +version = "0.26.2" description = "Automatic documentation from sources, for MkDocs." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "mkdocstrings-0.26.1-py3-none-any.whl", hash = "sha256:29738bfb72b4608e8e55cc50fb8a54f325dc7ebd2014e4e3881a49892d5983cf"}, - {file = "mkdocstrings-0.26.1.tar.gz", hash = "sha256:bb8b8854d6713d5348ad05b069a09f3b79edbc6a0f33a34c6821141adb03fe33"}, + {file = "mkdocstrings-0.26.2-py3-none-any.whl", hash = "sha256:1248f3228464f3b8d1a15bd91249ce1701fe3104ac517a5f167a0e01ca850ba5"}, + {file = "mkdocstrings-0.26.2.tar.gz", hash = "sha256:34a8b50f1e6cfd29546c6c09fbe02154adfb0b361bb758834bf56aa284ba876e"}, ] [package.dependencies] @@ -3201,13 +3341,13 @@ files = [ [[package]] name = "moto" -version = "5.0.16" +version = "5.0.18" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "moto-5.0.16-py2.py3-none-any.whl", hash = "sha256:4ce1f34830307f7b3d553d77a7ef26066ab3b70006203d4226b048c9d11a3be4"}, - {file = "moto-5.0.16.tar.gz", hash = "sha256:f4afb176a964cd7a70da9bc5e053d43109614ce3cab26044bcbb53610435dff4"}, + {file = "moto-5.0.18-py2.py3-none-any.whl", hash = "sha256:8e25401f7d7910e19a732b417e0d503ef86cf4de9114a273dd62679a42f3be1c"}, + {file = "moto-5.0.18.tar.gz", hash = "sha256:8a7ad2f53a2e6cc9db2ff65c0e0d4b5d7e78bc00b825c9e1ff6cc394371e76e9"}, ] [package.dependencies] @@ -3225,7 +3365,7 @@ werkzeug = ">=0.5,<2.2.0 || >2.2.0,<2.2.1 || >2.2.1" xmltodict = "*" [package.extras] -all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)", "setuptools"] +all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "jsonschema", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)", "setuptools"] apigateway = ["PyYAML (>=5.1)", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)"] apigatewayv2 = ["PyYAML (>=5.1)", "openapi-spec-validator (>=0.5.0)"] appsync = ["graphql-core"] @@ -3239,6 +3379,7 @@ events = ["jsonpath-ng"] glue = ["pyparsing (>=3.0.7)"] iotdata = ["jsondiff (>=1.1.2)"] proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)", "setuptools"] +quicksight = ["jsonschema"] resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.5.6)", "pyparsing (>=3.0.7)"] s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.5.6)"] s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.5.6)"] @@ -3606,46 +3747,50 @@ files = [ [[package]] name = "nvidia-cublas-cu12" -version = "12.1.3.1" +version = "12.4.5.8" description = "CUBLAS native runtime libraries" optional = false python-versions = ">=3" files = [ - {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, - {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = 
"sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, + {file = "nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3"}, + {file = "nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b"}, + {file = "nvidia_cublas_cu12-12.4.5.8-py3-none-win_amd64.whl", hash = "sha256:5a796786da89203a0657eda402bcdcec6180254a8ac22d72213abc42069522dc"}, ] [[package]] name = "nvidia-cuda-cupti-cu12" -version = "12.1.105" +version = "12.4.127" description = "CUDA profiling tools runtime libs." optional = false python-versions = ">=3" files = [ - {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, - {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, + {file = "nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a"}, + {file = "nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb"}, + {file = "nvidia_cuda_cupti_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:5688d203301ab051449a2b1cb6690fbe90d2b372f411521c86018b950f3d7922"}, ] [[package]] name = "nvidia-cuda-nvrtc-cu12" -version = "12.1.105" +version = "12.4.127" description = "NVRTC native runtime libraries" optional = false python-versions = ">=3" files = [ - {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, - {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, + {file = "nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198"}, + {file = "nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338"}, + {file = "nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:a961b2f1d5f17b14867c619ceb99ef6fcec12e46612711bcec78eb05068a60ec"}, ] [[package]] name = "nvidia-cuda-runtime-cu12" -version = "12.1.105" +version = "12.4.127" description = "CUDA Runtime native Libraries" optional = false python-versions = ">=3" files = [ - {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, - {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, + {file = "nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3"}, + {file = "nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5"}, + {file = "nvidia_cuda_runtime_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:09c2e35f48359752dfa822c09918211844a3d93c100a715d79b59591130c5e1e"}, ] [[package]] @@ -3664,35 +3809,41 @@ nvidia-cublas-cu12 = "*" [[package]] 
name = "nvidia-cufft-cu12" -version = "11.0.2.54" +version = "11.2.1.3" description = "CUFFT native runtime libraries" optional = false python-versions = ">=3" files = [ - {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, - {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, + {file = "nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399"}, + {file = "nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9"}, + {file = "nvidia_cufft_cu12-11.2.1.3-py3-none-win_amd64.whl", hash = "sha256:d802f4954291101186078ccbe22fc285a902136f974d369540fd4a5333d1440b"}, ] +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + [[package]] name = "nvidia-curand-cu12" -version = "10.3.2.106" +version = "10.3.5.147" description = "CURAND native runtime libraries" optional = false python-versions = ">=3" files = [ - {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, - {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, + {file = "nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9"}, + {file = "nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b"}, + {file = "nvidia_curand_cu12-10.3.5.147-py3-none-win_amd64.whl", hash = "sha256:f307cc191f96efe9e8f05a87096abc20d08845a841889ef78cb06924437f6771"}, ] [[package]] name = "nvidia-cusolver-cu12" -version = "11.4.5.107" +version = "11.6.1.9" description = "CUDA solver native runtime libraries" optional = false python-versions = ">=3" files = [ - {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, - {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, + {file = "nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e"}, + {file = "nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260"}, + {file = "nvidia_cusolver_cu12-11.6.1.9-py3-none-win_amd64.whl", hash = "sha256:e77314c9d7b694fcebc84f58989f3aa4fb4cb442f12ca1a9bde50f5e8f6d1b9c"}, ] [package.dependencies] @@ -3702,13 +3853,14 @@ nvidia-nvjitlink-cu12 = "*" [[package]] name = "nvidia-cusparse-cu12" -version = "12.1.0.106" +version = "12.3.1.170" description = "CUSPARSE native runtime libraries" optional = false python-versions = ">=3" files = [ - {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, - {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, + {file = 
"nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3"}, + {file = "nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1"}, + {file = "nvidia_cusparse_cu12-12.3.1.170-py3-none-win_amd64.whl", hash = "sha256:9bc90fb087bc7b4c15641521f31c0371e9a612fc2ba12c338d3ae032e6b6797f"}, ] [package.dependencies] @@ -3716,36 +3868,36 @@ nvidia-nvjitlink-cu12 = "*" [[package]] name = "nvidia-nccl-cu12" -version = "2.20.5" +version = "2.21.5" description = "NVIDIA Collective Communication Library (NCCL) Runtime" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, - {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, + {file = "nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:8579076d30a8c24988834445f8d633c697d42397e92ffc3f63fa26766d25e0a0"}, ] [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.6.77" +version = "12.4.127" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:3bf10d85bb1801e9c894c6e197e44dd137d2a0a9e43f8450e9ad13f2df0dd52d"}, - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9ae346d16203ae4ea513be416495167a0101d33d2d14935aa9c1829a3fb45142"}, - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:410718cd44962bed862a31dd0318620f6f9a8b28a6291967bcfcb446a6516771"}, + {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83"}, + {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57"}, + {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1"}, ] [[package]] name = "nvidia-nvtx-cu12" -version = "12.1.105" +version = "12.4.127" description = "NVIDIA Tools Extension" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, - {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, + {file = "nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3"}, + {file = "nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a"}, + {file = "nvidia_nvtx_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:641dccaaa1139f3ffb0d3164b4b84f9d253397e38246a4f2f36728b48566d485"}, ] [[package]] @@ -3764,13 +3916,13 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "openai" -version = "1.51.1" +version = "1.52.2" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.51.1-py3-none-any.whl", 
hash = "sha256:035ba637bef7523282b5b8d9f2f5fdc0bb5bc18d52af2bfc7f64e4a7b0a169fb"}, - {file = "openai-1.51.1.tar.gz", hash = "sha256:a4908d68e0a1f4bcb45cbaf273c5fbdc3a4fa6239bb75128b58b94f7d5411563"}, + {file = "openai-1.52.2-py3-none-any.whl", hash = "sha256:57e9e37bc407f39bb6ec3a27d7e8fb9728b2779936daa1fcf95df17d3edfaccc"}, + {file = "openai-1.52.2.tar.gz", hash = "sha256:87b7d0f69d85f5641678d414b7ee3082363647a5c66a462ed7f3ccb59582da0d"}, ] [package.dependencies] @@ -4035,13 +4187,13 @@ files = [ [[package]] name = "pgvector" -version = "0.3.5" +version = "0.3.6" description = "pgvector support for Python" optional = true python-versions = ">=3.8" files = [ - {file = "pgvector-0.3.5-py3-none-any.whl", hash = "sha256:56cca90392e596ea18873c593ec858a1984a77d16d1f82b8d0c180e79ef1018f"}, - {file = "pgvector-0.3.5.tar.gz", hash = "sha256:e876c9ee382c4c2f7ee57691a4c4015d688c7222e47448ce310ded03ecfafe2f"}, + {file = "pgvector-0.3.6-py3-none-any.whl", hash = "sha256:f6c269b3c110ccb7496bac87202148ed18f34b390a0189c783e351062400a75a"}, + {file = "pgvector-0.3.6.tar.gz", hash = "sha256:31d01690e6ea26cea8a633cde5f0f55f5b246d9c8292d68efdef8c22ec994ade"}, ] [package.dependencies] @@ -4049,95 +4201,90 @@ numpy = "*" [[package]] name = "pillow" -version = "10.4.0" +version = "11.0.0" description = "Python Imaging Library (Fork)" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, - {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, - {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, - {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, - {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, - {file = 
"pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, - {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, - {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, - {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, - {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, - {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, - {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = 
"sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, - {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, - {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, - {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, - {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, - {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, - {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, - {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, - {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, - {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, + {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, + {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = 
"sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, + {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, + {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, + {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, + {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, + {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, + {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, + {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, + {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, + {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, + {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, + {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, + {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, + {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, + {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, + {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, + {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, + {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, + {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = 
"sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, + {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, + {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, + {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, + {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx 
(>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] @@ -4199,22 +4346,22 @@ type = ["mypy (>=1.8)"] [[package]] name = "playwright" -version = "1.47.0" +version = "1.48.0" description = "A high-level API to automate web browsers" optional = true python-versions = ">=3.8" files = [ - {file = "playwright-1.47.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:f205df24edb925db1a4ab62f1ab0da06f14bb69e382efecfb0deedc4c7f4b8cd"}, - {file = "playwright-1.47.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7fc820faf6885f69a52ba4ec94124e575d3c4a4003bf29200029b4a4f2b2d0ab"}, - {file = "playwright-1.47.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:8e212dc472ff19c7d46ed7e900191c7a786ce697556ac3f1615986ec3aa00341"}, - {file = "playwright-1.47.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:a1935672531963e4b2a321de5aa59b982fb92463ee6e1032dd7326378e462955"}, - {file = "playwright-1.47.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0a1b61473d6f7f39c5d77d4800b3cbefecb03344c90b98f3fbcae63294ad249"}, - {file = "playwright-1.47.0-py3-none-win32.whl", hash = "sha256:1b977ed81f6bba5582617684a21adab9bad5676d90a357ebf892db7bdf4a9974"}, - {file = "playwright-1.47.0-py3-none-win_amd64.whl", hash = "sha256:0ec1056042d2e86088795a503347407570bffa32cbe20748e5d4c93dba085280"}, + {file = "playwright-1.48.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:082bce2739f1078acc7d0734da8cc0e23eb91b7fae553f3316d733276f09a6b1"}, + {file = "playwright-1.48.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7da2eb51a19c7f3b523e9faa9d98e7af92e52eb983a099979ea79c9668e3cbf7"}, + {file = "playwright-1.48.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:115b988d1da322358b77bc3bf2d3cc90f8c881e691461538e7df91614c4833c9"}, + {file = "playwright-1.48.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:8dabb80e62f667fe2640a8b694e26a7b884c0b4803f7514a3954fc849126227b"}, + {file = "playwright-1.48.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ff8303409ebed76bed4c3d655340320b768817d900ba208b394fdd7d7939a5c"}, + {file = "playwright-1.48.0-py3-none-win32.whl", hash = "sha256:85598c360c590076d4f435525be991246d74a905b654ac19d26eab7ed9b98b2d"}, + {file = "playwright-1.48.0-py3-none-win_amd64.whl", hash = "sha256:e0e87b0c4dc8fce83c725dd851aec37bc4e882bb225ec8a96bd83cf32d4f1623"}, ] [package.dependencies] -greenlet = "3.0.3" +greenlet = "3.1.1" pyee = "12.0.0" [[package]] @@ -4264,13 +4411,13 @@ files = [ [[package]] name = "pre-commit" -version = "4.0.0" +version = "4.0.1" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
optional = false python-versions = ">=3.9" files = [ - {file = "pre_commit-4.0.0-py2.py3-none-any.whl", hash = "sha256:0ca2341cf94ac1865350970951e54b1a50521e57b7b500403307aed4315a1234"}, - {file = "pre_commit-4.0.0.tar.gz", hash = "sha256:5d9807162cc5537940f94f266cbe2d716a75cfad0d78a317a92cac16287cfed6"}, + {file = "pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878"}, + {file = "pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2"}, ] [package.dependencies] @@ -4282,19 +4429,19 @@ virtualenv = ">=20.10.0" [[package]] name = "primp" -version = "0.6.3" +version = "0.6.5" description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" optional = true python-versions = ">=3.8" files = [ - {file = "primp-0.6.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bdbe6a7cdaaf5c9ed863432a941f4a75bd4c6ff626cbc8d32fc232793c70ba06"}, - {file = "primp-0.6.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:eeb53eb987bdcbcd85740633470255cab887d921df713ffa12a36a13366c9cdb"}, - {file = "primp-0.6.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78da53d3c92a8e3f05bd3286ac76c291f1b6fe5e08ea63b7ba92b0f9141800bb"}, - {file = "primp-0.6.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:86337b44deecdac752bd8112909987fc9fa9b894f30191c80a164dc8f895da53"}, - {file = "primp-0.6.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d3cd9a22b97f3eae42b2a5fb99f00480daf4cd6d9b139e05b0ffb03f7cc037f3"}, - {file = "primp-0.6.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7732bec917e2d3c48a31cdb92e1250f4ad6203a1aa4f802bd9abd84f2286a1e0"}, - {file = "primp-0.6.3-cp38-abi3-win_amd64.whl", hash = "sha256:1e4113c34b86c676ae321af185f03a372caef3ee009f1682c2d62e30ec87348c"}, - {file = "primp-0.6.3.tar.gz", hash = "sha256:17d30ebe26864defad5232dbbe1372e80483940012356e1f68846bb182282039"}, + {file = "primp-0.6.5-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b2bab0250d38c02a437c75ed94b99e3a8c03a281ba9a4c33780ccd04999c741b"}, + {file = "primp-0.6.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:0aedb33515d86df4c1f91b9d5772e1b74d1593dfe8978c258b136c171f8ab94c"}, + {file = "primp-0.6.5-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8850be30fbfefeb76c1eb5859a55c5f11c8c285a4a03ebf99c73fea964b2a"}, + {file = "primp-0.6.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9b71ac07a79cbb401390e2ee5a5767d0bf202a956a533fd084957020fcb2a64"}, + {file = "primp-0.6.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:79c65fcb07b36bd0f8c3966a4a18c4f6a6d624a33a0b0133b0f0cc8d0050c351"}, + {file = "primp-0.6.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5a55e450bb52a88f4a2891db50577c8f20b134d17d37e93361ee51de1a6fe8c8"}, + {file = "primp-0.6.5-cp38-abi3-win_amd64.whl", hash = "sha256:cbe584de5c177b9f0656b77e88721296ae6151b6c4565e2e0a342b6473990f27"}, + {file = "primp-0.6.5.tar.gz", hash = "sha256:abb46c579ae682f34c1f339faac38709c85ab76c056ec3711a26823334ab8124"}, ] [package.extras] @@ -4339,83 +4486,78 @@ files = [ [[package]] name = "psycopg2-binary" -version = "2.9.9" +version = "2.9.10" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"}, - {file = 
"psycopg2_binary-2.9.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-win32.whl", hash = "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682"}, - {file = "psycopg2_binary-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc"}, - {file = 
"psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, - {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420"}, - {file = 
"psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692"}, - {file = "psycopg2_binary-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-win32.whl", hash = "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5"}, - {file = "psycopg2_binary-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-win32.whl", hash = "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90"}, - {file = "psycopg2_binary-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957"}, + {file = "psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:3e9c76f0ac6f92ecfc79516a8034a544926430f7b080ec5a0537bca389ee0906"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad26b467a405c798aaa1458ba09d7e2b6e5f96b1ce0ac15d82fd9f95dc38a92"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:270934a475a0e4b6925b5f804e3809dd5f90f8613621d062848dd82f9cd62007"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48b338f08d93e7be4ab2b5f1dbe69dc5e9ef07170fe1f86514422076d9c010d0"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4152f8f76d2023aac16285576a9ecd2b11a9895373a1f10fd9db54b3ff06b4"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32581b3020c72d7a421009ee1c6bf4a131ef5f0a968fab2e2de0c9d2bb4577f1"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2ce3e21dc3437b1d960521eca599d57408a695a0d3c26797ea0f72e834c7ffe5"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e984839e75e0b60cfe75e351db53d6db750b00de45644c5d1f7ee5d1f34a1ce5"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c4745a90b78e51d9ba06e2088a2fe0c693ae19cc8cb051ccda44e8df8a6eb53"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-win32.whl", hash = "sha256:e5720a5d25e3b99cd0dc5c8a440570469ff82659bb09431c1439b92caf184d3b"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:3c18f74eb4386bf35e92ab2354a12c17e5eb4d9798e4c0ad3a00783eae7cd9f1"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:04392983d0bb89a8717772a193cfaac58871321e3ec69514e1c4e0d4957b5aff"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1a6784f0ce3fec4edc64e985865c17778514325074adf5ad8f80636cd029ef7c"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5f86c56eeb91dc3135b3fd8a95dc7ae14c538a2f3ad77a19645cf55bab1799c"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b3d2491d4d78b6b14f76881905c7a8a8abcf974aad4a8a0b065273a0ed7a2cb"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2286791ececda3a723d1910441c793be44625d86d1a4e79942751197f4d30341"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:512d29bb12608891e349af6a0cccedce51677725a921c07dba6342beaf576f9a"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5a507320c58903967ef7384355a4da7ff3f28132d679aeb23572753cbf2ec10b"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d4fa1079cab9018f4d0bd2db307beaa612b0d13ba73b5c6304b9fe2fb441ff7"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:851485a42dbb0bdc1edcdabdb8557c09c9655dfa2ca0460ff210522e073e319e"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:35958ec9e46432d9076286dda67942ed6d968b9c3a6a2fd62b48939d1d78bf68"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-win32.whl", hash = "sha256:ecced182e935529727401b24d76634a357c71c9275b356efafd8a2a91ec07392"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:ee0e8c683a7ff25d23b55b11161c2663d4b099770f6085ff0a20d4505778d6b4"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:880845dfe1f85d9d5f7c412efea7a08946a46894537e4e5d091732eb1d34d9a0"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9440fa522a79356aaa482aa4ba500b65f28e5d0e63b801abf6aa152a29bd842a"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3923c1d9870c49a2d44f795df0c889a22380d36ef92440ff618ec315757e539"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b2c956c028ea5de47ff3a8d6b3cc3330ab45cf0b7c3da35a2d6ff8420896526"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f758ed67cab30b9a8d2833609513ce4d3bd027641673d4ebc9c067e4d208eec1"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd9b4f2cfab88ed4a9106192de509464b75a906462fb846b936eabe45c2063e"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dc08420625b5a20b53551c50deae6e231e6371194fa0651dbe0fb206452ae1f"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7cd730dfa7c36dbe8724426bf5612798734bff2d3c3857f36f2733f5bfc7c00"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:155e69561d54d02b3c3209545fb08938e27889ff5a10c19de8d23eb5a41be8a5"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3cc28a6fd5a4a26224007712e79b81dbaee2ffb90ff406256158ec4d7b52b47"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-win32.whl", hash = "sha256:ec8a77f521a17506a24a5f626cb2aee7850f9b69a0afe704586f63a464f3cd64"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:18c5ee682b9c6dd3696dad6e54cc7ff3a1a9020df6a5c0f861ef8bfd338c3ca0"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:26540d4a9a4e2b096f1ff9cce51253d0504dca5a85872c7f7be23be5a53eb18d"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e217ce4d37667df0bc1c397fdcd8de5e81018ef305aed9415c3b093faaeb10fb"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:245159e7ab20a71d989da00f280ca57da7641fa2cdcf71749c193cea540a74f7"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c4ded1a24b20021ebe677b7b08ad10bf09aac197d6943bfe6fec70ac4e4690d"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3abb691ff9e57d4a93355f60d4f4c1dd2d68326c968e7db17ea96df3c023ef73"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8608c078134f0b3cbd9f89b34bd60a943b23fd33cc5f065e8d5f840061bd0673"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:230eeae2d71594103cd5b93fd29d1ace6420d0b86f4778739cb1a5a32f607d1f"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:056470c3dc57904bbf63d6f534988bafc4e970ffd50f6271fc4ee7daad9498a5"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aa0e31fa4bb82578f3a6c74a73c273367727de397a7a0f07bd83cbea696baa"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8de718c0e1c4b982a54b41779667242bc630b2197948405b7bd8ce16bcecac92"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5c370b1e4975df846b0277b4deba86419ca77dbc25047f535b0bb03d1a544d44"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ffe8ed017e4ed70f68b7b371d84b7d4a790368db9203dfc2d222febd3a9c8863"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8aecc5e80c63f7459a1a2ab2c64df952051df196294d9f739933a9f6687e86b3"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:7a813c8bdbaaaab1f078014b9b0b13f5de757e2b5d9be6403639b298a04d218b"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00924255d7fc916ef66e4bf22f354a940c67179ad3fd7067d7a0a9c84d2fbfc"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7559bce4b505762d737172556a4e6ea8a9998ecac1e39b5233465093e8cee697"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8b58f0a96e7a1e341fc894f62c1177a7c83febebb5ff9123b579418fdc8a481"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6b269105e59ac96aba877c1707c600ae55711d9dcd3fc4b5012e4af68e30c648"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:79625966e176dc97ddabc142351e0409e28acf4660b88d1cf6adb876d20c490d"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8aabf1c1a04584c168984ac678a668094d831f152859d06e055288fa515e4d30"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:19721ac03892001ee8fdd11507e6a2e01f4e37014def96379411ca99d78aeb2c"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7f5d859928e635fa3ce3477704acee0f667b3a3d3e4bb109f2b18d4005f38287"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-win32.whl", hash = "sha256:3216ccf953b3f267691c90c6fe742e45d890d8272326b4a8b20850a03d05b7b8"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:30e34c4e97964805f715206c7b789d54a78b70f3ff19fbe590104b71c45600e5"}, ] [[package]] @@ -4822,34 +4964,35 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pypdf" -version = "5.0.1" +version = "5.1.0" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" optional = true python-versions = ">=3.8" files = [ - {file = "pypdf-5.0.1-py3-none-any.whl", hash = "sha256:ff8a32da6c7a63fea9c32fa4dd837cdd0db7966adf6c14f043e3f12592e992db"}, - {file = "pypdf-5.0.1.tar.gz", hash = "sha256:a361c3c372b4a659f9c8dd438d5ce29a753c79c620dc6e1fd66977651f5547ea"}, + {file = "pypdf-5.1.0-py3-none-any.whl", hash = "sha256:3bd4f503f4ebc58bae40d81e81a9176c400cbbac2ba2d877367595fb524dfdfc"}, + {file = "pypdf-5.1.0.tar.gz", hash = "sha256:425a129abb1614183fd1aca6982f650b47f8026867c0ce7c4b9f281c443d2740"}, ] [package.dependencies] typing_extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] -crypto = ["PyCryptodome", "cryptography"] +crypto = ["cryptography"] +cryptodome = ["PyCryptodome"] dev = ["black", "flit", "pip-tools", "pre-commit (<2.18.0)", "pytest-cov", "pytest-socket", "pytest-timeout", "pytest-xdist", "wheel"] docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"] -full = ["Pillow (>=8.0.0)", "PyCryptodome", "cryptography"] +full = ["Pillow (>=8.0.0)", "cryptography"] image = ["Pillow (>=8.0.0)"] [[package]] name = "pyright" -version = "1.1.383" +version = "1.1.386" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.383-py3-none-any.whl", hash = "sha256:d864d1182a313f45aaf99e9bfc7d2668eeabc99b29a556b5344894fd73cb1959"}, - {file = "pyright-1.1.383.tar.gz", hash = "sha256:1df7f12407f3710c9c6df938d98ec53f70053e6c6bbf71ce7bcb038d42f10070"}, + {file = "pyright-1.1.386-py3-none-any.whl", hash = "sha256:7071ac495593b2258ccdbbf495f1a5c0e5f27951f6b429bed4e8b296eb5cd21d"}, + {file = "pyright-1.1.386.tar.gz", hash = "sha256:8e9975e34948ba5f8e07792a9c9d2bdceb2c6c0b61742b068d2229ca2bc4a9d9"}, ] [package.dependencies] @@ -5108,13 +5251,13 @@ pyyaml = "*" [[package]] name = "qdrant-client" -version = "1.11.3" +version = "1.12.0" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.11.3-py3-none-any.whl", hash = "sha256:fcf040b58203ed0827608c9ad957da671b1e31bf27e5e35b322c1b577b6ec133"}, - {file = "qdrant_client-1.11.3.tar.gz", hash = "sha256:5a155d8281a224ac18acef512eae2f5e9a0907975d52a7627ec66fa6586d0285"}, + {file = "qdrant_client-1.12.0-py3-none-any.whl", hash = 
"sha256:6db5ac1e244272f8b67e9dbc0da557816efef6f919cd8ee134469c751fe72c03"}, + {file = "qdrant_client-1.12.0.tar.gz", hash = "sha256:f443db39988aa6ff7c7a605770084ddaca8fdb5f8b22f77c10e661bdf0974cda"}, ] [package.dependencies] @@ -5154,13 +5297,13 @@ md = ["cmarkgfm (>=0.8.0)"] [[package]] name = "redis" -version = "5.1.1" +version = "5.2.0" description = "Python client for Redis database and key-value store" optional = true python-versions = ">=3.8" files = [ - {file = "redis-5.1.1-py3-none-any.whl", hash = "sha256:f8ea06b7482a668c6475ae202ed8d9bcaa409f6e87fb77ed1043d912afd62e24"}, - {file = "redis-5.1.1.tar.gz", hash = "sha256:f6c997521fedbae53387307c5d0bf784d9acc28d9f1d058abeac566ec4dbed72"}, + {file = "redis-5.2.0-py3-none-any.whl", hash = "sha256:ae174f2bb3b1bf2b09d54bf3e51fbc1469cf6c10aa03e21141f51969801a7897"}, + {file = "redis-5.2.0.tar.gz", hash = "sha256:0b1087665a771b1ff2e003aa5bdd354f15a70c9e25d5a7dbf9c722c16528a7b0"}, ] [package.dependencies] @@ -5328,13 +5471,13 @@ idna2008 = ["idna"] [[package]] name = "rich" -version = "13.9.2" +version = "13.9.3" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" files = [ - {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, - {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, + {file = "rich-13.9.3-py3-none-any.whl", hash = "sha256:9836f5096eb2172c9e77df411c1b009bace4193d6a481d534fea75ebba758283"}, + {file = "rich-13.9.3.tar.gz", hash = "sha256:bc1e01b899537598cf02579d2b9f4a415104d3fc439313a7a2c165d76557a08e"}, ] [package.dependencies] @@ -5359,31 +5502,108 @@ files = [ [package.dependencies] pyasn1 = ">=0.1.3" +[[package]] +name = "ruamel-yaml" +version = "0.18.6" +description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, +] + +[package.dependencies] +"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.13\""} + +[package.extras] +docs = ["mercurial (>5.7)", "ryd"] +jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] + +[[package]] +name = "ruamel-yaml-clib" +version = "0.2.8" +description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +optional = false +python-versions = ">=3.6" +files = [ + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, + {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, + {file = 
"ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win32.whl", hash = "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, + {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, +] + [[package]] name = "ruff" -version = "0.6.9" +version = "0.7.1" 
description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, - {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, - {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, - {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, - {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, - {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, - {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, + {file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"}, + {file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"}, + {file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"}, + {file = 
"ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"}, + {file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"}, + {file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"}, + {file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"}, + {file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"}, ] [[package]] @@ -5830,60 +6050,68 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.35" +version = "2.0.36" description = "Database Abstraction Library" optional = true python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, - {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, - {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, + {file = 
"SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, + {file = 
"SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = 
"sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, + {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, + {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, ] [package.dependencies] @@ -5896,7 +6124,7 @@ aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] @@ -5927,13 +6155,13 @@ files = [ [[package]] name = "sympy" -version = "1.13.3" +version = "1.13.1" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, - {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, + {file = "sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8"}, + {file = "sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f"}, ] [package.dependencies] @@ -6183,31 +6411,28 @@ files = [ [[package]] name = "torch" -version = "2.4.1" +version = "2.5.0" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.8.0" files = [ - {file = "torch-2.4.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:362f82e23a4cd46341daabb76fba08f04cd646df9bfaf5da50af97cb60ca4971"}, - {file = "torch-2.4.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:e8ac1985c3ff0f60d85b991954cfc2cc25f79c84545aead422763148ed2759e3"}, - {file = "torch-2.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91e326e2ccfb1496e3bee58f70ef605aeb27bd26be07ba64f37dcaac3d070ada"}, - {file = "torch-2.4.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d36a8ef100f5bff3e9c3cea934b9e0d7ea277cb8210c7152d34a9a6c5830eadd"}, - {file = "torch-2.4.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:0b5f88afdfa05a335d80351e3cea57d38e578c8689f751d35e0ff36bce872113"}, - {file = "torch-2.4.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:ef503165f2341942bfdf2bd520152f19540d0c0e34961232f134dc59ad435be8"}, - {file = "torch-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:092e7c2280c860eff762ac08c4bdcd53d701677851670695e0c22d6d345b269c"}, - {file = "torch-2.4.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:ddddbd8b066e743934a4200b3d54267a46db02106876d21cf31f7da7a96f98ea"}, - {file = "torch-2.4.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:fdc4fe11db3eb93c1115d3e973a27ac7c1a8318af8934ffa36b0370efe28e042"}, - {file = "torch-2.4.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:18835374f599207a9e82c262153c20ddf42ea49bc76b6eadad8e5f49729f6e4d"}, - {file = "torch-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:ebea70ff30544fc021d441ce6b219a88b67524f01170b1c538d7d3ebb5e7f56c"}, - {file = "torch-2.4.1-cp312-none-macosx_11_0_arm64.whl", hash = 
"sha256:72b484d5b6cec1a735bf3fa5a1c4883d01748698c5e9cfdbeb4ffab7c7987e0d"}, - {file = "torch-2.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c99e1db4bf0c5347107845d715b4aa1097e601bdc36343d758963055e9599d93"}, - {file = "torch-2.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b57f07e92858db78c5b72857b4f0b33a65b00dc5d68e7948a8494b0314efb880"}, - {file = "torch-2.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:f18197f3f7c15cde2115892b64f17c80dbf01ed72b008020e7da339902742cf6"}, - {file = "torch-2.4.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:5fc1d4d7ed265ef853579caf272686d1ed87cebdcd04f2a498f800ffc53dab71"}, - {file = "torch-2.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:40f6d3fe3bae74efcf08cb7f8295eaddd8a838ce89e9d26929d4edd6d5e4329d"}, - {file = "torch-2.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:c9299c16c9743001ecef515536ac45900247f4338ecdf70746f2461f9e4831db"}, - {file = "torch-2.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:6bce130f2cd2d52ba4e2c6ada461808de7e5eccbac692525337cfb4c19421846"}, - {file = "torch-2.4.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:a38de2803ee6050309aac032676536c3d3b6a9804248537e38e098d0e14817ec"}, + {file = "torch-2.5.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:7f179373a047b947dec448243f4e6598a1c960fa3bb978a9a7eecd529fbc363f"}, + {file = "torch-2.5.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:15fbc95e38d330e5b0ef1593b7bc0a19f30e5bdad76895a5cffa1a6a044235e9"}, + {file = "torch-2.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:f499212f1cffea5d587e5f06144630ed9aa9c399bba12ec8905798d833bd1404"}, + {file = "torch-2.5.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:c54db1fade17287aabbeed685d8e8ab3a56fea9dd8d46e71ced2da367f09a49f"}, + {file = "torch-2.5.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:499a68a756d3b30d10f7e0f6214dc3767b130b797265db3b1c02e9094e2a07be"}, + {file = "torch-2.5.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:9f3df8138a1126a851440b7d5a4869bfb7c9cc43563d64fd9d96d0465b581024"}, + {file = "torch-2.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:b81da3bdb58c9de29d0e1361e52f12fcf10a89673f17a11a5c6c7da1cb1a8376"}, + {file = "torch-2.5.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:ba135923295d564355326dc409b6b7f5bd6edc80f764cdaef1fb0a1b23ff2f9c"}, + {file = "torch-2.5.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:2dd40c885a05ef7fe29356cca81be1435a893096ceb984441d6e2c27aff8c6f4"}, + {file = "torch-2.5.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:bc52d603d87fe1da24439c0d5fdbbb14e0ae4874451d53f0120ffb1f6c192727"}, + {file = "torch-2.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:ea718746469246cc63b3353afd75698a288344adb55e29b7f814a5d3c0a7c78d"}, + {file = "torch-2.5.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:6de1fd253e27e7f01f05cd7c37929ae521ca23ca4620cfc7c485299941679112"}, + {file = "torch-2.5.0-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:83dcf518685db20912b71fc49cbddcc8849438cdb0e9dcc919b02a849e2cd9e8"}, + {file = "torch-2.5.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:65e0a60894435608334d68c8811e55fd8f73e5bf8ee6f9ccedb0064486a7b418"}, + {file = "torch-2.5.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:38c21ff1bd39f076d72ab06e3c88c2ea6874f2e6f235c9450816b6c8e7627094"}, + {file = "torch-2.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:ce4baeba9804da5a346e210b3b70826f5811330c343e4fe1582200359ee77fe5"}, + {file = "torch-2.5.0-cp39-none-macosx_11_0_arm64.whl", hash = 
"sha256:03e53f577a96e4d41aca472da8faa40e55df89d2273664af390ce1f570e885bd"}, ] [package.dependencies] @@ -6215,25 +6440,26 @@ filelock = "*" fsspec = "*" jinja2 = "*" networkx = "*" -nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cublas-cu12 = {version = "12.4.5.8", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.4.127", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.4.127", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.4.127", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cudnn-cu12 = {version = "9.1.0.70", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -setuptools = "*" -sympy = "*" -triton = {version = "3.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.13\""} +nvidia-cufft-cu12 = {version = "11.2.1.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.5.147", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.6.1.9", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.3.1.170", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.21.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvjitlink-cu12 = {version = "12.4.127", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.4.127", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +setuptools = {version = "*", markers = "python_version >= \"3.12\""} +sympy = {version = "1.13.1", markers = "python_version >= \"3.9\""} +triton = {version = "3.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.13\""} typing-extensions = ">=4.8.0" [package.extras] opt-einsum = ["opt-einsum (>=3.3)"] -optree = ["optree (>=0.11.0)"] +optree = ["optree (>=0.12.0)"] [[package]] name = "tqdm" @@ -6281,13 +6507,13 @@ gui = ["Gooey 
(>=1.0.1)"] [[package]] name = "transformers" -version = "4.45.2" +version = "4.46.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = true python-versions = ">=3.8.0" files = [ - {file = "transformers-4.45.2-py3-none-any.whl", hash = "sha256:c551b33660cfc815bae1f9f097ecfd1e65be623f13c6ee0dda372bd881460210"}, - {file = "transformers-4.45.2.tar.gz", hash = "sha256:72bc390f6b203892561f05f86bbfaa0e234aab8e927a83e62b9d92ea7e3ae101"}, + {file = "transformers-4.46.0-py3-none-any.whl", hash = "sha256:e161268ae8bee315eb9e9b4c0b27f1bd6980f91e0fc292d75249193d339704c0"}, + {file = "transformers-4.46.0.tar.gz", hash = "sha256:3a9e2eb537094db11c3652334d281afa4766c0e5091c4dcdb454e9921bb0d2b7"}, ] [package.dependencies] @@ -6305,13 +6531,13 @@ tqdm = ">=4.27" [package.extras] accelerate = ["accelerate (>=0.26.0)"] agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] -all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision"] audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] benchmark = ["optimum-benchmark (>=0.3.0)"] codecarbon = ["codecarbon (==1.2.0)"] deepspeed = ["accelerate (>=0.26.0)", "deepspeed (>=0.9.3)"] deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", 
"protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.20,<0.21)", "urllib3 (<2.0.0)"] dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "libcst", "librosa", "nltk (<=3.8.1)", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu 
(>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] @@ -6345,21 +6571,21 @@ torch = ["accelerate (>=0.26.0)", "torch"] torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.20,<0.21)", "torch", "tqdm (>=4.27)"] -video = ["av (==9.2.0)", "decord (==0.6.0)"] +video = ["av (==9.2.0)"] vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "triton" -version = "3.0.0" +version = "3.1.0" description = "A language and compiler for custom Deep Learning operations" optional = false python-versions = "*" files = [ - {file = "triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a"}, - {file = "triton-3.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ce8520437c602fb633f1324cc3871c47bee3b67acf9756c1a66309b60e3216c"}, - {file = "triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb"}, - {file = "triton-3.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bcbf3b1c48af6a28011a5c40a5b3b9b5330530c3827716b5fbf6d7adcc1e53e9"}, - {file = "triton-3.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6e5727202f7078c56f91ff13ad0c1abab14a0e7f2c87e91b12b6f64f3e8ae609"}, + {file = "triton-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b0dd10a925263abbe9fa37dcde67a5e9b2383fc269fdf59f5657cac38c5d1d8"}, + {file = "triton-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f34f6e7885d1bf0eaaf7ba875a5f0ce6f3c13ba98f9503651c1e6dc6757ed5c"}, + {file = "triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8182f42fd8080a7d39d666814fa36c5e30cc00ea7eeeb1a2983dbb4c99a0fdc"}, + {file = "triton-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dadaca7fc24de34e180271b5cf864c16755702e9f63a16f62df714a8099126a"}, + {file = "triton-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aafa9a20cd0d9fee523cd4504aa7131807a864cd77dcf6efe7e981f18b8c6c11"}, ] [package.dependencies] @@ -6452,21 +6678,21 @@ files = [ [[package]] name = "typos" -version = "1.26.0" +version = "1.26.8" description = "Source Code Spelling Correction" optional = false python-versions = ">=3.7" files = [ - {file = "typos-1.26.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:4a49e389f89b7b53aaea5f956134317507ddd92a14d7ec380cb918390e04aac8"}, - {file = "typos-1.26.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2c50b4094f2252f5b4552514776f455914ae619f6a7418de002af51d981aaa7a"}, - {file = "typos-1.26.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0f33cd305042642d6a0bdc5e09fa6338385a7cb608e1385fd2b6d22d482e2071"}, - {file = "typos-1.26.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb97608d6a77470bf36d378341c81ded61dd14a10baa15e46431e12688e3d504"}, - {file = "typos-1.26.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:775802bc32203a68c475e47fb9dfa69ffe427bd85d3e27b03baa5a455bc2b4de"}, - {file = "typos-1.26.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:72412d456c1c4a1cac8dd7f56410fe14aad22ab883b49aba704cbdb58b5cda0e"}, - {file = "typos-1.26.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e247246548c862e819ae541713471f60830020b051364fc6f216606945322427"}, - {file = "typos-1.26.0-py3-none-win32.whl", hash = "sha256:7488c7124ac52a66ed79e74bdd11248b87d295391937b833f64e45c6c6237c82"}, - {file = "typos-1.26.0-py3-none-win_amd64.whl", hash = "sha256:f4157c7b778e2128121f65279fa763329556e12188d200828f3e38c3029a6764"}, - {file = "typos-1.26.0.tar.gz", hash = "sha256:97f7bc943aafa040bca272cd5c0b679503876041898b7b99339c9604c0794786"}, + {file = "typos-1.26.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:77093a1aa72b3fa34b1914e73d149fa70c02157fbe39bd13d20a0cd64a7b7fdf"}, + {file = "typos-1.26.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3d9769ec255ef3291fcfadc2d270773f6491eeaf0f3120e370ccdb08d218e600"}, + {file = "typos-1.26.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:526514dda6ac262626226ad0adbe7388c2a690c0ba972118c2c3eb245cf12e10"}, + {file = "typos-1.26.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a926bc4b2ba76eda508da0d1f46eeaf4f1446cffff5cb0721aa0246e7d20654f"}, + {file = "typos-1.26.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985af8d669dd2fa124fc180de57ca82a5138d6ee49827784605c4717d0609105"}, + {file = "typos-1.26.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:34eb03b2ab984ec2e3f59f16994b7c9f7bc9f8af3d0b013e9d344ebf59018df6"}, + {file = "typos-1.26.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:77006a3246d749d7fc0e46c35075607cd94b0fc64d19797ed840e6009fad5379"}, + {file = "typos-1.26.8-py3-none-win32.whl", hash = "sha256:b7282faf0504dd5a1484c0edbaa7daf5b2965e264d92dd1754161691fd77ed29"}, + {file = "typos-1.26.8-py3-none-win_amd64.whl", hash = "sha256:3da10e7560856a042de65b099f5f9bc846f3545ae3b121172872a533bea69e06"}, + {file = "typos-1.26.8.tar.gz", hash = "sha256:b750d19531f6299d1c88d09af8db6998a5d92c2cca039220773140b3eb887cf3"}, ] [[package]] @@ -6497,6 +6723,20 @@ tzdata = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] devenv = ["check-manifest", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] +[[package]] +name = "uc-micro-py" +version = "1.0.3" +description = "Micro subset of unicode data files for linkify-it-py projects." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a"}, + {file = "uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5"}, +] + +[package.extras] +test = ["coverage", "pytest", "pytest-cov"] + [[package]] name = "uritemplate" version = "4.1.1" @@ -6557,13 +6797,13 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [[package]] name = "voyageai" -version = "0.2.3" +version = "0.2.4" description = "" optional = true python-versions = "<4.0.0,>=3.7.1" files = [ - {file = "voyageai-0.2.3-py3-none-any.whl", hash = "sha256:59c4958bd991e83cedb5a82d5e14ac698ce67e42713ea10467631a48ee272b15"}, - {file = "voyageai-0.2.3.tar.gz", hash = "sha256:28322aa7a64cdaa774be6fcf3e4fd6a08694ea25acd5fadd1eff1b8ef8dab68a"}, + {file = "voyageai-0.2.4-py3-none-any.whl", hash = "sha256:e3070e5c78dec89adae43231334b4637aa88933dad99b1c33d3219fdfc94dfa4"}, + {file = "voyageai-0.2.4.tar.gz", hash = "sha256:b9911d8629e8a4e363291c133482fead49a3536afdf1e735f3ab3aaccd8d250d"}, ] [package.dependencies] @@ -6620,6 +6860,17 @@ files = [ [package.extras] watchmedo = ["PyYAML (>=3.10)"] +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + [[package]] name = "websockets" version = "13.0" @@ -6736,7 +6987,7 @@ watchdog = ["watchdog (>=2.3)"] name = "wrapt" version = "1.16.0" description = "Module for decorators, wrappers and monkey patching." 
-optional = true +optional = false python-versions = ">=3.6" files = [ {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, @@ -6941,7 +7192,7 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linke test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [extras] -all = ["anthropic", "astrapy", "beautifulsoup4", "boto3", "cohere", "diffusers", "duckduckgo-search", "elevenlabs", "exa-py", "google-generativeai", "mail-parser", "markdownify", "marqo", "ollama", "opensearch-py", "opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-sdk", "pandas", "pgvector", "pillow", "pinecone-client", "playwright", "psycopg2-binary", "pusher", "pymongo", "pypdf", "qdrant-client", "redis", "snowflake-sqlalchemy", "sqlalchemy", "tavily-python", "trafilatura", "transformers", "voyageai"] +all = ["anthropic", "astrapy", "azure-core", "azure-storage-blob", "beautifulsoup4", "boto3", "cohere", "diffusers", "duckduckgo-search", "elevenlabs", "exa-py", "google-generativeai", "mail-parser", "markdownify", "marqo", "ollama", "opensearch-py", "opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-sdk", "pandas", "pgvector", "pillow", "pinecone-client", "playwright", "psycopg2-binary", "pusher", "pymongo", "pypdf", "qdrant-client", "redis", "snowflake-sqlalchemy", "sqlalchemy", "tavily-python", "trafilatura", "transformers", "voyageai"] drivers-embedding-amazon-bedrock = ["boto3"] drivers-embedding-amazon-sagemaker = ["boto3"] drivers-embedding-cohere = ["cohere"] @@ -6952,6 +7203,8 @@ drivers-embedding-voyageai = ["voyageai"] drivers-event-listener-amazon-iot = ["boto3"] drivers-event-listener-amazon-sqs = ["boto3"] drivers-event-listener-pusher = ["pusher"] +drivers-file-manager-amazon-s3 = ["boto3"] +drivers-file-manager-griptape-cloud = ["azure-core", "azure-storage-blob"] drivers-image-generation-huggingface = ["diffusers", "pillow"] drivers-memory-conversation-amazon-dynamodb = ["boto3"] drivers-memory-conversation-redis = ["redis"] @@ -6993,4 +7246,4 @@ loaders-sql = ["sqlalchemy"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "c9c19f558028a242c64f750556e3ec7e44779777a620faadd319615457c35513" +content-hash = "816a925736967c12b42ffddce1e48909348d11e7d341127d5e9e1224b44ba00e" diff --git a/pyproject.toml b/pyproject.toml index 2034ee411..7b7b5ce85 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "griptape" -version = "0.33.1" +version = "0.34.0" description = "Modular Python framework for LLM workflows, tools, memory, and data." 
authors = ["Griptape "] license = "Apache 2.0" @@ -30,10 +30,10 @@ requests = "^2.32.0" filetype = "^1.2" # drivers -cohere = { version = "~5.10.0", optional = true } -anthropic = { version = "^0.35.0", optional = true } +cohere = { version = "^5.11.2", optional = true } +anthropic = { version = "^0.37.1", optional = true } transformers = { version = "^4.41.1", optional = true} -huggingface-hub = { version = "^0.25.1", optional = true } +huggingface-hub = { version = "^0.26.2", optional = true } boto3 = { version = "^1.34.119", optional = true } snowflake-sqlalchemy = { version = "^1.6.1", optional = true } pinecone-client = { version = "^3", optional = true } @@ -61,15 +61,18 @@ opentelemetry-api = {version = "^1.25.0", optional = true} opentelemetry-instrumentation = {version = "^0.46b0", optional = true} opentelemetry-instrumentation-threading = {version = "^0.46b0", optional = true} opentelemetry-exporter-otlp-proto-http = {version = "^1.25.0", optional = true} -diffusers = {version = "^0.30.3", optional = true} +diffusers = {version = "^0.31.0", optional = true} tavily-python = {version = "^0.5.0", optional = true} exa-py = {version = "^1.1.4", optional = true} +azure-core = "^1.31.0" +azure-storage-blob = "^12.23.1" # loaders pandas = {version = "^1.3", optional = true} pypdf = {version = "^5.0.1", optional = true} -pillow = {version = "^10.2.0", optional = true} +pillow = {version = "^11.0.0", optional = true} mail-parser = {version = "^3.15.0", optional = true} +wrapt = "^1.16.0" [tool.poetry.extras] drivers-prompt-cohere = ["cohere"] @@ -145,6 +148,9 @@ drivers-observability-datadog = [ drivers-image-generation-huggingface = ["diffusers", "pillow"] +drivers-file-manager-amazon-s3 = ["boto3"] +drivers-file-manager-griptape-cloud = ["azure-core", "azure-storage-blob"] + loaders-pdf = ["pypdf"] loaders-image = ["pillow"] loaders-email = ["mail-parser"] @@ -188,6 +194,8 @@ all = [ "opentelemetry-exporter-otlp-proto-http", "diffusers", "pillow", + "azure-core", + "azure-storage-blob", # loaders "pandas", @@ -217,11 +225,16 @@ torch = "^2.4.1" optional = true [tool.poetry.group.dev.dependencies] -ruff = "^0.6.0" +ruff = "^0.7.0" pyright = "^1.1.376" pre-commit = "^4.0.0" boto3-stubs = {extras = ["bedrock", "iam", "opensearch", "s3", "sagemaker", "sqs", "iot-data", "dynamodb", "redshift-data"], version = "^1.34.105"} typos = "^1.22.9" +mdformat = "^0.7.17" +mdformat-gfm = "^0.3.6" +mdformat-frontmatter = "^2.0.8" +mdformat-footnote = "^0.1.1" +mdformat-admon = "^2.0.6" [tool.poetry.group.docs] @@ -230,8 +243,8 @@ optional = true [tool.poetry.group.docs.dependencies] mkdocs = "^1.5.2" mkdocs-material = "^9.2.8" -mkdocs-glightbox = ">=0.3.4,<0.5.0" -mkdocstrings = {extras = ["python"], version = ">=0.25.2,<0.27.0"} +mkdocs-glightbox = "^0.4.0" +mkdocstrings = {extras = ["python"], version = "^0.26.2"} mkdocs-gen-files = "^0.5.0" mkdocs-literate-nav = "^0.6.0" mkdocs-section-index = "^0.3.6" diff --git a/tests/integration/rules/test_rule.py b/tests/integration/rules/test_rule.py index a62263c57..91c427653 100644 --- a/tests/integration/rules/test_rule.py +++ b/tests/integration/rules/test_rule.py @@ -5,7 +5,7 @@ class TestRule: @pytest.fixture( - autouse=True, params=StructureTester.RULE_CAPABLE_PROMPT_DRIVERS, ids=StructureTester.prompt_driver_id_fn + autouse=True, params=StructureTester.RULE_CAPABLE_PROMPT_DRIVERS, ids=StructureTester.generate_prompt_driver_id ) def structure_tester(self, request): from griptape.rules import Rule diff --git 
a/tests/integration/tasks/test_csv_extraction_task.py b/tests/integration/tasks/test_csv_extraction_task.py index db58b9615..3e2186fae 100644 --- a/tests/integration/tasks/test_csv_extraction_task.py +++ b/tests/integration/tasks/test_csv_extraction_task.py @@ -7,7 +7,7 @@ class TestCsvExtractionTask: @pytest.fixture( autouse=True, params=StructureTester.CSV_EXTRACTION_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.engines import CsvExtractionEngine diff --git a/tests/integration/tasks/test_json_extraction_task.py b/tests/integration/tasks/test_json_extraction_task.py index 115f805da..e13fa7aa5 100644 --- a/tests/integration/tasks/test_json_extraction_task.py +++ b/tests/integration/tasks/test_json_extraction_task.py @@ -7,7 +7,7 @@ class TestJsonExtractionTask: @pytest.fixture( autouse=True, params=StructureTester.JSON_EXTRACTION_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from schema import Schema diff --git a/tests/integration/tasks/test_prompt_task.py b/tests/integration/tasks/test_prompt_task.py index 1d223b4ca..95106a9a0 100644 --- a/tests/integration/tasks/test_prompt_task.py +++ b/tests/integration/tasks/test_prompt_task.py @@ -5,7 +5,9 @@ class TestPromptTask: @pytest.fixture( - autouse=True, params=StructureTester.PROMPT_TASK_CAPABLE_PROMPT_DRIVERS, ids=StructureTester.prompt_driver_id_fn + autouse=True, + params=StructureTester.PROMPT_TASK_CAPABLE_PROMPT_DRIVERS, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.structures import Agent diff --git a/tests/integration/tasks/test_rag_task.py b/tests/integration/tasks/test_rag_task.py index ce3a9140d..255e608f3 100644 --- a/tests/integration/tasks/test_rag_task.py +++ b/tests/integration/tasks/test_rag_task.py @@ -9,7 +9,7 @@ class TestRagTask: @pytest.fixture( autouse=True, params=StructureTester.TEXT_SUMMARY_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.artifacts import TextArtifact diff --git a/tests/integration/tasks/test_text_summary_task.py b/tests/integration/tasks/test_text_summary_task.py index ff6597ba0..811ec39f6 100644 --- a/tests/integration/tasks/test_text_summary_task.py +++ b/tests/integration/tasks/test_text_summary_task.py @@ -7,7 +7,7 @@ class TestTextSummaryTask: @pytest.fixture( autouse=True, params=StructureTester.TEXT_SUMMARY_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.engines.summary.prompt_summary_engine import PromptSummaryEngine diff --git a/tests/integration/tasks/test_tool_task.py b/tests/integration/tasks/test_tool_task.py index 426dde995..712e23d26 100644 --- a/tests/integration/tasks/test_tool_task.py +++ b/tests/integration/tasks/test_tool_task.py @@ -5,7 +5,9 @@ class TestToolTask: @pytest.fixture( - autouse=True, params=StructureTester.TOOL_TASK_CAPABLE_PROMPT_DRIVERS, ids=StructureTester.prompt_driver_id_fn + autouse=True, + params=StructureTester.TOOL_TASK_CAPABLE_PROMPT_DRIVERS, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.structures import Agent diff --git 
a/tests/integration/tasks/test_toolkit_task.py b/tests/integration/tasks/test_toolkit_task.py index 50b4f2a97..7593c5391 100644 --- a/tests/integration/tasks/test_toolkit_task.py +++ b/tests/integration/tasks/test_toolkit_task.py @@ -7,7 +7,7 @@ class TestToolkitTask: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): import os diff --git a/tests/integration/tools/test_calculator_tool.py b/tests/integration/tools/test_calculator_tool.py index c209a9a2c..634b84803 100644 --- a/tests/integration/tools/test_calculator_tool.py +++ b/tests/integration/tools/test_calculator_tool.py @@ -7,7 +7,7 @@ class TestCalculator: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.structures import Agent diff --git a/tests/integration/tools/test_file_manager_tool.py b/tests/integration/tools/test_file_manager_tool.py index 4b5299175..ce6b331c2 100644 --- a/tests/integration/tools/test_file_manager_tool.py +++ b/tests/integration/tools/test_file_manager_tool.py @@ -7,7 +7,7 @@ class TestFileManager: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.structures import Agent diff --git a/tests/integration/tools/test_google_docs_tool.py b/tests/integration/tools/test_google_docs_tool.py index 7c8828dd3..e977e5523 100644 --- a/tests/integration/tools/test_google_docs_tool.py +++ b/tests/integration/tools/test_google_docs_tool.py @@ -9,7 +9,7 @@ class TestGoogleDocsTool: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.structures import Agent diff --git a/tests/integration/tools/test_google_drive_tool.py b/tests/integration/tools/test_google_drive_tool.py index 7fd8b9047..fdd9fde89 100644 --- a/tests/integration/tools/test_google_drive_tool.py +++ b/tests/integration/tools/test_google_drive_tool.py @@ -9,7 +9,7 @@ class TestGoogleDriveTool: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, - ids=StructureTester.prompt_driver_id_fn, + ids=StructureTester.generate_prompt_driver_id, ) def structure_tester(self, request): from griptape.structures import Agent diff --git a/tests/mocks/mock_audio_input_task.py b/tests/mocks/mock_audio_input_task.py index 95b8c88d0..cd358c92b 100644 --- a/tests/mocks/mock_audio_input_task.py +++ b/tests/mocks/mock_audio_input_task.py @@ -6,5 +6,5 @@ @define class MockAudioInputTask(BaseAudioInputTask): - def run(self) -> TextArtifact: + def try_run(self) -> TextArtifact: return TextArtifact(self.input.to_text()) diff --git a/tests/mocks/mock_chunk_event.py b/tests/mocks/mock_chunk_event.py new file mode 100644 index 000000000..4017dcd0a --- /dev/null +++ b/tests/mocks/mock_chunk_event.py @@ -0,0 +1,11 @@ +from attrs import define, field + +from griptape.events.base_chunk_event import BaseChunkEvent + + +@define +class MockChunkEvent(BaseChunkEvent): + token: str = field(kw_only=True, metadata={"serializable": True}) + + def 
__str__(self) -> str: + return "mock " + self.token diff --git a/tests/mocks/mock_event_listener_driver.py b/tests/mocks/mock_event_listener_driver.py index 5833dd1c0..1a17d5e69 100644 --- a/tests/mocks/mock_event_listener_driver.py +++ b/tests/mocks/mock_event_listener_driver.py @@ -1,14 +1,21 @@ from __future__ import annotations -from attrs import define +from typing import Callable, Optional + +from attrs import define, field from griptape.drivers import BaseEventListenerDriver @define class MockEventListenerDriver(BaseEventListenerDriver): + on_event_payload_publish: Optional[Callable[[dict], None]] = field(default=None, kw_only=True) + on_event_payload_batch_publish: Optional[Callable[[list[dict]], None]] = field(default=None, kw_only=True) + def try_publish_event_payload(self, event_payload: dict) -> None: - pass + if self.on_event_payload_publish is not None: + self.on_event_payload_publish(event_payload) def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None: - pass + if self.on_event_payload_batch_publish is not None: + self.on_event_payload_batch_publish(event_payload_batch) diff --git a/tests/mocks/mock_image_generation_task.py b/tests/mocks/mock_image_generation_task.py index b55c5c995..bc0d8e35f 100644 --- a/tests/mocks/mock_image_generation_task.py +++ b/tests/mocks/mock_image_generation_task.py @@ -1,4 +1,4 @@ -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import ImageArtifact, TextArtifact from griptape.tasks import BaseImageGenerationTask @@ -6,7 +6,7 @@ @define class MockImageGenerationTask(BaseImageGenerationTask): - _input: TextArtifact = field(default="input") + _input: TextArtifact = field(default=Factory(lambda: TextArtifact("input"))) @property def input(self) -> TextArtifact: @@ -16,5 +16,5 @@ def input(self) -> TextArtifact: def input(self, value: str) -> None: self._input = TextArtifact(value) - def run(self) -> ImageArtifact: + def try_run(self) -> ImageArtifact: return ImageArtifact(value=b"image data", format="png", width=512, height=512) diff --git a/tests/mocks/mock_prompt_driver.py b/tests/mocks/mock_prompt_driver.py index 70089430d..f308c9804 100644 --- a/tests/mocks/mock_prompt_driver.py +++ b/tests/mocks/mock_prompt_driver.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING, Callable, Union from attrs import define, field @@ -29,8 +29,8 @@ class MockPromptDriver(BasePromptDriver): model: str = "test-model" tokenizer: BaseTokenizer = MockTokenizer(model="test-model", max_input_tokens=4096, max_output_tokens=4096) - mock_input: str | Callable[[], str] = field(default="mock input", kw_only=True) - mock_output: str | Callable[[PromptStack], str] = field(default="mock output", kw_only=True) + mock_input: Union[str, Callable[[], str]] = field(default="mock input", kw_only=True) + mock_output: Union[str, Callable[[PromptStack], str]] = field(default="mock output", kw_only=True) def try_run(self, prompt_stack: PromptStack) -> Message: output = self.mock_output(prompt_stack) if isinstance(self.mock_output, Callable) else self.mock_output diff --git a/tests/mocks/mock_task.py b/tests/mocks/mock_task.py index 81aa03713..86f0254b6 100644 --- a/tests/mocks/mock_task.py +++ b/tests/mocks/mock_task.py @@ -12,5 +12,5 @@ class MockTask(BaseTask): def input(self) -> BaseArtifact: return TextArtifact(self.mock_input) - def run(self) -> BaseArtifact: + def try_run(self) -> BaseArtifact: return self.input diff --git 
a/tests/mocks/mock_text_input_task.py b/tests/mocks/mock_text_input_task.py index f1439bd42..149fb8059 100644 --- a/tests/mocks/mock_text_input_task.py +++ b/tests/mocks/mock_text_input_task.py @@ -6,5 +6,5 @@ @define class MockTextInputTask(BaseTextInputTask): - def run(self) -> TextArtifact: + def try_run(self) -> TextArtifact: return TextArtifact(self.input.to_text()) diff --git a/tests/mocks/mock_tool/tool.py b/tests/mocks/mock_tool/tool.py index 9c2241636..c7bacc3b2 100644 --- a/tests/mocks/mock_tool/tool.py +++ b/tests/mocks/mock_tool/tool.py @@ -12,6 +12,11 @@ class MockTool(BaseTool): test_int: int = field(default=5, kw_only=True) test_dict: dict = field(factory=dict, kw_only=True) custom_schema: dict = field(default=Factory(lambda: {"test": str}), kw_only=True) + module_name: str = field( + default=Factory(lambda self: self.__class__.__module__, takes_self=True), + kw_only=True, + metadata={"serializable": False}, + ) @activity( config={ @@ -19,8 +24,8 @@ class MockTool(BaseTool): "schema": Schema({Literal("test"): str}, description="Test input"), } ) - def test(self, value: dict) -> BaseArtifact: - return TextArtifact(f"ack {value['values']['test']}") + def test(self, test: str) -> BaseArtifact: + return TextArtifact(f"ack {test}") @activity( config={ @@ -28,8 +33,8 @@ def test(self, value: dict) -> BaseArtifact: "schema": Schema({Literal("test"): str}, description="Test input"), } ) - def test_error(self, value: dict) -> BaseArtifact: - return ErrorArtifact(f"error {value['values']['test']}") + def test_error(self, params: dict) -> BaseArtifact: + return ErrorArtifact(f"error {params['values']['test']}") @activity( config={ @@ -37,8 +42,8 @@ def test_error(self, value: dict) -> BaseArtifact: "schema": Schema({Literal("test"): str}, description="Test input"), } ) - def test_exception(self, value: dict) -> BaseArtifact: - raise Exception(f"error {value['values']['test']}") + def test_exception(self, params: dict) -> BaseArtifact: + raise Exception(f"error {params['values']['test']}") @activity( config={ @@ -46,11 +51,11 @@ def test_exception(self, value: dict) -> BaseArtifact: "schema": Schema({Literal("test"): str}, description="Test input"), } ) - def test_str_output(self, value: dict) -> str: - return f"ack {value['values']['test']}" + def test_str_output(self, params: dict) -> str: + return f"ack {params['values']['test']}" @activity(config={"description": "test description"}) - def test_no_schema(self, value: dict) -> str: + def test_no_schema(self) -> str: return "no schema" @activity( @@ -63,14 +68,14 @@ def test_callable_schema(self) -> TextArtifact: return TextArtifact("ack") @activity(config={"description": "test description"}) - def test_list_output(self, value: dict) -> ListArtifact: + def test_list_output(self) -> ListArtifact: return ListArtifact([TextArtifact("foo"), TextArtifact("bar")]) @activity( config={"description": "test description", "schema": Schema({Literal("test"): str}, description="Test input")} ) - def test_without_default_memory(self, value: dict) -> str: - return f"ack {value['values']['test']}" + def test_without_default_memory(self, params: dict) -> str: + return f"ack {params['values']['test']}" def foo(self) -> str: return "foo" diff --git a/tests/mocks/mock_tool_kwargs/__init__.py b/tests/mocks/mock_tool_kwargs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/mocks/mock_tool_kwargs/requirements.txt b/tests/mocks/mock_tool_kwargs/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git 
a/tests/mocks/mock_tool_kwargs/tool.py b/tests/mocks/mock_tool_kwargs/tool.py new file mode 100644 index 000000000..cd95f9c75 --- /dev/null +++ b/tests/mocks/mock_tool_kwargs/tool.py @@ -0,0 +1,25 @@ +from attrs import define +from schema import Literal, Schema + +from griptape.tools import BaseTool +from griptape.utils.decorators import activity + + +@define +class MockToolKwargs(BaseTool): + @activity( + config={ + "description": "test description", + "schema": Schema({Literal("test_kwarg"): str}, description="Test input"), + } + ) + def test_with_kwargs(self, params: dict, test_kwarg: str, test_kwarg_none: None, **kwargs) -> str: + if test_kwarg_none is not None: + raise ValueError("test_kwarg_none should be None") + if "test_kwarg_kwargs" not in kwargs: + raise ValueError("test_kwarg_kwargs not in kwargs") + if "values" not in kwargs: + raise ValueError("values not in params") + if "test_kwarg" not in params["values"]: + raise ValueError("test_kwarg not in params") + return f"ack {test_kwarg}" diff --git a/tests/unit/chunkers/test_pdf_chunker.py b/tests/unit/chunkers/test_pdf_chunker.py index dc072ca36..4fe196a0e 100644 --- a/tests/unit/chunkers/test_pdf_chunker.py +++ b/tests/unit/chunkers/test_pdf_chunker.py @@ -20,7 +20,7 @@ def test_chunk(self, chunker): text = "".join([p.extract_text() for p in reader.pages]) chunks = chunker.chunk(text) - assert len(chunks) == 16 + assert len(chunks) == 17 for chunk in chunks: assert chunker.tokenizer.count_tokens(chunk.value) <= MAX_TOKENS diff --git a/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py index b061e5b67..bdde495de 100644 --- a/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py +++ b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py @@ -57,6 +57,7 @@ def test_to_dict(self, config): "type": "AmazonBedrockPromptDriver", "tool_choice": {"auto": {}}, "use_native_tools": True, + "extra_params": {}, }, "vector_store_driver": { "embedding_driver": { @@ -117,6 +118,7 @@ def test_to_dict_with_values(self, config_with_values): "type": "AmazonBedrockPromptDriver", "tool_choice": {"auto": {}}, "use_native_tools": True, + "extra_params": {}, }, "vector_store_driver": { "embedding_driver": { diff --git a/tests/unit/configs/drivers/test_anthropic_drivers_config.py b/tests/unit/configs/drivers/test_anthropic_drivers_config.py index b69893560..bd232283f 100644 --- a/tests/unit/configs/drivers/test_anthropic_drivers_config.py +++ b/tests/unit/configs/drivers/test_anthropic_drivers_config.py @@ -25,6 +25,7 @@ def test_to_dict(self, config): "top_p": 0.999, "top_k": 250, "use_native_tools": True, + "extra_params": {}, }, "image_generation_driver": {"type": "DummyImageGenerationDriver"}, "image_query_driver": { @@ -33,16 +34,12 @@ def test_to_dict(self, config): "max_tokens": 256, }, "embedding_driver": { - "type": "VoyageAiEmbeddingDriver", - "model": "voyage-large-2", - "input_type": "document", + "type": "DummyEmbeddingDriver", }, "vector_store_driver": { - "type": "LocalVectorStoreDriver", + "type": "DummyVectorStoreDriver", "embedding_driver": { - "type": "VoyageAiEmbeddingDriver", - "model": "voyage-large-2", - "input_type": "document", + "type": "DummyEmbeddingDriver", }, }, "conversation_memory_driver": { diff --git a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py index 6c6d49483..a4af1692f 100644 --- 
a/tests/unit/configs/drivers/test_azure_openai_drivers_config.py +++ b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py @@ -35,6 +35,7 @@ def test_to_dict(self, config): "stream": False, "user": "", "use_native_tools": True, + "extra_params": {}, }, "conversation_memory_driver": { "type": "LocalConversationMemoryDriver", diff --git a/tests/unit/configs/drivers/test_cohere_drivers_config.py b/tests/unit/configs/drivers/test_cohere_drivers_config.py index b828fef41..0032b6e7d 100644 --- a/tests/unit/configs/drivers/test_cohere_drivers_config.py +++ b/tests/unit/configs/drivers/test_cohere_drivers_config.py @@ -27,6 +27,7 @@ def test_to_dict(self, config): "model": "command-r", "force_single_step": False, "use_native_tools": True, + "extra_params": {}, }, "embedding_driver": { "type": "CohereEmbeddingDriver", diff --git a/tests/unit/configs/drivers/test_drivers_config.py b/tests/unit/configs/drivers/test_drivers_config.py index 8eba0cb6a..a1138769b 100644 --- a/tests/unit/configs/drivers/test_drivers_config.py +++ b/tests/unit/configs/drivers/test_drivers_config.py @@ -18,6 +18,7 @@ def test_to_dict(self, config): "max_tokens": None, "stream": False, "use_native_tools": False, + "extra_params": {}, }, "conversation_memory_driver": { "type": "LocalConversationMemoryDriver", @@ -57,7 +58,7 @@ def test_context_manager(self): assert Defaults.drivers_config == old_drivers_config - @pytest.mark.skip_mock_config() + @pytest.mark.skip_mock_config def test_lazy_init(self): from griptape.configs import Defaults diff --git a/tests/unit/configs/drivers/test_google_drivers_config.py b/tests/unit/configs/drivers/test_google_drivers_config.py index ab695369e..8eacda7c6 100644 --- a/tests/unit/configs/drivers/test_google_drivers_config.py +++ b/tests/unit/configs/drivers/test_google_drivers_config.py @@ -25,6 +25,7 @@ def test_to_dict(self, config): "top_k": None, "tool_choice": "auto", "use_native_tools": True, + "extra_params": {}, }, "image_generation_driver": {"type": "DummyImageGenerationDriver"}, "image_query_driver": {"type": "DummyImageQueryDriver"}, diff --git a/tests/unit/configs/drivers/test_openai_driver_config.py b/tests/unit/configs/drivers/test_openai_driver_config.py index a3cca9608..09ceccfdc 100644 --- a/tests/unit/configs/drivers/test_openai_driver_config.py +++ b/tests/unit/configs/drivers/test_openai_driver_config.py @@ -27,6 +27,7 @@ def test_to_dict(self, config): "stream": False, "user": "", "use_native_tools": True, + "extra_params": {}, }, "conversation_memory_driver": { "type": "LocalConversationMemoryDriver", diff --git a/tests/unit/configs/logging/test_json_formatter.py b/tests/unit/configs/logging/test_json_formatter.py new file mode 100644 index 000000000..184e0ded5 --- /dev/null +++ b/tests/unit/configs/logging/test_json_formatter.py @@ -0,0 +1,22 @@ +import logging + +from griptape.configs.logging import JsonFormatter + + +class TestJsonFormatter: + def test_init(self): + formatter = JsonFormatter() + assert formatter + + def test_format(self): + formatter = JsonFormatter() + record = logging.LogRecord( + name="name", + level=logging.INFO, + pathname="pathname", + lineno=1, + msg={"key": "value"}, + args=None, + exc_info=None, + ) + assert formatter.format(record) == '{\n "key": "value"\n}' diff --git a/tests/unit/configs/logging/test_logging_config.py b/tests/unit/configs/logging/test_logging_config.py new file mode 100644 index 000000000..174d461e6 --- /dev/null +++ b/tests/unit/configs/logging/test_logging_config.py @@ -0,0 +1,11 @@ +import logging + +from 
griptape.configs import Defaults + + +class TestLoggingConfig: + def test_init(self): + logger = logging.getLogger(Defaults.logging_config.logger_name) + assert logger.level == logging.INFO + assert logger.propagate is False + assert len(logger.handlers) == 1 diff --git a/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py b/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py index 29aecfdf9..36d4618b8 100644 --- a/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py +++ b/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py @@ -14,7 +14,7 @@ def driver(self): def test_run_publish_events(self, driver, mock_config): mock_handler = Mock() - EventBus.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(on_event=mock_handler)) driver.run( AudioArtifact( diff --git a/tests/unit/drivers/event_listener/test_base_event_listener_driver.py b/tests/unit/drivers/event_listener/test_base_event_listener_driver.py index 114778f72..36c8f3711 100644 --- a/tests/unit/drivers/event_listener/test_base_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_base_event_listener_driver.py @@ -1,36 +1,112 @@ -from unittest.mock import MagicMock +from unittest.mock import ANY, MagicMock from tests.mocks.mock_event import MockEvent from tests.mocks.mock_event_listener_driver import MockEventListenerDriver class TestBaseEventListenerDriver: - def test_publish_event(self): + def test_publish_event_no_batched(self): executor = MagicMock() executor.__enter__.return_value = executor - driver = MockEventListenerDriver(futures_executor_fn=lambda: executor) + driver = MockEventListenerDriver(batched=False, futures_executor=executor) + mock_event_payload = MockEvent().to_dict() - driver.publish_event(MockEvent().to_dict()) + driver.publish_event(mock_event_payload) - executor.submit.assert_called_once() + executor.submit.assert_called_once_with(ANY, mock_event_payload) - def test__safe_try_publish_event(self): - driver = MockEventListenerDriver(batched=False) + def test_publish_event_yes_batched(self): + executor = MagicMock() + executor.__enter__.return_value = executor + driver = MockEventListenerDriver(batched=True, futures_executor=executor) + mock_event_payload = MockEvent().to_dict() - for _ in range(4): - driver._safe_try_publish_event(MockEvent().to_dict(), flush=False) - assert len(driver.batch) == 0 + # Publish 9 events to fill the batch + mock_event_payloads = [mock_event_payload for _ in range(0, 9)] + for mock_event_payload in mock_event_payloads: + driver.publish_event(mock_event_payload) - def test__safe_try_publish_event_batch(self): - driver = MockEventListenerDriver(batched=True) + assert len(driver._batch) == 9 + executor.submit.assert_not_called() - for _ in range(0, 3): - driver._safe_try_publish_event(MockEvent().to_dict(), flush=False) - assert len(driver.batch) == 3 + # Publish the 10th event to trigger the batch publish + driver.publish_event(mock_event_payload) - def test__safe_try_publish_event_batch_flush(self): - driver = MockEventListenerDriver(batched=True) + assert len(driver._batch) == 0 + executor.submit.assert_called_once_with(ANY, [*mock_event_payloads, mock_event_payload]) - for _ in range(0, 3): - driver._safe_try_publish_event(MockEvent().to_dict(), flush=True) + def test_flush_events(self): + executor = MagicMock() + executor.__enter__.return_value = executor + driver = MockEventListenerDriver(batched=True, 
futures_executor=executor) + driver.try_publish_event_payload_batch = MagicMock(side_effect=driver.try_publish_event_payload) + + driver.flush_events() + driver.try_publish_event_payload_batch.assert_not_called() + assert driver.batch == [] + mock_event_payloads = [MockEvent().to_dict() for _ in range(0, 3)] + for mock_event_payload in mock_event_payloads: + driver.publish_event(mock_event_payload) + assert len(driver.batch) == 3 + + driver.flush_events() + executor.submit.assert_called_once_with(ANY, mock_event_payloads) assert len(driver.batch) == 0 + + def test__safe_publish_event_payload(self): + mock_fn = MagicMock() + driver = MockEventListenerDriver( + batched=False, + on_event_payload_publish=mock_fn, + ) + mock_event_payload = MockEvent().to_dict() + + driver._safe_publish_event_payload(mock_event_payload) + + mock_fn.assert_called_once_with(mock_event_payload) + + def test__safe_publish_event_payload_batch(self): + mock_fn = MagicMock() + driver = MockEventListenerDriver( + batched=True, + on_event_payload_batch_publish=mock_fn, + ) + mock_event_payloads = [MockEvent().to_dict() for _ in range(0, 3)] + + driver._safe_publish_event_payload_batch(mock_event_payloads) + + mock_fn.assert_called_once_with(mock_event_payloads) + + def test__safe_publish_event_payload_error(self): + mock_fn = MagicMock() + driver = MockEventListenerDriver( + batched=False, + on_event_payload_publish=mock_fn, + max_attempts=2, + max_retry_delay=0.1, + min_retry_delay=0.1, + ) + mock_fn.side_effect = Exception("Test Exception") + mock_event_payload = MockEvent().to_dict() + + driver._safe_publish_event_payload(mock_event_payload) + + assert mock_fn.call_count == driver.max_attempts + mock_fn.assert_called_with(mock_event_payload) + + def test__safe_publish_event_payload_batch_error(self): + mock_fn = MagicMock() + driver = MockEventListenerDriver( + batched=True, + on_event_payload_batch_publish=mock_fn, + max_attempts=2, + max_retry_delay=0.1, + min_retry_delay=0.1, + ) + mock_fn.side_effect = Exception("Test Exception") + mock_event_payloads = [MockEvent().to_dict() for _ in range(0, 3)] + + driver._safe_publish_event_payload_batch(mock_event_payloads) + + assert mock_fn.call_count == driver.max_attempts + mock_fn.assert_called_with(mock_event_payloads) diff --git a/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py b/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py index 441589774..472f249cf 100644 --- a/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py @@ -1,4 +1,5 @@ import os +import time from unittest.mock import MagicMock, Mock import pytest @@ -45,8 +46,10 @@ def test_init(self, driver): def test_publish_event_without_span_id(self, mock_post, driver): event = MockEvent() - driver.publish_event(event, flush=True) + driver.publish_event(event) + driver.flush_events() + time.sleep(1) # Happens asynchronously, so need to wait for it to finish mock_post.assert_called_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", json=[driver._get_event_request(event.to_dict())], @@ -59,8 +62,10 @@ def test_publish_event_with_span_id(self, mock_post, driver): observability_driver.get_span_id.return_value = "test" with Observability(observability_driver=observability_driver): - driver.publish_event(event, flush=True) + driver.publish_event(event) + driver.flush_events() + time.sleep(1) # Happens asynchronously, so need to 
wait for it to finish mock_post.assert_called_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", json=[driver._get_event_request({**event.to_dict(), "span_id": "test"})], @@ -71,6 +76,7 @@ def test_try_publish_event_payload(self, mock_post, driver): event = MockEvent() driver.try_publish_event_payload(event.to_dict()) + time.sleep(1) # Happens asynchronously, so need to wait for it to finish mock_post.assert_called_once_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", json=driver._get_event_request(event.to_dict()), @@ -82,6 +88,7 @@ def try_publish_event_payload_batch(self, mock_post, driver): event = MockEvent() driver.try_publish_event_payload(event.to_dict()) + time.sleep(1) # Happens asynchronously, so need to wait for it to finish mock_post.assert_called_with( url="https://cloud123.griptape.ai/api/structure-runs/bar baz/events", json=driver._get_event_request(event.to_dict()), diff --git a/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py b/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py index 2240dee58..efeb14dc6 100644 --- a/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py +++ b/tests/unit/drivers/file_manager/test_amazon_s3_file_manager_driver.py @@ -207,7 +207,7 @@ def test_save_file(self, workdir, path, content, driver, get_s3_value): result = driver.save_file(path, content) assert isinstance(result, InfoArtifact) - assert result.value == "Successfully saved file" + assert result.value.startswith("Successfully saved file at:") expected_s3_key = f"{workdir}/{path}".lstrip("/") content_str = content if isinstance(content, str) else content.decode() assert get_s3_value(expected_s3_key) == content_str @@ -245,7 +245,7 @@ def test_save_file_with_encoding(self, session, bucket, get_s3_value): expected_s3_key = f"{workdir}/{path}".lstrip("/") assert get_s3_value(expected_s3_key) == "foobar" - assert result.value == "Successfully saved file" + assert result.value.startswith("Successfully saved file at:") def test_save_and_load_file_with_encoding(self, session, bucket, get_s3_value): workdir = "/sub-folder" @@ -256,7 +256,7 @@ def test_save_and_load_file_with_encoding(self, session, bucket, get_s3_value): expected_s3_key = f"{workdir}/{path}".lstrip("/") assert get_s3_value(expected_s3_key) == "foobar" - assert result.value == "Successfully saved file" + assert result.value.startswith("Successfully saved file at:") driver = AmazonS3FileManagerDriver(session=session, bucket=bucket, encoding="ascii", workdir=workdir) path = "test/foobar.txt" diff --git a/tests/unit/drivers/file_manager/test_base_file_manager_driver.py b/tests/unit/drivers/file_manager/test_base_file_manager_driver.py new file mode 100644 index 000000000..41bda51df --- /dev/null +++ b/tests/unit/drivers/file_manager/test_base_file_manager_driver.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +import pytest + +from griptape.artifacts import BaseArtifact, TextArtifact +from griptape.drivers import BaseFileManagerDriver + + +class MockFileManagerDriver(BaseFileManagerDriver): + def try_list_files(self, path: str) -> list[str]: + return ["foo", "bar"] + + def try_save_file(self, path: str, value: bytes) -> str: + assert path == "foo" + assert BaseArtifact.from_json(value.decode()).value == TextArtifact(value="value").value + + return "mock_save_location" + + def try_load_file(self, path: str) -> bytes: + assert path == "foo" + + return TextArtifact(value="value").to_json().encode() + + +class 
TestBaseFileManagerDriver: + @pytest.fixture() + def driver(self): + return MockFileManagerDriver(workdir="/") + + def test_load_artifact(self, driver): + response = driver.load_artifact("foo") + + assert response.value == "value" + + def test_save_artifact(self, driver): + response = driver.save_artifact("foo", TextArtifact(value="value")) + + assert response.value == "Successfully saved artifact at: mock_save_location" diff --git a/tests/unit/drivers/file_manager/test_griptape_cloud_file_manager_driver.py b/tests/unit/drivers/file_manager/test_griptape_cloud_file_manager_driver.py new file mode 100644 index 000000000..0ce837dc1 --- /dev/null +++ b/tests/unit/drivers/file_manager/test_griptape_cloud_file_manager_driver.py @@ -0,0 +1,192 @@ +from unittest import mock + +import pytest +import requests +from azure.core.exceptions import ResourceNotFoundError + + +class TestGriptapeCloudFileManagerDriver: + @pytest.fixture() + def driver(self, mocker): + from griptape.drivers import GriptapeCloudFileManagerDriver + + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {} + mocker.patch("requests.request", return_value=mock_response) + + return GriptapeCloudFileManagerDriver(base_url="https://api.griptape.ai", api_key="foo bar", bucket_id="1") + + def test_instantiate_bucket_id(self, mocker): + from griptape.drivers import GriptapeCloudFileManagerDriver + + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {} + mocker.patch("requests.request", return_value=mock_response) + + GriptapeCloudFileManagerDriver(base_url="https://api.griptape.ai", api_key="foo bar", bucket_id="1") + + def test_instantiate_no_bucket_id(self): + from griptape.drivers import GriptapeCloudFileManagerDriver + + with pytest.raises(ValueError, match="GriptapeCloudFileManagerDriver requires an Bucket ID"): + GriptapeCloudFileManagerDriver(api_key="foo bar") + + def test_instantiate_bucket_not_found(self, mocker): + from griptape.drivers import GriptapeCloudFileManagerDriver + + mocker.patch("requests.request", side_effect=requests.exceptions.HTTPError(response=mock.Mock(status_code=404))) + + with pytest.raises(ValueError, match="No Bucket found with ID: 1"): + return GriptapeCloudFileManagerDriver(api_key="foo bar", bucket_id="1") + + def test_instantiate_bucket_500(self, mocker): + from griptape.drivers import GriptapeCloudFileManagerDriver + + mocker.patch("requests.request", side_effect=requests.exceptions.HTTPError(response=mock.Mock(status_code=500))) + + with pytest.raises(ValueError, match="Unexpected error when retrieving Bucket with ID: 1"): + return GriptapeCloudFileManagerDriver(api_key="foo bar", bucket_id="1") + + def test_instantiate_no_api_key(self): + from griptape.drivers import GriptapeCloudFileManagerDriver + + with pytest.raises(ValueError, match="GriptapeCloudFileManagerDriver requires an API key"): + GriptapeCloudFileManagerDriver(bucket_id="1") + + def test_instantiate_invalid_work_dir(self): + from griptape.drivers import GriptapeCloudFileManagerDriver + + with pytest.raises( + ValueError, + match="GriptapeCloudFileManagerDriver requires 'workdir' to be an absolute path, starting with `/`", + ): + GriptapeCloudFileManagerDriver(api_key="foo bar", bucket_id="1", workdir="no_slash") + + def test_try_list_files(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"assets": [{"name": "foo/bar.pdf"}, {"name": "foo/baz.pdf"}]} + 
mocker.patch("requests.request", return_value=mock_response) + + files = driver.try_list_files("foo/") + + assert len(files) == 2 + assert files[0] == "foo/bar.pdf" + assert files[1] == "foo/baz.pdf" + + def test_try_list_files_postfix(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"assets": [{"name": "foo/bar.pdf"}, {"name": "foo/baz.pdf"}]} + mocker.patch("requests.request", return_value=mock_response) + + files = driver.try_list_files("foo/", ".pdf") + + assert len(files) == 2 + assert files[0] == "foo/bar.pdf" + assert files[1] == "foo/baz.pdf" + + def test_try_list_files_not_directory(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"assets": [{"name": "foo/bar"}, {"name": "foo/baz"}]} + mocker.patch("requests.request", return_value=mock_response) + + with pytest.raises(NotADirectoryError): + driver.try_list_files("foo") + + def test_try_load_file(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"url": "https://foo.bar"} + mocker.patch("requests.request", return_value=mock_response) + + mock_bytes = b"bytes" + mock_blob_client = mocker.Mock() + mock_blob_client.download_blob.return_value.readall.return_value = mock_bytes + mocker.patch("azure.storage.blob.BlobClient.from_blob_url", return_value=mock_blob_client) + + response = driver.try_load_file("foo") + + assert response == mock_bytes + + def test_try_load_file_directory(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"url": "https://foo.bar"} + mocker.patch("requests.request", return_value=mock_response) + + with pytest.raises(IsADirectoryError): + driver.try_load_file("foo/") + + def test_try_load_file_sas_404(self, mocker, driver): + mocker.patch("requests.request", side_effect=requests.exceptions.HTTPError(response=mock.Mock(status_code=404))) + + with pytest.raises(FileNotFoundError): + driver.try_load_file("foo") + + def test_try_load_file_sas_500(self, mocker, driver): + mocker.patch("requests.request", side_effect=requests.exceptions.HTTPError(response=mock.Mock(status_code=500))) + + with pytest.raises(requests.exceptions.HTTPError): + driver.try_load_file("foo") + + def test_try_load_file_blob_404(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"url": "https://foo.bar"} + mocker.patch("requests.request", return_value=mock_response) + + mock_blob_client = mocker.Mock() + mock_blob_client.download_blob.side_effect = ResourceNotFoundError() + mocker.patch("azure.storage.blob.BlobClient.from_blob_url", return_value=mock_blob_client) + + with pytest.raises(FileNotFoundError): + driver.try_load_file("foo") + + def test_try_save_files(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"url": "https://foo.bar"} + mocker.patch("requests.request", return_value=mock_response) + + mock_blob_client = mocker.Mock() + mocker.patch("azure.storage.blob.BlobClient.from_blob_url", return_value=mock_blob_client) + + response = driver.try_save_file("foo", b"value") + + assert response == "buckets/1/assets/foo" + + def test_try_save_file_directory(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"url": 
"https://foo.bar"} + mocker.patch("requests.request", return_value=mock_response) + + with pytest.raises(IsADirectoryError): + driver.try_save_file("foo/", b"value") + + def test_try_save_file_sas_404(self, mocker, driver): + mock_response = mocker.Mock() + mock_response.json.return_value = {"url": "https://foo.bar"} + mock_response.raise_for_status.side_effect = [ + requests.exceptions.HTTPError(response=mock.Mock(status_code=404)), + None, + None, + ] + mocker.patch("requests.request", return_value=mock_response) + + mock_blob_client = mocker.Mock() + mocker.patch("azure.storage.blob.BlobClient.from_blob_url", return_value=mock_blob_client) + + response = driver.try_save_file("foo", b"value") + + assert response == "buckets/1/assets/foo" + + def test_try_save_file_sas_500(self, mocker, driver): + mocker.patch("requests.request", side_effect=requests.exceptions.HTTPError(response=mock.Mock(status_code=500))) + + with pytest.raises(requests.exceptions.HTTPError): + driver.try_save_file("foo", b"value") diff --git a/tests/unit/drivers/file_manager/test_local_file_manager_driver.py b/tests/unit/drivers/file_manager/test_local_file_manager_driver.py index 99f0285bc..b772941b8 100644 --- a/tests/unit/drivers/file_manager/test_local_file_manager_driver.py +++ b/tests/unit/drivers/file_manager/test_local_file_manager_driver.py @@ -176,7 +176,7 @@ def test_save_file(self, workdir, path, content, temp_dir, driver): result = driver.save_file(path, content) assert isinstance(result, InfoArtifact) - assert result.value == "Successfully saved file" + assert result.value.startswith("Successfully saved file at:") content_bytes = content if isinstance(content, str) else content.decode() assert Path(driver.workdir, path).read_text() == content_bytes @@ -210,14 +210,14 @@ def test_save_file_with_encoding(self, temp_dir): result = driver.save_file(os.path.join("test", "foobar.txt"), "foobar") assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar" - assert result.value == "Successfully saved file" + assert result.value.startswith("Successfully saved file at:") def test_save_and_load_file_with_encoding(self, temp_dir): driver = LocalFileManagerDriver(encoding="ascii", workdir=temp_dir) result = driver.save_file(os.path.join("test", "foobar.txt"), "foobar") assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar" - assert result.value == "Successfully saved file" + assert result.value.startswith("Successfully saved file at:") driver = LocalFileManagerDriver(encoding="ascii", workdir=temp_dir) result = driver.load_file(os.path.join("test", "foobar.txt")) diff --git a/tests/unit/drivers/image_generation/test_base_image_generation_driver.py b/tests/unit/drivers/image_generation/test_base_image_generation_driver.py index 96b615a58..0545f6c83 100644 --- a/tests/unit/drivers/image_generation/test_base_image_generation_driver.py +++ b/tests/unit/drivers/image_generation/test_base_image_generation_driver.py @@ -15,7 +15,7 @@ def driver(self): def test_run_text_to_image_publish_events(self, driver): mock_handler = Mock() - EventBus.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(on_event=mock_handler)) driver.run_text_to_image( ["foo", "bar"], @@ -31,7 +31,7 @@ def test_run_text_to_image_publish_events(self, driver): def test_run_image_variation_publish_events(self, driver): mock_handler = Mock() - EventBus.add_event_listener(EventListener(handler=mock_handler)) + 
EventBus.add_event_listener(EventListener(on_event=mock_handler)) driver.run_image_variation( ["foo", "bar"], @@ -53,7 +53,7 @@ def test_run_image_variation_publish_events(self, driver): def test_run_image_image_inpainting_publish_events(self, driver): mock_handler = Mock() - EventBus.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(on_event=mock_handler)) driver.run_image_inpainting( ["foo", "bar"], @@ -81,7 +81,7 @@ def test_run_image_image_inpainting_publish_events(self, driver): def test_run_image_image_outpainting_publish_events(self, driver): mock_handler = Mock() - EventBus.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(on_event=mock_handler)) driver.run_image_outpainting( ["foo", "bar"], diff --git a/tests/unit/drivers/image_query/test_base_image_query_driver.py b/tests/unit/drivers/image_query/test_base_image_query_driver.py index a77fb268e..652ee11c5 100644 --- a/tests/unit/drivers/image_query/test_base_image_query_driver.py +++ b/tests/unit/drivers/image_query/test_base_image_query_driver.py @@ -13,7 +13,7 @@ def driver(self): def test_query_publishes_events(self, driver): mock_handler = Mock() - EventBus.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(on_event=mock_handler)) driver.query("foo", []) diff --git a/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py index 0c76d6ecd..2c376ef4d 100644 --- a/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py @@ -25,7 +25,7 @@ def request(*args, **kwargs): "message_id": f"{thread_id}_message", "input": '{"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}', "output": '{"type": "TextArtifact", "id": "123", "value": "Hello! 
How can I assist you today?"}', - "metadata": {"run_id": "1234"}, + "metadata": {"run_id": "1234"} if thread_id != "no_meta" else {}, } ] } @@ -118,3 +118,9 @@ def test_load(self, driver): assert len(runs) == 1 assert runs[0].id == "1234" assert metadata == {"foo": "bar"} + + def test_load_no_message_meta(self, driver): + driver.thread_id = "no_meta" + runs, metadata = driver.load() + assert len(runs) == 1 + assert metadata == {"foo": "bar"} diff --git a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py index 40b0a8a0e..6d0dd757e 100644 --- a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py @@ -359,7 +359,9 @@ def messages(self): @pytest.mark.parametrize("use_native_tools", [True, False]) def test_try_run(self, mock_converse, prompt_stack, messages, use_native_tools): # Given - driver = AmazonBedrockPromptDriver(model="ai21.j2", use_native_tools=use_native_tools) + driver = AmazonBedrockPromptDriver( + model="ai21.j2", use_native_tools=use_native_tools, extra_params={"foo": "bar"} + ) # When message = driver.try_run(prompt_stack) @@ -376,6 +378,7 @@ def test_try_run(self, mock_converse, prompt_stack, messages, use_native_tools): if use_native_tools else {} ), + foo="bar", ) assert isinstance(message.value[0], TextArtifact) assert message.value[0].value == "model-output" @@ -390,7 +393,9 @@ def test_try_run(self, mock_converse, prompt_stack, messages, use_native_tools): @pytest.mark.parametrize("use_native_tools", [True, False]) def test_try_stream_run(self, mock_converse_stream, prompt_stack, messages, use_native_tools): # Given - driver = AmazonBedrockPromptDriver(model="ai21.j2", stream=True, use_native_tools=use_native_tools) + driver = AmazonBedrockPromptDriver( + model="ai21.j2", stream=True, use_native_tools=use_native_tools, extra_params={"foo": "bar"} + ) # When stream = driver.try_stream(prompt_stack) @@ -408,6 +413,7 @@ def test_try_stream_run(self, mock_converse_stream, prompt_stack, messages, use_ if prompt_stack.tools and use_native_tools else {} ), + foo="bar", ) event = next(stream) diff --git a/tests/unit/drivers/prompt/test_amazon_sagemaker_jumpstart_prompt_driver.py b/tests/unit/drivers/prompt/test_amazon_sagemaker_jumpstart_prompt_driver.py index c894524f5..c7b0682c2 100644 --- a/tests/unit/drivers/prompt/test_amazon_sagemaker_jumpstart_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_amazon_sagemaker_jumpstart_prompt_driver.py @@ -37,7 +37,7 @@ def test_init(self): def test_try_run(self, mock_client): # Given - driver = AmazonSageMakerJumpstartPromptDriver(endpoint="model", model="model") + driver = AmazonSageMakerJumpstartPromptDriver(endpoint="model", model="model", extra_params={"foo": "bar"}) prompt_stack = PromptStack() prompt_stack.add_user_message("prompt-stack") @@ -61,6 +61,7 @@ def test_try_run(self, mock_client): "eos_token_id": 1, "stop_strings": [], "return_full_text": False, + "foo": "bar", }, } ), @@ -91,6 +92,7 @@ def test_try_run(self, mock_client): "eos_token_id": 1, "stop_strings": [], "return_full_text": False, + "foo": "bar", }, } ), diff --git a/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py b/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py index 40c983f7d..2b84b5a17 100644 --- a/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py @@ -345,7 +345,9 @@ def test_init(self): 
@pytest.mark.parametrize("use_native_tools", [True, False]) def test_try_run(self, mock_client, prompt_stack, messages, use_native_tools): # Given - driver = AnthropicPromptDriver(model="claude-3-haiku", api_key="api-key", use_native_tools=use_native_tools) + driver = AnthropicPromptDriver( + model="claude-3-haiku", api_key="api-key", use_native_tools=use_native_tools, extra_params={"foo": "bar"} + ) # When message = driver.try_run(prompt_stack) @@ -361,6 +363,7 @@ def test_try_run(self, mock_client, prompt_stack, messages, use_native_tools): top_k=250, **{"system": "system-input"} if prompt_stack.system_messages else {}, **{"tools": self.ANTHROPIC_TOOLS, "tool_choice": driver.tool_choice} if use_native_tools else {}, + foo="bar", ) assert isinstance(message.value[0], TextArtifact) assert message.value[0].value == "model-output" @@ -376,7 +379,11 @@ def test_try_run(self, mock_client, prompt_stack, messages, use_native_tools): def test_try_stream_run(self, mock_stream_client, prompt_stack, messages, use_native_tools): # Given driver = AnthropicPromptDriver( - model="claude-3-haiku", api_key="api-key", stream=True, use_native_tools=use_native_tools + model="claude-3-haiku", + api_key="api-key", + stream=True, + use_native_tools=use_native_tools, + extra_params={"foo": "bar"}, ) # When @@ -395,6 +402,7 @@ def test_try_stream_run(self, mock_stream_client, prompt_stack, messages, use_na top_k=250, **{"system": "system-input"} if prompt_stack.system_messages else {}, **{"tools": self.ANTHROPIC_TOOLS, "tool_choice": driver.tool_choice} if use_native_tools else {}, + foo="bar", ) assert event.usage.input_tokens == 5 diff --git a/tests/unit/drivers/prompt/test_azure_openai_chat_prompt_driver.py b/tests/unit/drivers/prompt/test_azure_openai_chat_prompt_driver.py index dc0b54b0a..f9ac6bd59 100644 --- a/tests/unit/drivers/prompt/test_azure_openai_chat_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_azure_openai_chat_prompt_driver.py @@ -74,6 +74,7 @@ def test_try_run(self, mock_chat_completion_create, prompt_stack, messages, use_ azure_deployment="deployment-id", model="gpt-4", use_native_tools=use_native_tools, + extra_params={"foo": "bar"}, ) # When @@ -86,6 +87,7 @@ def test_try_run(self, mock_chat_completion_create, prompt_stack, messages, use_ user=driver.user, messages=messages, **{"tools": self.OPENAI_TOOLS, "tool_choice": driver.tool_choice} if use_native_tools else {}, + foo="bar", ) assert isinstance(message.value[0], TextArtifact) assert message.value[0].value == "model-output" @@ -104,6 +106,7 @@ def test_try_stream_run(self, mock_chat_completion_stream_create, prompt_stack, model="gpt-4", stream=True, use_native_tools=use_native_tools, + extra_params={"foo": "bar"}, ) # When @@ -118,6 +121,7 @@ def test_try_stream_run(self, mock_chat_completion_stream_create, prompt_stack, stream=True, messages=messages, **{"tools": self.OPENAI_TOOLS, "tool_choice": driver.tool_choice} if use_native_tools else {}, + foo="bar", ) assert isinstance(event.content, TextDeltaMessageContent) diff --git a/tests/unit/drivers/prompt/test_cohere_prompt_driver.py b/tests/unit/drivers/prompt/test_cohere_prompt_driver.py index e110d9469..a42a899f1 100644 --- a/tests/unit/drivers/prompt/test_cohere_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_cohere_prompt_driver.py @@ -136,7 +136,9 @@ def test_init(self): @pytest.mark.parametrize("use_native_tools", [True, False]) def test_try_run(self, mock_client, prompt_stack, use_native_tools): # Given - driver = CoherePromptDriver(model="command", 
api_key="api-key", use_native_tools=use_native_tools) + driver = CoherePromptDriver( + model="command", api_key="api-key", use_native_tools=use_native_tools, extra_params={"foo": "bar"} + ) # When message = driver.try_run(prompt_stack) @@ -171,6 +173,7 @@ def test_try_run(self, mock_client, prompt_stack, use_native_tools): ], stop_sequences=[], temperature=0.1, + foo="bar", ) assert isinstance(message.value[0], TextArtifact) @@ -187,7 +190,13 @@ def test_try_run(self, mock_client, prompt_stack, use_native_tools): @pytest.mark.parametrize("use_native_tools", [True, False]) def test_try_stream_run(self, mock_stream_client, prompt_stack, use_native_tools): # Given - driver = CoherePromptDriver(model="command", api_key="api-key", stream=True, use_native_tools=use_native_tools) + driver = CoherePromptDriver( + model="command", + api_key="api-key", + stream=True, + use_native_tools=use_native_tools, + extra_params={"foo": "bar"}, + ) # When stream = driver.try_stream(prompt_stack) @@ -223,6 +232,7 @@ def test_try_stream_run(self, mock_stream_client, prompt_stack, use_native_tools ], stop_sequences=[], temperature=0.1, + foo="bar", ) assert isinstance(event.content, TextDeltaMessageContent) diff --git a/tests/unit/drivers/prompt/test_google_prompt_driver.py b/tests/unit/drivers/prompt/test_google_prompt_driver.py index 776664eb1..72cf51d03 100644 --- a/tests/unit/drivers/prompt/test_google_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_google_prompt_driver.py @@ -169,7 +169,12 @@ def test_init(self): def test_try_run(self, mock_generative_model, prompt_stack, messages, use_native_tools): # Given driver = GooglePromptDriver( - model="gemini-pro", api_key="api-key", top_p=0.5, top_k=50, use_native_tools=use_native_tools + model="gemini-pro", + api_key="api-key", + top_p=0.5, + top_k=50, + use_native_tools=use_native_tools, + extra_params={"max_output_tokens": 10}, ) # When @@ -185,7 +190,9 @@ def test_try_run(self, mock_generative_model, prompt_stack, messages, use_native call_args = mock_generative_model.return_value.generate_content.call_args assert messages == call_args.args[0] generation_config = call_args.kwargs["generation_config"] - assert generation_config == GenerationConfig(temperature=0.1, top_p=0.5, top_k=50, stop_sequences=[]) + assert generation_config == GenerationConfig( + temperature=0.1, top_p=0.5, top_k=50, stop_sequences=[], max_output_tokens=10 + ) if use_native_tools: tool_declarations = call_args.kwargs["tools"] assert [ @@ -206,7 +213,13 @@ def test_try_run(self, mock_generative_model, prompt_stack, messages, use_native def test_try_stream(self, mock_stream_generative_model, prompt_stack, messages, use_native_tools): # Given driver = GooglePromptDriver( - model="gemini-pro", api_key="api-key", stream=True, top_p=0.5, top_k=50, use_native_tools=use_native_tools + model="gemini-pro", + api_key="api-key", + stream=True, + top_p=0.5, + top_k=50, + use_native_tools=use_native_tools, + extra_params={"max_output_tokens": 10}, ) # When @@ -225,7 +238,7 @@ def test_try_stream(self, mock_stream_generative_model, prompt_stack, messages, assert messages == call_args.args[0] assert call_args.kwargs["stream"] is True assert call_args.kwargs["generation_config"] == GenerationConfig( - temperature=0.1, top_p=0.5, top_k=50, stop_sequences=[] + temperature=0.1, top_p=0.5, top_k=50, stop_sequences=[], max_output_tokens=10 ) if use_native_tools: tool_declarations = call_args.kwargs["tools"] diff --git a/tests/unit/drivers/prompt/test_hugging_face_hub_prompt_driver.py 
b/tests/unit/drivers/prompt/test_hugging_face_hub_prompt_driver.py index 1a4e1b25b..4b7aa4d13 100644 --- a/tests/unit/drivers/prompt/test_hugging_face_hub_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_hugging_face_hub_prompt_driver.py @@ -47,25 +47,40 @@ def test_init(self): def test_try_run(self, prompt_stack, mock_client): # Given - driver = HuggingFaceHubPromptDriver(api_token="api-token", model="repo-id") + driver = HuggingFaceHubPromptDriver(api_token="api-token", model="repo-id", extra_params={"foo": "bar"}) # When message = driver.try_run(prompt_stack) # Then + mock_client.text_generation.assert_called_once_with( + "foo\n\nUser: bar", + return_full_text=False, + max_new_tokens=250, + foo="bar", + ) assert message.value == "model-output" assert message.usage.input_tokens == 3 assert message.usage.output_tokens == 3 def test_try_stream(self, prompt_stack, mock_client_stream): # Given - driver = HuggingFaceHubPromptDriver(api_token="api-token", model="repo-id", stream=True) + driver = HuggingFaceHubPromptDriver( + api_token="api-token", model="repo-id", stream=True, extra_params={"foo": "bar"} + ) # When stream = driver.try_stream(prompt_stack) event = next(stream) # Then + mock_client_stream.text_generation.assert_called_once_with( + "foo\n\nUser: bar", + return_full_text=False, + max_new_tokens=250, + foo="bar", + stream=True, + ) assert isinstance(event.content, TextDeltaMessageContent) assert event.content.text == "model-output" diff --git a/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py b/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py index 0ece6c976..e3c99f402 100644 --- a/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py @@ -10,11 +10,11 @@ def mock_pipeline(self, mocker): return mocker.patch("transformers.pipeline") @pytest.fixture(autouse=True) - def mock_generator(self, mock_pipeline): - mock_generator = mock_pipeline.return_value - mock_generator.task = "text-generation" - mock_generator.return_value = [{"generated_text": [{"content": "model-output"}]}] - return mock_generator + def mock_provider(self, mock_pipeline): + mock_provider = mock_pipeline.return_value + mock_provider.task = "text-generation" + mock_provider.return_value = [{"generated_text": [{"content": "model-output"}]}] + return mock_provider @pytest.fixture(autouse=True) def mock_autotokenizer(self, mocker): @@ -33,17 +33,28 @@ def prompt_stack(self): prompt_stack.add_assistant_message("assistant-input") return prompt_stack + @pytest.fixture() + def messages(self): + return [ + {"role": "system", "content": "system-input"}, + {"role": "user", "content": "user-input"}, + {"role": "assistant", "content": "assistant-input"}, + ] + def test_init(self): assert HuggingFacePipelinePromptDriver(model="gpt2", max_tokens=42) - def test_try_run(self, prompt_stack): + def test_try_run(self, prompt_stack, messages, mock_pipeline): # Given - driver = HuggingFacePipelinePromptDriver(model="foo", max_tokens=42) + driver = HuggingFacePipelinePromptDriver(model="foo", max_tokens=42, extra_params={"foo": "bar"}) # When message = driver.try_run(prompt_stack) # Then + mock_pipeline.return_value.assert_called_once_with( + messages, max_new_tokens=42, temperature=0.1, do_sample=True, foo="bar" + ) assert message.value == "model-output" assert message.usage.input_tokens == 3 assert message.usage.output_tokens == 3 @@ -59,10 +70,10 @@ def test_try_stream(self, prompt_stack): assert 
e.value.args[0] == "streaming is not supported" @pytest.mark.parametrize("choices", [[], [1, 2]]) - def test_try_run_throws_when_multiple_choices_returned(self, choices, mock_generator, prompt_stack): + def test_try_run_throws_when_multiple_choices_returned(self, choices, mock_provider, prompt_stack): # Given driver = HuggingFacePipelinePromptDriver(model="foo", max_tokens=42) - mock_generator.return_value = choices + mock_provider.return_value = choices # When with pytest.raises(Exception) as e: @@ -71,10 +82,10 @@ def test_try_run_throws_when_multiple_choices_returned(self, choices, mock_gener # Then assert e.value.args[0] == "completion with more than one choice is not supported yet" - def test_try_run_throws_when_non_list(self, mock_generator, prompt_stack): + def test_try_run_throws_when_non_list(self, mock_provider, prompt_stack): # Given driver = HuggingFacePipelinePromptDriver(model="foo", max_tokens=42) - mock_generator.return_value = {} + mock_provider.return_value = {} # When with pytest.raises(Exception) as e: diff --git a/tests/unit/drivers/prompt/test_ollama_prompt_driver.py b/tests/unit/drivers/prompt/test_ollama_prompt_driver.py index e4e9c4712..1ee075809 100644 --- a/tests/unit/drivers/prompt/test_ollama_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_ollama_prompt_driver.py @@ -205,7 +205,7 @@ def test_init(self): @pytest.mark.parametrize("use_native_tools", [True]) def test_try_run(self, mock_client, prompt_stack, messages, use_native_tools): # Given - driver = OllamaPromptDriver(model="llama") + driver = OllamaPromptDriver(model="llama", extra_params={"foo": "bar"}) # When message = driver.try_run(prompt_stack) @@ -220,6 +220,7 @@ def test_try_run(self, mock_client, prompt_stack, messages, use_native_tools): "num_predict": driver.max_tokens, }, **{"tools": self.OLLAMA_TOOLS} if use_native_tools else {}, + foo="bar", ) assert isinstance(message.value[0], TextArtifact) assert message.value[0].value == "model-output" @@ -256,7 +257,7 @@ def test_try_stream_run(self, mock_stream_client): {"role": "user", "content": "user-input", "images": ["aW1hZ2UtZGF0YQ=="]}, {"role": "assistant", "content": "assistant-input"}, ] - driver = OllamaPromptDriver(model="llama", stream=True) + driver = OllamaPromptDriver(model="llama", stream=True, extra_params={"foo": "bar"}) # When text_artifact = next(driver.try_stream(prompt_stack)) @@ -267,6 +268,7 @@ def test_try_stream_run(self, mock_stream_client): model=driver.model, options={"temperature": driver.temperature, "stop": [], "num_predict": driver.max_tokens}, stream=True, + foo="bar", ) if isinstance(text_artifact, TextDeltaMessageContent): assert text_artifact.text == "model-output" diff --git a/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py b/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py index 26358e132..f61df782e 100644 --- a/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py @@ -251,7 +251,6 @@ def prompt_stack(self): prompt_stack.add_assistant_message( ListArtifact( [ - TextArtifact("thought"), ActionArtifact(ToolAction(tag="MockTool_test", name="MockTool", path="test", input={"foo": "bar"})), ] ) @@ -288,7 +287,7 @@ def messages(self): }, {"role": "assistant", "content": "assistant-input"}, { - "content": [{"text": "thought", "type": "text"}], + "content": "", "role": "assistant", "tool_calls": [ { @@ -344,7 +343,9 @@ def test_init(self): def test_try_run(self, mock_chat_completion_create, prompt_stack, messages, 
use_native_tools): # Given driver = OpenAiChatPromptDriver( - model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, use_native_tools=use_native_tools + model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, + use_native_tools=use_native_tools, + extra_params={"foo": "bar"}, ) # When @@ -358,6 +359,7 @@ def test_try_run(self, mock_chat_completion_create, prompt_stack, messages, use_ messages=messages, seed=driver.seed, **{"tools": self.OPENAI_TOOLS, "tool_choice": driver.tool_choice} if use_native_tools else {}, + foo="bar", ) assert isinstance(message.value[0], TextArtifact) assert message.value[0].value == "model-output" @@ -440,7 +442,10 @@ def test_try_run_response_format_json_schema(self, mock_chat_completion_create, def test_try_stream_run(self, mock_chat_completion_stream_create, prompt_stack, messages, use_native_tools): # Given driver = OpenAiChatPromptDriver( - model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, stream=True, use_native_tools=use_native_tools + model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, + stream=True, + use_native_tools=use_native_tools, + extra_params={"foo": "bar"}, ) # When @@ -457,6 +462,7 @@ def test_try_stream_run(self, mock_chat_completion_stream_create, prompt_stack, seed=driver.seed, stream_options={"include_usage": True}, **{"tools": self.OPENAI_TOOLS, "tool_choice": driver.tool_choice} if use_native_tools else {}, + foo="bar", ) assert isinstance(event.content, TextDeltaMessageContent) diff --git a/tests/unit/drivers/sql/test_snowflake_sql_driver.py b/tests/unit/drivers/sql/test_snowflake_sql_driver.py index a758bb3a2..b13efbad3 100644 --- a/tests/unit/drivers/sql/test_snowflake_sql_driver.py +++ b/tests/unit/drivers/sql/test_snowflake_sql_driver.py @@ -63,32 +63,32 @@ def driver(self, mock_snowflake_engine, mock_snowflake_connection): def get_connection(): return mock_snowflake_connection - return SnowflakeSqlDriver(connection_func=get_connection, engine=mock_snowflake_engine) + return SnowflakeSqlDriver(get_connection=get_connection, engine=mock_snowflake_engine) - def test_connection_function_wrong_return_type(self): + def test_get_connectiontion_wrong_return_type(self): def get_connection() -> Any: return object with pytest.raises(ValueError): - SnowflakeSqlDriver(connection_func=get_connection) + SnowflakeSqlDriver(get_connection=get_connection) def test_connection_validation_no_schema(self, mock_snowflake_connection_no_schema): def get_connection(): return mock_snowflake_connection_no_schema with pytest.raises(ValueError): - SnowflakeSqlDriver(connection_func=get_connection) + SnowflakeSqlDriver(get_connection=get_connection) def test_connection_validation_no_database(self, mock_snowflake_connection_no_database): def get_connection(): return mock_snowflake_connection_no_database with pytest.raises(ValueError): - SnowflakeSqlDriver(connection_func=get_connection) + SnowflakeSqlDriver(get_connection=get_connection) def test_engine_url_validation_wrong_engine(self, mock_snowflake_connection): with pytest.raises(ValueError): - SnowflakeSqlDriver(connection_func=mock_snowflake_connection, engine=create_engine("sqlite:///:memory:")) + SnowflakeSqlDriver(get_connection=mock_snowflake_connection, engine=create_engine("sqlite:///:memory:")) def test_execute_query(self, driver): assert driver.execute_query("query") == [ diff --git a/tests/unit/drivers/structure_run/test_local_structure_run_driver.py b/tests/unit/drivers/structure_run/test_local_structure_run_driver.py index 2dd68e24e..4be4caf77 100644 --- 
a/tests/unit/drivers/structure_run/test_local_structure_run_driver.py +++ b/tests/unit/drivers/structure_run/test_local_structure_run_driver.py @@ -9,7 +9,7 @@ class TestLocalStructureRunDriver: def test_run(self): pipeline = Pipeline() - driver = LocalStructureRunDriver(structure_factory_fn=lambda: Agent()) + driver = LocalStructureRunDriver(create_structure=lambda: Agent()) task = StructureRunTask(driver=driver) @@ -22,7 +22,7 @@ def test_run_with_env(self, mock_config): mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output=lambda _: os.environ["KEY"]) agent = Agent() - driver = LocalStructureRunDriver(structure_factory_fn=lambda: agent, env={"KEY": "value"}) + driver = LocalStructureRunDriver(create_structure=lambda: agent, env={"KEY": "value"}) task = StructureRunTask(driver=driver) pipeline.add_task(task) diff --git a/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py b/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py index ab448c7c1..099fbc1ae 100644 --- a/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py +++ b/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py @@ -13,7 +13,7 @@ def driver(self): def test_text_to_audio_publish_events(self, driver): mock_handler = Mock() - EventBus.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(on_event=mock_handler)) driver.run_text_to_audio( ["foo", "bar"], diff --git a/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py b/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py index 430f67ef9..e1d65457d 100644 --- a/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py +++ b/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py @@ -15,7 +15,7 @@ def test_run(self, module): assert module.run(RagContext(query="test")).value == "mock output" def test_prompt(self, module): - system_message = module.default_system_template_generator( + system_message = module.default_generate_system_template( RagContext(query="test", before_query=["*RULESET*", "*META*"]), artifacts=[ TextArtifact("*TEXT SEGMENT 1*", reference=Reference(title="source 1")), diff --git a/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py b/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py index cc8d35f0e..bf7f23fab 100644 --- a/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py +++ b/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py @@ -20,7 +20,7 @@ def test_run(self, module): assert module.run(RagContext(query="test")).value == "mock output" def test_prompt(self, module): - system_message = module.default_system_template_generator( + system_message = module.default_generate_system_template( RagContext(query="test"), artifacts=[TextArtifact("*TEXT SEGMENT 1*"), TextArtifact("*TEXT SEGMENT 2*")], ) diff --git a/tests/unit/events/test_action_chunk_event.py b/tests/unit/events/test_action_chunk_event.py new file mode 100644 index 000000000..6c242a475 --- /dev/null +++ b/tests/unit/events/test_action_chunk_event.py @@ -0,0 +1,38 @@ +import pytest + +from griptape.events import ActionChunkEvent + + +class TestCompletionChunkEvent: + TEST_PARAMS = [ + {"name": "foo", "tag": None, "path": None, "partial_input": None}, + {"name": "foo", "tag": "bar", "path": None, "partial_input": None}, + {"name": 
"foo", "tag": "bar", "path": "baz", "partial_input": None}, + {"name": "foo", "tag": None, "path": "baz", "partial_input": None}, + {"name": "foo", "tag": "bar", "path": "baz", "partial_input": "qux"}, + {"name": None, "tag": None, "path": None, "partial_input": "qux"}, + ] + + @pytest.fixture() + def action_chunk_event(self): + return ActionChunkEvent( + partial_input="foo bar", + tag="foo", + name="bar", + path="baz", + ) + + def test_token(self, action_chunk_event): + assert action_chunk_event.partial_input == "foo bar" + assert action_chunk_event.index == 0 + assert action_chunk_event.tag == "foo" + assert action_chunk_event.name == "bar" + assert action_chunk_event.path == "baz" + + def test_to_dict(self, action_chunk_event): + assert action_chunk_event.to_dict()["partial_input"] == "foo bar" + + @pytest.mark.parametrize("params", TEST_PARAMS) + def test_str(self, params): + event = ActionChunkEvent(**params) + assert str(event) == event.__str__() diff --git a/tests/unit/events/test_base_chunk_event.py b/tests/unit/events/test_base_chunk_event.py new file mode 100644 index 000000000..80cedf353 --- /dev/null +++ b/tests/unit/events/test_base_chunk_event.py @@ -0,0 +1,18 @@ +import pytest + +from tests.mocks.mock_chunk_event import MockChunkEvent + + +class TestBaseChunkEvent: + @pytest.fixture() + def base_chunk_event(self): + return MockChunkEvent(token="foo", index=1) + + def test_token(self, base_chunk_event): + assert base_chunk_event.index == 1 + assert base_chunk_event.token == "foo" + assert str(base_chunk_event) == "mock foo" + + def test_to_dict(self, base_chunk_event): + assert base_chunk_event.to_dict()["index"] == 1 + assert base_chunk_event.to_dict()["token"] == "foo" diff --git a/tests/unit/events/test_base_event.py b/tests/unit/events/test_base_event.py index 6ce010ee9..58535eaac 100644 --- a/tests/unit/events/test_base_event.py +++ b/tests/unit/events/test_base_event.py @@ -4,8 +4,8 @@ from griptape.artifacts.base_artifact import BaseArtifact from griptape.events import ( + ActionChunkEvent, BaseEvent, - CompletionChunkEvent, FinishActionsSubtaskEvent, FinishPromptEvent, FinishStructureRunEvent, @@ -14,6 +14,7 @@ StartPromptEvent, StartStructureRunEvent, StartTaskEvent, + TextChunkEvent, ) from tests.mocks.mock_event import MockEvent @@ -244,15 +245,38 @@ def test_finish_structure_run_event_from_dict(self): assert event.output_task_output.value == "bar" assert event.meta == {"foo": "bar"} - def test_completion_chunk_event_from_dict(self): - dict_value = {"type": "CompletionChunkEvent", "timestamp": 123.0, "token": "foo", "meta": {}} + def test_text_chunk_event_from_dict(self): + dict_value = {"type": "TextChunkEvent", "timestamp": 123.0, "token": "foo", "index": 0, "meta": {}} event = BaseEvent.from_dict(dict_value) - assert isinstance(event, CompletionChunkEvent) + assert isinstance(event, TextChunkEvent) + assert event.index == 0 assert event.token == "foo" assert event.meta == {} + def test_action_chunk_event_from_dict(self): + dict_value = { + "type": "ActionChunkEvent", + "timestamp": 123.0, + "partial_input": "foo", + "tag": None, + "index": 1, + "name": "bar", + "path": "foobar", + "meta": {}, + } + + event = BaseEvent.from_dict(dict_value) + + assert isinstance(event, ActionChunkEvent) + assert event.partial_input == "foo" + assert event.tag is None + assert event.index == 1 + assert event.name == "bar" + assert event.path == "foobar" + assert event.meta == {} + def test_unsupported_from_dict(self): dict_value = {"type": "foo", "value": "foobar"} with 
pytest.raises(ValueError): diff --git a/tests/unit/events/test_completion_chunk_event.py b/tests/unit/events/test_completion_chunk_event.py deleted file mode 100644 index 943ea483f..000000000 --- a/tests/unit/events/test_completion_chunk_event.py +++ /dev/null @@ -1,15 +0,0 @@ -import pytest - -from griptape.events import CompletionChunkEvent - - -class TestCompletionChunkEvent: - @pytest.fixture() - def completion_chunk_event(self): - return CompletionChunkEvent(token="foo bar") - - def test_token(self, completion_chunk_event): - assert completion_chunk_event.token == "foo bar" - - def test_to_dict(self, completion_chunk_event): - assert completion_chunk_event.to_dict()["token"] == "foo bar" diff --git a/tests/unit/events/test_event_bus.py b/tests/unit/events/test_event_bus.py index cc432dafb..7afe0b466 100644 --- a/tests/unit/events/test_event_bus.py +++ b/tests/unit/events/test_event_bus.py @@ -1,6 +1,9 @@ from unittest.mock import Mock from griptape.events import EventBus, EventListener +from griptape.events.finish_prompt_event import FinishPromptEvent +from griptape.events.start_prompt_event import StartPromptEvent +from griptape.utils import with_contextvars from tests.mocks.mock_event import MockEvent @@ -10,19 +13,28 @@ def test_init(self): assert _EventBus() is _EventBus() - def test_add_event_listeners(self): + def test_add_event_listeners_same(self): EventBus.add_event_listeners([EventListener(), EventListener()]) + assert len(EventBus.event_listeners) == 1 + + def test_add_event_listeners(self): + EventBus.add_event_listeners([EventListener(on_event=lambda e: e), EventListener()]) assert len(EventBus.event_listeners) == 2 def test_remove_event_listeners(self): - listeners = [EventListener(), EventListener()] + listeners = [EventListener(on_event=lambda e: e), EventListener()] EventBus.add_event_listeners(listeners) EventBus.remove_event_listeners(listeners) assert len(EventBus.event_listeners) == 0 - def test_add_event_listener(self): + def test_add_event_listener_same(self): EventBus.add_event_listener(EventListener()) EventBus.add_event_listener(EventListener()) + assert len(EventBus.event_listeners) == 1 + + def test_add_event_listener(self): + EventBus.add_event_listener(EventListener(on_event=lambda e: e)) + EventBus.add_event_listener(EventListener()) assert len(EventBus.event_listeners) == 2 @@ -40,7 +52,7 @@ def test_publish_event(self): # Given mock_handler = Mock() mock_handler.return_value = None - EventBus.add_event_listeners([EventListener(handler=mock_handler)]) + EventBus.add_event_listeners([EventListener(on_event=mock_handler)]) mock_event = MockEvent() # When @@ -48,3 +60,132 @@ def test_publish_event(self): # Then mock_handler.assert_called_once_with(mock_event) + + def test_context_manager(self): + e1 = EventListener() + EventBus.add_event_listeners([e1]) + + with EventListener(lambda e: e) as e2: + assert EventBus.event_listeners == [e1, e2] + + assert EventBus.event_listeners == [e1] + + def test_context_manager_multiple(self): + e1 = EventListener() + EventBus.add_event_listener(e1) + + with EventListener(lambda e: e) as e2, EventListener(lambda e: e) as e3: + assert EventBus.event_listeners == [e1, e2, e3] + + assert EventBus.event_listeners == [e1] + + def test_nested_context_manager(self): + e1 = EventListener() + EventBus.add_event_listener(e1) + + with EventListener(lambda e: e) as e2: + assert EventBus.event_listeners == [e1, e2] + with EventListener(lambda e: e) as e3: + assert EventBus.event_listeners == [e1, e2, e3] + assert 
EventBus.event_listeners == [e1, e2] + + assert EventBus.event_listeners == [e1] + + def test_thread_pool_with_context_vars(self): + from concurrent import futures + + e1 = EventListener(event_types=[StartPromptEvent]) + EventBus.add_event_listener(e1) + + def handler(_) -> None: + with EventListener(event_types=[FinishPromptEvent]) as e2: + assert EventBus.event_listeners == [e1, e2] + + with futures.ThreadPoolExecutor() as executor: + list(executor.map(with_contextvars(handler), range(10))) + + assert EventBus.event_listeners == [e1] + + def test_thread_pool_without_context_vars(self): + from concurrent import futures + + e1 = EventListener(event_types=[StartPromptEvent]) + EventBus.add_event_listener(e1) + + def handler(_) -> None: + with EventListener(event_types=[FinishPromptEvent]) as e2: + assert EventBus.event_listeners == [e2] + + with futures.ThreadPoolExecutor() as executor: + list(executor.map(handler, range(10))) + + assert EventBus.event_listeners == [e1] + + def test_thread_with_contextvars(self): + import threading + + e1 = EventListener(lambda e: e) + EventBus.add_event_listener(e1) + + def handler() -> None: + assert EventBus.event_listeners == [e1] + e2 = EventListener(lambda e: e) + EventBus.add_event_listener(e2) + assert EventBus.event_listeners == [e1, e2] + EventBus.remove_event_listener(e2) + assert EventBus.event_listeners == [e1] + EventBus.clear_event_listeners() + assert EventBus.event_listeners == [] + EventBus.add_event_listener(e2) + assert EventBus.event_listeners == [e2] + + for _ in range(10): + thread = threading.Thread(target=with_contextvars(handler)) + thread.start() + thread.join() + + assert EventBus.event_listeners == [e1] + + def test_thread_without_contextvars(self): + import threading + + e1 = EventListener(lambda e: e) + EventBus.add_event_listener(e1) + + def handler() -> None: + assert EventBus.event_listeners == [] + e2 = EventListener(lambda e: e) + EventBus.add_event_listener(e2) + assert EventBus.event_listeners == [e2] + EventBus.remove_event_listener(e2) + assert EventBus.event_listeners == [] + EventBus.clear_event_listeners() + assert EventBus.event_listeners == [] + EventBus.add_event_listener(e2) + + for _ in range(10): + thread = threading.Thread(target=handler) + thread.start() + thread.join() + + assert EventBus.event_listeners == [e1] + + def test_coroutine(self): + import asyncio + + e1 = EventListener(lambda e: e) + EventBus.add_event_listener(e1) + + async def handler() -> None: + e2 = EventListener(lambda e: e) + EventBus.add_event_listener(e2) + assert EventBus.event_listeners == [e1, e2] + EventBus.remove_event_listener(e2) + assert EventBus.event_listeners == [e1] + EventBus.clear_event_listeners() + assert EventBus.event_listeners == [] + EventBus.add_event_listener(e2) + + asyncio.run(handler()) + + assert EventBus.event_listeners == [e1] diff --git a/tests/unit/events/test_event_listener.py b/tests/unit/events/test_event_listener.py index f35bc5416..d26107bc6 100644 --- a/tests/unit/events/test_event_listener.py +++ b/tests/unit/events/test_event_listener.py @@ -3,7 +3,8 @@ import pytest from griptape.events import ( - CompletionChunkEvent, + ActionChunkEvent, + BaseChunkEvent, EventBus, EventListener, FinishActionsSubtaskEvent, @@ -14,11 +15,13 @@ StartPromptEvent, StartStructureRunEvent, StartTaskEvent, + TextChunkEvent, ) from griptape.events.base_event import BaseEvent from griptape.structures import Pipeline from griptape.tasks import ActionsSubtask, ToolkitTask from tests.mocks.mock_event import MockEvent +from 
tests.mocks.mock_event_listener_driver import MockEventListenerDriver from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool @@ -26,7 +29,7 @@ class TestEventListener: @pytest.fixture() def pipeline(self, mock_config): - mock_config.drivers_config.prompt_driver = MockPromptDriver(stream=True) + mock_config.drivers_config.prompt_driver = MockPromptDriver(stream=True, use_native_tools=True) task = ToolkitTask("test", tools=[MockTool(name="Tool1")]) pipeline = Pipeline() @@ -39,15 +42,15 @@ def test_untyped_listeners(self, pipeline, mock_config): event_handler_1 = Mock() event_handler_2 = Mock() - EventBus.add_event_listeners([EventListener(handler=event_handler_1), EventListener(handler=event_handler_2)]) + EventBus.add_event_listeners([EventListener(on_event=event_handler_1), EventListener(on_event=event_handler_2)]) # can't mock subtask events, so must manually call pipeline.tasks[0].subtasks[0].before_run() pipeline.tasks[0].subtasks[0].after_run() pipeline.run() - assert event_handler_1.call_count == 9 - assert event_handler_2.call_count == 9 + assert event_handler_1.call_count == 10 + assert event_handler_2.call_count == 10 def test_typed_listeners(self, pipeline, mock_config): start_prompt_event_handler = Mock() @@ -58,7 +61,9 @@ def test_typed_listeners(self, pipeline, mock_config): finish_subtask_event_handler = Mock() start_structure_run_event_handler = Mock() finish_structure_run_event_handler = Mock() - completion_chunk_handler = Mock() + base_chunk_handler = Mock() + text_chunk_handler = Mock() + action_chunk_handler = Mock() EventBus.add_event_listeners( [ @@ -70,7 +75,9 @@ def test_typed_listeners(self, pipeline, mock_config): EventListener(finish_subtask_event_handler, event_types=[FinishActionsSubtaskEvent]), EventListener(start_structure_run_event_handler, event_types=[StartStructureRunEvent]), EventListener(finish_structure_run_event_handler, event_types=[FinishStructureRunEvent]), - EventListener(completion_chunk_handler, event_types=[CompletionChunkEvent]), + EventListener(base_chunk_handler, event_types=[BaseChunkEvent]), + EventListener(text_chunk_handler, event_types=[TextChunkEvent]), + EventListener(action_chunk_handler, event_types=[ActionChunkEvent]), ] ) @@ -87,7 +94,12 @@ def test_typed_listeners(self, pipeline, mock_config): finish_subtask_event_handler.assert_called_once() start_structure_run_event_handler.assert_called_once() finish_structure_run_event_handler.assert_called_once() - completion_chunk_handler.assert_called_once() + assert base_chunk_handler.call_count == 2 + assert action_chunk_handler.call_count == 2 + + pipeline.tasks[0].prompt_driver.use_native_tools = False + pipeline.run() + text_chunk_handler.assert_called_once() def test_add_remove_event_listener(self, pipeline): EventBus.clear_event_listeners() @@ -110,7 +122,7 @@ def test_add_remove_event_listener(self, pipeline): EventBus.remove_event_listener(event_listener_5) assert len(EventBus.event_listeners) == 0 - def test_publish_event(self): + def test_drop_event(self): mock_event_listener_driver = Mock() mock_event_listener_driver.try_publish_event_payload.return_value = None @@ -118,10 +130,27 @@ def event_handler(_: BaseEvent) -> None: return None mock_event = MockEvent() - event_listener = EventListener(event_handler, driver=mock_event_listener_driver, event_types=[MockEvent]) + event_listener = EventListener( + event_handler, event_listener_driver=mock_event_listener_driver, event_types=[MockEvent] + ) 
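# Editor's sketch (not part of this patch): a minimal use of the renamed
# EventListener API that the surrounding tests exercise -- `handler` is now
# `on_event`, `driver` is now `event_listener_driver`, and listeners can be
# scoped with a context manager. Only names visible in this diff are used.
from griptape.events import EventBus, EventListener, FinishStructureRunEvent


def print_event(event) -> None:
    # Any callable works; it receives each published event it is registered for.
    print(type(event).__name__)


# Long-lived registration (removed explicitly or via EventBus.clear_event_listeners()).
EventBus.add_event_listener(EventListener(on_event=print_event))

# Scoped registration: the context-manager tests above assert that a listener
# added this way is removed again when the block exits.
with EventListener(print_event, event_types=[FinishStructureRunEvent]):
    ...  # run a structure here; matching events are delivered to print_event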
event_listener.publish_event(mock_event) - mock_event_listener_driver.publish_event.assert_called_once_with(mock_event, flush=False) + mock_event_listener_driver.publish_event.assert_not_called() + + def test_publish_event(self): + mock_event_listener_driver = Mock() + mock_event_listener_driver.try_publish_event_payload.return_value = None + + def event_handler(e: BaseEvent) -> BaseEvent: + return e + + mock_event = MockEvent() + event_listener = EventListener( + event_handler, event_listener_driver=mock_event_listener_driver, event_types=[MockEvent] + ) + event_listener.publish_event(mock_event) + + mock_event_listener_driver.publish_event.assert_called_once_with(mock_event) def test_publish_transformed_event(self): mock_event_listener_driver = Mock() @@ -131,16 +160,18 @@ def event_handler(event: BaseEvent): return {"event": event.to_dict()} mock_event = MockEvent() - event_listener = EventListener(event_handler, driver=mock_event_listener_driver, event_types=[MockEvent]) + event_listener = EventListener( + event_handler, event_listener_driver=mock_event_listener_driver, event_types=[MockEvent] + ) event_listener.publish_event(mock_event) - mock_event_listener_driver.publish_event.assert_called_once_with({"event": mock_event.to_dict()}, flush=False) + mock_event_listener_driver.publish_event.assert_called_once_with({"event": mock_event.to_dict()}) def test_context_manager(self): e1 = EventListener() EventBus.add_event_listeners([e1]) - with EventListener() as e2: + with EventListener(lambda e: e) as e2: assert EventBus.event_listeners == [e1, e2] assert EventBus.event_listeners == [e1] @@ -149,7 +180,42 @@ def test_context_manager_multiple(self): e1 = EventListener() EventBus.add_event_listener(e1) - with EventListener() as e2, EventListener() as e3: + with EventListener(lambda e: e) as e2, EventListener(lambda e: e) as e3: assert EventBus.event_listeners == [e1, e2, e3] assert EventBus.event_listeners == [e1] + + def test_context_manager_nested(self): + e1 = EventListener() + EventBus.add_event_listener(e1) + + with EventListener(lambda e: e) as e2: + assert EventBus.event_listeners == [e1, e2] + with EventListener(lambda e: e) as e3: + assert EventBus.event_listeners == [e1, e2, e3] + assert EventBus.event_listeners == [e1, e2] + + assert EventBus.event_listeners == [e1] + + def test_publish_event_yes_flush(self): + mock_event_listener_driver = MockEventListenerDriver() + mock_event_listener_driver.flush_events = Mock(side_effect=mock_event_listener_driver.flush_events) + + event_listener = EventListener(event_listener_driver=mock_event_listener_driver, event_types=[MockEvent]) + event_listener.publish_event(MockEvent(), flush=True) + + mock_event_listener_driver.flush_events.assert_called_once() + assert mock_event_listener_driver.batch == [] + + def test_publish_event_no_flush(self): + mock_event_listener_driver = MockEventListenerDriver() + mock_event_listener_driver.flush_events = Mock(side_effect=mock_event_listener_driver.flush_events) + + event_listener = EventListener(event_listener_driver=mock_event_listener_driver, event_types=[MockEvent]) + mock_event = MockEvent() + event_listener.publish_event(mock_event, flush=False) + + mock_event_listener_driver.flush_events.assert_not_called() + assert mock_event_listener_driver.batch == [ + mock_event.to_dict(), + ] diff --git a/tests/unit/events/test_text_chunk_event.py b/tests/unit/events/test_text_chunk_event.py new file mode 100644 index 000000000..582de3ca1 --- /dev/null +++ b/tests/unit/events/test_text_chunk_event.py @@ -0,0 
+1,16 @@ +import pytest + +from griptape.events import TextChunkEvent + + +class TestCompletionChunkEvent: + @pytest.fixture() + def text_chunk_event(self): + return TextChunkEvent(token="foo bar") + + def test_token(self, text_chunk_event): + assert text_chunk_event.token == "foo bar" + assert str(text_chunk_event) == "foo bar" + + def test_to_dict(self, text_chunk_event): + assert text_chunk_event.to_dict()["token"] == "foo bar" diff --git a/tests/unit/memory/tool/test_task_memory.py b/tests/unit/memory/tool/test_task_memory.py index d2575959a..f4ea3579a 100644 --- a/tests/unit/memory/tool/test_task_memory.py +++ b/tests/unit/memory/tool/test_task_memory.py @@ -92,3 +92,27 @@ def test_load_artifacts_for_blob_list_artifact(self, memory): ) assert len(memory.load_artifacts("test")) == 2 + + def test_to_dict(self, memory): + expected_task_memory_dict = { + "type": memory.type, + "name": memory.name, + "namespace_storage": memory.namespace_storage, + "namespace_metadata": memory.namespace_metadata, + } + assert expected_task_memory_dict == memory.to_dict() + + def test_from_dict(self, memory): + serialized_memory = memory.to_dict() + assert isinstance(serialized_memory, dict) + + deserialized_memory = memory.from_dict(serialized_memory) + assert isinstance(deserialized_memory, TaskMemory) + + deserialized_memory.process_output( + MockTool().test, + ActionsSubtask(), + ListArtifact([BlobArtifact(b"foo", name="test1"), BlobArtifact(b"foo", name="test2")], name="test"), + ) + + assert len(deserialized_memory.load_artifacts("test")) == 2 diff --git a/tests/unit/mixins/test_futures_executor_mixin.py b/tests/unit/mixins/test_futures_executor_mixin.py index 3be336687..437903fe3 100644 --- a/tests/unit/mixins/test_futures_executor_mixin.py +++ b/tests/unit/mixins/test_futures_executor_mixin.py @@ -7,4 +7,4 @@ class TestFuturesExecutorMixin: def test_futures_executor(self): executor = futures.ThreadPoolExecutor() - assert MockFuturesExecutor(futures_executor_fn=lambda: executor).futures_executor == executor + assert MockFuturesExecutor(create_futures_executor=lambda: executor).futures_executor == executor diff --git a/tests/unit/mixins/test_runnable_mixin.py b/tests/unit/mixins/test_runnable_mixin.py new file mode 100644 index 000000000..b3e0a3f53 --- /dev/null +++ b/tests/unit/mixins/test_runnable_mixin.py @@ -0,0 +1,21 @@ +from unittest.mock import Mock + +from tests.unit.tasks.test_base_task import MockTask + + +class TestRunnableMixin: + def test_before_run(self): + mock_on_before_run = Mock() + mock_task = MockTask(on_before_run=mock_on_before_run) + + mock_task.run() + + assert mock_on_before_run.called + + def test_after_run(self): + mock_on_after_run = Mock() + mock_task = MockTask(on_after_run=mock_on_after_run) + + mock_task.run() + + assert mock_on_after_run.called diff --git a/tests/unit/mixins/test_seriliazable_mixin.py b/tests/unit/mixins/test_seriliazable_mixin.py index afb3d1eb4..dc30848f2 100644 --- a/tests/unit/mixins/test_seriliazable_mixin.py +++ b/tests/unit/mixins/test_seriliazable_mixin.py @@ -7,7 +7,11 @@ from griptape.memory import TaskMemory from griptape.memory.structure import ConversationMemory from griptape.schemas import BaseSchema +from griptape.tasks.base_task import BaseTask +from griptape.tasks.tool_task import ToolTask +from griptape.tools.base_tool import BaseTool from tests.mocks.mock_serializable import MockSerializable +from tests.mocks.mock_tool.tool import MockTool class TestSerializableMixin: @@ -15,10 +19,19 @@ def test_get_schema(self): assert 
isinstance(BaseArtifact.get_schema("TextArtifact"), BaseSchema) assert isinstance(TextArtifact.get_schema(), BaseSchema) + assert isinstance(BaseTool.get_schema("MockTool", module_name="tests.mocks.mock_tool.tool"), BaseSchema) + def test_from_dict(self): assert isinstance(BaseArtifact.from_dict({"type": "TextArtifact", "value": "foobar"}), TextArtifact) assert isinstance(TextArtifact.from_dict({"value": "foobar"}), TextArtifact) + assert isinstance( + BaseTask.from_dict( + {"type": "ToolTask", "tool": {"type": "MockTool", "module_name": "tests.mocks.mock_tool.tool"}}, + ), + ToolTask, + ) + def test_from_json(self): assert isinstance(BaseArtifact.from_json('{"type": "TextArtifact", "value": "foobar"}'), TextArtifact) assert isinstance(TextArtifact.from_json('{"value": "foobar"}'), TextArtifact) @@ -56,6 +69,8 @@ def test_import_class_rec(self): with pytest.raises(ValueError): MockSerializable._import_cls_rec("griptape.memory.task", "ConversationMemory") + assert MockSerializable._import_cls_rec("tests.mocks.mock_tool.tool", "MockTool") == MockTool + def test_nested_optional_serializable(self): assert MockSerializable(nested=None).to_dict().get("nested") is None diff --git a/tests/unit/schemas/test_base_schema.py b/tests/unit/schemas/test_base_schema.py index f3a3f0c1f..4138d585f 100644 --- a/tests/unit/schemas/test_base_schema.py +++ b/tests/unit/schemas/test_base_schema.py @@ -1,6 +1,7 @@ from __future__ import annotations from datetime import datetime +from enum import Enum from typing import Literal, Optional, Union import pytest @@ -11,19 +12,35 @@ from griptape.schemas import PolymorphicSchema from griptape.schemas.base_schema import BaseSchema from griptape.schemas.bytes_field import Bytes +from griptape.schemas.union_field import Union as UnionField from tests.mocks.mock_serializable import MockSerializable +class MockEnum(Enum): + FOO = ("BAR",) + BAZ = ("QUX",) + BAR = ("FOO",) + + +class UnsupportedType: + pass + + class TestBaseSchema: def test_from_attrs_cls(self): schema = BaseSchema.from_attrs_cls(MockSerializable)() assert isinstance(schema, BaseSchema) assert isinstance(schema.fields["foo"], fields.Str) - assert isinstance(schema.fields["bar"], fields.Str) - assert schema.fields["bar"].allow_none - assert isinstance(schema.fields["baz"], fields.List) - assert isinstance(schema.fields["baz"].inner, fields.Int) + # Check if "bar" is a String that allows None (Optional) + assert isinstance(schema.fields["bar"], UnionField) + assert isinstance(schema.fields["bar"]._candidate_fields[0], fields.Str) + assert schema.fields["bar"].allow_none is True + + assert isinstance(schema.fields["baz"], UnionField) + assert isinstance(schema.fields["baz"]._candidate_fields[0], fields.List) + assert isinstance(schema.fields["baz"]._candidate_fields[0].inner, fields.Integer) + assert schema.fields["baz"].allow_none is True with pytest.raises(ValueError): BaseSchema.from_attrs_cls(TextLoader) @@ -47,7 +64,6 @@ def test_get_field_for_type(self): assert isinstance(BaseSchema._get_field_for_type(bool), fields.Bool) assert isinstance(BaseSchema._get_field_for_type(tuple), fields.Raw) assert isinstance(BaseSchema._get_field_for_type(dict), fields.Dict) - with pytest.raises(ValueError): BaseSchema._get_field_for_type(list) @@ -69,11 +85,19 @@ def test_get_field_type_info(self): assert BaseSchema._get_field_type_info(Literal[5]) == (int, (), False) # pyright: ignore[reportArgumentType] def test_is_list_sequence(self): - assert BaseSchema.is_list_sequence(list) - assert not 
BaseSchema.is_list_sequence(tuple) - assert not BaseSchema.is_list_sequence(bytes) - assert not BaseSchema.is_list_sequence(str) - assert not BaseSchema.is_list_sequence(int) + assert BaseSchema._is_list_sequence(list) + assert not BaseSchema._is_list_sequence(tuple) + assert not BaseSchema._is_list_sequence(bytes) + assert not BaseSchema._is_list_sequence(str) + assert not BaseSchema._is_list_sequence(int) + + def test_is_union(self): + assert BaseSchema._is_union(Union[str, int]) + assert BaseSchema._is_union(Union[str, Union[int, str]]) + assert not BaseSchema._is_union(tuple) + assert not BaseSchema._is_union(bytes) + assert not BaseSchema._is_union(str) + assert not BaseSchema._is_union(int) def test_load(self): schema = BaseSchema.from_attrs_cls(MockSerializable)() @@ -86,3 +110,61 @@ def test_load_with_unknown_attribute(self): schema = BaseSchema.from_attrs_cls(MockSerializable)() with pytest.raises(TypeError): schema.load({"foo": "baz", "bar": "qux", "baz": [1, 2, 3], "zoop": "bop"}) + + def test_handle_union_in_list(self): + field = BaseSchema._get_field_for_type(list[Union[str, list[str]]]) + assert isinstance(field, fields.List) + assert isinstance(field.inner, UnionField) + + union_field = field.inner + assert isinstance(union_field, UnionField) + + candidate_fields = [type(f) for f in union_field._candidate_fields] + assert fields.Str in candidate_fields + assert fields.List in candidate_fields + + def test_handle_union_outside_list(self): + field = BaseSchema._get_field_for_type(Union[str, int]) + assert isinstance(field, UnionField) + + candidate_fields = [type(f) for f in field._candidate_fields] + assert fields.Str in candidate_fields + assert fields.Integer in candidate_fields + + def test_handle_none(self): + field = BaseSchema._get_field_for_type(None) + assert isinstance(field, fields.Constant) + assert field.allow_none is True + assert field.constant is None + + def test_is_enum(self): + result = BaseSchema._is_enum(MockEnum) + assert result is True + + def test_handle_enum(self): + field = BaseSchema._get_field_for_type(MockEnum) + assert isinstance(field, fields.Str) + + def test_handle_optional_enum(self): + field = BaseSchema._get_field_for_type(Union[MockEnum, None]) + assert isinstance(field, UnionField) + assert isinstance(field._candidate_fields[0], fields.Str) + assert field.allow_none is True + + def test_handle_unsupported_type(self): + with pytest.raises(ValueError): + BaseSchema._get_field_for_type(UnsupportedType) + + def test_handle_none_list_field(self): + # Test that _handle_list raises a ValueError for list elements that are None + with pytest.raises(ValueError, match="List elements cannot be None: None"): + BaseSchema._handle_list(list[None], optional=False) + + def test_handle_union_exception(self): + with pytest.raises(ValueError, match="Unsupported UnionType field: "): + BaseSchema._handle_union(Union[None], optional=False) + + def test_handle_union_optional(self): + field = BaseSchema._handle_union(Union[str, None], optional=True) + assert isinstance(field, UnionField) + assert field.allow_none is True diff --git a/tests/unit/schemas/test_union_field.py b/tests/unit/schemas/test_union_field.py new file mode 100644 index 000000000..cb432e58a --- /dev/null +++ b/tests/unit/schemas/test_union_field.py @@ -0,0 +1,74 @@ +import marshmallow +import pytest +from marshmallow import fields + +from griptape.schemas.union_field import ExceptionGroupError, Union + + +class InvalidType: + """A custom class that will fail when attempting to serialize with 
Integer or String fields.""" + + def __str__(self) -> str: + raise TypeError("Cannot serialize InvalidType to string") + + def __int__(self) -> int: + raise ValueError("Cannot serialize InvalidType to int") + + +class TestUnionField: + @pytest.fixture() + def sample_schema(self): + class SampleSchema(marshmallow.Schema): + name = Union(fields=[fields.Integer(), fields.String()]) + + return SampleSchema() + + def test_union_field_valid_string(self, sample_schema): + input_data = {"name": "Alice"} + result = sample_schema.load(input_data) + assert result["name"] == "Alice" + + def test_union_field_valid_integer(self, sample_schema): + input_data = {"name": 42} + result = sample_schema.load(input_data) + assert result["name"] == 42 + + def test_union_field_invalid_value(self, sample_schema): + input_data = {"name": InvalidType} + with pytest.raises(marshmallow.exceptions.ValidationError) as exc_info: + sample_schema.load(input_data) + assert "name" in exc_info.value.messages + assert len(exc_info.value.messages["name"]) > 0 + + def test_union_field_serialize_string(self, sample_schema): + input_data = {"name": "Alice"} + result = sample_schema.dump(input_data) + assert result["name"] == "Alice" + + def test_union_field_serialize_integer(self, sample_schema): + input_data = {"name": 42} + result = sample_schema.dump(input_data) + assert result["name"] == 42 + + def test_union_field_reverse_serialization(self, sample_schema): + class ReverseSchema(marshmallow.Schema): + value = Union(fields=[fields.Integer(), fields.String()], reverse_serialize_candidates=True) + + schema = ReverseSchema() + input_data = {"value": "Test"} + result = schema.dump(input_data) + assert result["value"] == "Test" + + def test_union_field_serialize_type_error(self): + class SampleSchema(marshmallow.Schema): + name = Union(fields=[fields.Integer(), fields.String()]) + + schema = SampleSchema() + + input_data = {"name": InvalidType()} + + with pytest.raises(ExceptionGroupError) as exc_info: + schema.dump(input_data) + + assert "All serializers raised exceptions" in str(exc_info.value) + assert len(exc_info.value.errors) > 0 diff --git a/tests/unit/structures/test_agent.py b/tests/unit/structures/test_agent.py index d522a43a2..86f4a1141 100644 --- a/tests/unit/structures/test_agent.py +++ b/tests/unit/structures/test_agent.py @@ -1,3 +1,5 @@ +from unittest.mock import Mock + import pytest from griptape.memory import TaskMemory @@ -239,3 +241,71 @@ def finished_tasks(self): def test_fail_fast(self): with pytest.raises(ValueError): Agent(prompt_driver=MockPromptDriver(), fail_fast=True) + + def test_task_outputs(self): + task = PromptTask("test prompt") + agent = Agent(prompt_driver=MockPromptDriver()) + + agent.add_task(task) + + assert len(agent.task_outputs) == 1 + assert agent.task_outputs[task.id] is None + agent.run("hello") + + assert len(agent.task_outputs) == 1 + assert agent.task_outputs[task.id] == task.output + + def test_to_dict(self): + task = PromptTask("test prompt") + agent = Agent(prompt_driver=MockPromptDriver()) + agent.add_task(task) + expected_agent_dict = { + "type": "Agent", + "id": agent.id, + "tasks": [ + { + "type": agent.tasks[0].type, + "id": agent.tasks[0].id, + "state": str(agent.tasks[0].state), + "parent_ids": agent.tasks[0].parent_ids, + "child_ids": agent.tasks[0].child_ids, + "max_meta_memory_entries": agent.tasks[0].max_meta_memory_entries, + "context": agent.tasks[0].context, + } + ], + "conversation_memory": { + "type": agent.conversation_memory.type, + "runs": 
agent.conversation_memory.runs, + "meta": agent.conversation_memory.meta, + "max_runs": agent.conversation_memory.max_runs, + }, + } + assert agent.to_dict() == expected_agent_dict + + def test_from_dict(self): + task = PromptTask("test prompt") + agent = Agent(prompt_driver=MockPromptDriver()) + agent.add_task(task) + + serialized_agent = agent.to_dict() + assert isinstance(serialized_agent, dict) + + deserialized_agent = Agent.from_dict(serialized_agent) + assert isinstance(deserialized_agent, Agent) + + assert deserialized_agent.task_outputs[task.id] is None + deserialized_agent.run() + + assert len(deserialized_agent.task_outputs) == 1 + assert deserialized_agent.task_outputs[task.id].value == "mock output" + + def test_runnable_mixin(self): + mock_on_before_run = Mock() + mock_after_run = Mock() + agent = Agent(prompt_driver=MockPromptDriver(), on_before_run=mock_on_before_run, on_after_run=mock_after_run) + + args = "test" + agent.run(args) + + mock_on_before_run.assert_called_once_with(agent) + mock_after_run.assert_called_once_with(agent) diff --git a/tests/unit/structures/test_pipeline.py b/tests/unit/structures/test_pipeline.py index e6fa6d770..6452ad5e4 100644 --- a/tests/unit/structures/test_pipeline.py +++ b/tests/unit/structures/test_pipeline.py @@ -18,14 +18,14 @@ def fn(task): time.sleep(2) return TextArtifact("done") - return CodeExecutionTask(run_fn=fn) + return CodeExecutionTask(on_run=fn) @pytest.fixture() def error_artifact_task(self): def fn(task): return ErrorArtifact("error") - return CodeExecutionTask(run_fn=fn) + return CodeExecutionTask(on_run=fn) def test_init(self): pipeline = Pipeline(rulesets=[Ruleset("TestRuleset", [Rule("test")])]) @@ -360,7 +360,8 @@ def test_context(self): context = pipeline.context(task) - assert context["parent_output"] == parent.output.to_text() + assert context["parent_output"] == parent.output + assert context["task_outputs"] == pipeline.task_outputs assert context["structure"] == pipeline assert context["parent"] == parent assert context["child"] == child @@ -398,3 +399,59 @@ def test_add_duplicate_task_directly(self): with pytest.raises(ValueError, match=f"Duplicate task with id {task.id} found."): pipeline.run() + + def test_task_outputs(self): + task = PromptTask("test") + pipeline = Pipeline() + + pipeline + [task] + + assert len(pipeline.task_outputs) == 1 + assert pipeline.task_outputs[task.id] is None + pipeline.run() + assert len(pipeline.task_outputs) == 1 + assert pipeline.task_outputs[task.id] == task.output + + def test_to_dict(self): + task = PromptTask("test") + pipeline = Pipeline() + pipeline + [task] + expected_pipeline_dict = { + "type": pipeline.type, + "id": pipeline.id, + "tasks": [ + { + "type": pipeline.tasks[0].type, + "id": pipeline.tasks[0].id, + "state": str(pipeline.tasks[0].state), + "parent_ids": pipeline.tasks[0].parent_ids, + "child_ids": pipeline.tasks[0].child_ids, + "max_meta_memory_entries": pipeline.tasks[0].max_meta_memory_entries, + "context": pipeline.tasks[0].context, + } + ], + "conversation_memory": { + "type": pipeline.conversation_memory.type, + "runs": pipeline.conversation_memory.runs, + "meta": pipeline.conversation_memory.meta, + "max_runs": pipeline.conversation_memory.max_runs, + }, + "fail_fast": pipeline.fail_fast, + } + assert pipeline.to_dict() == expected_pipeline_dict + + def test_from_dict(self): + task = PromptTask("test") + pipeline = Pipeline(tasks=[task]) + + serialized_pipeline = pipeline.to_dict() + assert isinstance(serialized_pipeline, dict) + + deserialized_pipeline 
= Pipeline.from_dict(serialized_pipeline) + assert isinstance(deserialized_pipeline, Pipeline) + + assert deserialized_pipeline.task_outputs[task.id] is None + deserialized_pipeline.run() + + assert len(deserialized_pipeline.task_outputs) == 1 + assert deserialized_pipeline.task_outputs[task.id].value == "mock output" diff --git a/tests/unit/structures/test_workflow.py b/tests/unit/structures/test_workflow.py index e3bd8e886..9cd34291f 100644 --- a/tests/unit/structures/test_workflow.py +++ b/tests/unit/structures/test_workflow.py @@ -17,14 +17,14 @@ def fn(task): time.sleep(2) return TextArtifact("done") - return CodeExecutionTask(run_fn=fn) + return CodeExecutionTask(on_run=fn) @pytest.fixture() def error_artifact_task(self): def fn(task): return ErrorArtifact("error") - return CodeExecutionTask(run_fn=fn) + return CodeExecutionTask(on_run=fn) def test_init(self): workflow = Workflow(rulesets=[Ruleset("TestRuleset", [Rule("test")])]) @@ -737,13 +737,14 @@ def test_context(self): context = workflow.context(task) - assert context["parent_outputs"] == {parent.id: ""} + assert context["parent_outputs"] == {} workflow.run() context = workflow.context(task) - assert context["parent_outputs"] == {parent.id: parent.output.to_text()} + assert context["task_outputs"] == workflow.task_outputs + assert context["parent_outputs"] == {parent.id: parent.output} assert context["parents_output_text"] == "mock output" assert context["structure"] == workflow assert context["parents"] == {parent.id: parent} @@ -966,3 +967,59 @@ def _validate_topology_4(workflow) -> None: publish_website = workflow.find_task("publish_website") assert publish_website.parent_ids == ["compare_movies"] assert publish_website.child_ids == ["summarize_to_slack"] + + def test_task_outputs(self): + task = PromptTask("test") + workflow = Workflow(tasks=[task]) + + assert len(workflow.task_outputs) == 1 + assert workflow.task_outputs[task.id] is None + + workflow.run() + + assert len(workflow.task_outputs) == 1 + assert workflow.task_outputs[task.id].value == "mock output" + + def test_to_dict(self): + task = PromptTask("test") + workflow = Workflow(tasks=[task]) + + expected_workflow_dict = { + "type": workflow.type, + "id": workflow.id, + "tasks": [ + { + "type": workflow.tasks[0].type, + "id": workflow.tasks[0].id, + "state": str(workflow.tasks[0].state), + "parent_ids": workflow.tasks[0].parent_ids, + "child_ids": workflow.tasks[0].child_ids, + "max_meta_memory_entries": workflow.tasks[0].max_meta_memory_entries, + "context": workflow.tasks[0].context, + } + ], + "conversation_memory": { + "type": workflow.conversation_memory.type, + "runs": workflow.conversation_memory.runs, + "meta": workflow.conversation_memory.meta, + "max_runs": workflow.conversation_memory.max_runs, + }, + "fail_fast": workflow.fail_fast, + } + assert workflow.to_dict() == expected_workflow_dict + + def test_from_dict(self): + task = PromptTask("test") + workflow = Workflow(tasks=[task]) + + serialized_workflow = workflow.to_dict() + assert isinstance(serialized_workflow, dict) + + deserialized_workflow = Workflow.from_dict(serialized_workflow) + assert isinstance(deserialized_workflow, Workflow) + + assert deserialized_workflow.task_outputs[task.id] is None + deserialized_workflow.run() + + assert len(deserialized_workflow.task_outputs) == 1 + assert deserialized_workflow.task_outputs[task.id].value == "mock output" diff --git a/tests/unit/tasks/test_actions_subtask.py b/tests/unit/tasks/test_actions_subtask.py index e25a42120..9cf404a9d 100644 --- 
a/tests/unit/tasks/test_actions_subtask.py +++ b/tests/unit/tasks/test_actions_subtask.py @@ -9,7 +9,7 @@ class TestActionsSubtask: - def test_basic_input(self): + def test_prompt_input(self): valid_input = ( "Thought: need to test\n" 'Actions: [{"tag": "foo", "name": "MockTool", "path": "test", "input": {"values": {"test": "value"}}}]\n' @@ -25,22 +25,31 @@ def test_basic_input(self): assert json_dict[0]["name"] == "MockTool" assert json_dict[0]["path"] == "test" assert json_dict[0]["input"] == {"values": {"test": "value"}} + assert subtask.thought == "need to test" + assert subtask.output is None - def test_action_input(self): - valid_input = ActionArtifact( - ToolAction(tag="foo", name="MockTool", path="test", input={"values": {"test": "value"}}) + def test_artifact_input(self): + valid_input = ListArtifact( + [ + TextArtifact("need to test"), + ActionArtifact( + ToolAction(tag="foo", name="MockTool", path="test", input={"values": {"test": "value"}}) + ), + TextArtifact("answer"), + ] ) task = ToolkitTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) - assert subtask.thought is None assert json_dict[0]["name"] == "MockTool" assert json_dict[0]["path"] == "test" assert json_dict[0]["input"] == {"values": {"test": "value"}} + assert subtask.thought == "need to test" + assert subtask.output is None - def test_action_and_thought_input(self): + def test_artifact_action_and_thought_input(self): valid_input = ListArtifact( [ TextArtifact("thought"), @@ -59,6 +68,42 @@ def test_action_and_thought_input(self): assert json_dict[0]["path"] == "test" assert json_dict[0]["input"] == {"values": {"test": "value"}} + def test_prompt_answer(self): + valid_input = "Answer: test output" + + task = ToolkitTask(tools=[MockTool()]) + Agent().add_task(task) + subtask = task.add_subtask(ActionsSubtask(valid_input)) + + assert subtask.thought is None + assert subtask.actions == [] + assert subtask.output.value == "test output" + + def test_prompt_implicit_answer(self): + valid_input = "test output" + + task = ToolkitTask(tools=[MockTool()]) + Agent().add_task(task) + subtask = task.add_subtask(ActionsSubtask(valid_input)) + + assert subtask.thought is None + assert subtask.actions == [] + assert subtask.output.value == "test output" + + def test_artifact_answer(self): + valid_input = ListArtifact( + [ + TextArtifact("answer"), + ] + ) + task = ToolkitTask(tools=[MockTool()]) + Agent().add_task(task) + subtask = task.add_subtask(ActionsSubtask(valid_input)) + + assert subtask.thought is None + assert subtask.actions == [] + assert subtask.output.value == "answer" + def test_callable_input(self): valid_input = ListArtifact( [ @@ -146,6 +191,8 @@ def test_invalid_actions(self): assert isinstance(subtask.output, ErrorArtifact) assert "Actions JSON decoding error" in subtask.output.value + assert subtask.thought == "need to test" + assert subtask.actions == [] def test_implicit_values(self): valid_input = ( @@ -173,7 +220,7 @@ def test_execute_tool(self): task = ToolkitTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) - subtask.execute() + subtask.run() assert isinstance(subtask.output, ListArtifact) assert isinstance(subtask.output.value[0], TextArtifact) @@ -188,7 +235,7 @@ def test_execute_tool_exception(self): task = ToolkitTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) - subtask.execute() + 
subtask.run() assert isinstance(subtask.output, ListArtifact) assert isinstance(subtask.output.value[0], ErrorArtifact) diff --git a/tests/unit/tasks/test_base_task.py b/tests/unit/tasks/test_base_task.py index 94a13b938..4c5697621 100644 --- a/tests/unit/tasks/test_base_task.py +++ b/tests/unit/tasks/test_base_task.py @@ -14,11 +14,11 @@ class TestBaseTask: @pytest.fixture() def task(self): - EventBus.add_event_listeners([EventListener(handler=Mock())]) + EventBus.add_event_listeners([EventListener(on_event=Mock())]) agent = Agent( tools=[MockTool()], ) - EventBus.add_event_listeners([EventListener(handler=Mock())]) + EventBus.add_event_listeners([EventListener(on_event=Mock())]) agent.add_task(MockTask("foobar", max_meta_memory_entries=2)) @@ -52,9 +52,8 @@ def test_parent_outputs(self, task): parent_3.output = None assert child.parent_outputs == { - parent_1.id: parent_1.output.to_text(), - parent_2.id: parent_2.output.to_text(), - parent_3.id: "", + parent_1.id: parent_1.output, + parent_2.id: parent_2.output, } def test_parents_output(self, task): @@ -112,29 +111,37 @@ def test_children_property_no_structure(self, task): assert len(parent.children) == 3 - def test_execute_publish_events(self, task): - task.execute() + def test_run_publish_events(self, task): + task.run() - assert EventBus.event_listeners[0].handler.call_count == 2 + assert EventBus.event_listeners[0].on_event.call_count == 2 def test_add_parent(self, task): - parent = MockTask("parent foobar", id="parent_foobar") + agent = Agent() + parent = MockTask("parent foobar", id="parent_foobar", structure=agent) + result = task.add_parent(parent) result = task.add_parent(parent) assert parent.id in task.parent_ids assert task.id in parent.child_ids assert result == task + assert agent.tasks == [parent] + def test_add_child(self, task): - child = MockTask("child foobar", id="child_foobar") + agent = Agent() + child = MockTask("child foobar", id="child_foobar", structure=agent) + result = task.add_child(child) result = task.add_child(child) assert child.id in task.child_ids assert task.id in child.parent_ids assert result == task + assert agent.tasks == [child] + def test_add_parent_bitshift(self, task): parent = MockTask("parent foobar", id="parent_foobar") @@ -152,3 +159,51 @@ def test_add_child_bitshift(self, task): assert child.id in task.child_ids assert task.id in child.parent_ids assert added_task == child + + def test_to_dict(self, task): + expected_task_dict = { + "type": task.type, + "id": task.id, + "state": str(task.state), + "parent_ids": task.parent_ids, + "child_ids": task.child_ids, + "max_meta_memory_entries": task.max_meta_memory_entries, + "context": task.context, + } + assert expected_task_dict == task.to_dict() + + def test_from_dict(self): + task = MockTask("Foobar2", id="Foobar2") + + serialized_task = task.to_dict() + assert isinstance(serialized_task, dict) + + deserialized_task = MockTask.from_dict(serialized_task) + assert isinstance(deserialized_task, MockTask) + + workflow = Workflow() + workflow.add_task(deserialized_task) + + assert workflow.tasks == [deserialized_task] + + workflow.run() + + assert str(workflow.tasks[0].state) == "State.FINISHED" + assert workflow.tasks[0].id == deserialized_task.id + assert workflow.tasks[0].output.value == "foobar" + + def test_runnable_mixin(self): + mock_on_before_run = Mock() + mock_after_run = Mock() + task = MockTask("foobar", on_before_run=mock_on_before_run, on_after_run=mock_after_run) + + task.run() + + mock_on_before_run.assert_called_once_with(task) + 
mock_after_run.assert_called_once_with(task) + + def test_full_context(self, task): + task.structure = Agent() + task.structure._execution_args = ("foo", "bar") + + assert task.full_context == {"args": ("foo", "bar"), "structure": task.structure} diff --git a/tests/unit/tasks/test_base_text_input_task.py b/tests/unit/tasks/test_base_text_input_task.py index 0adab72ff..1be3904c5 100644 --- a/tests/unit/tasks/test_base_text_input_task.py +++ b/tests/unit/tasks/test_base_text_input_task.py @@ -44,7 +44,7 @@ def test_full_context(self): context = subtask.full_context assert context["foo"] == "bar" - assert context["parent_output"] == parent.output.to_text() + assert context["parent_output"] == parent.output assert context["structure"] == pipeline assert context["parent"] == parent assert context["child"] == child diff --git a/tests/unit/tasks/test_code_execution_task.py b/tests/unit/tasks/test_code_execution_task.py index f0eb37ede..436e8ba87 100644 --- a/tests/unit/tasks/test_code_execution_task.py +++ b/tests/unit/tasks/test_code_execution_task.py @@ -21,21 +21,21 @@ def deliberate_exception(task: CodeExecutionTask) -> BaseArtifact: class TestCodeExecutionTask: def test_hello_world_fn(self): - task = CodeExecutionTask(run_fn=hello_world) + task = CodeExecutionTask(on_run=hello_world) - assert task.run().value == "Hello World!" + assert task.try_run().value == "Hello World!" # Using a Pipeline # Overriding the input because we are implementing the task not the Pipeline def test_noop_fn(self): pipeline = Pipeline() - task = CodeExecutionTask("No Op", run_fn=non_outputting) + task = CodeExecutionTask("No Op", on_run=non_outputting) pipeline.add_task(task) - temp = task.run() + temp = task.try_run() assert temp.value == "No Op" def test_error_fn(self): - task = CodeExecutionTask(run_fn=deliberate_exception) + task = CodeExecutionTask(on_run=deliberate_exception) with pytest.raises(ValueError): - task.run() + task.try_run() diff --git a/tests/unit/tasks/test_image_query_task.py b/tests/unit/tasks/test_image_query_task.py index 01c116772..349340ad4 100644 --- a/tests/unit/tasks/test_image_query_task.py +++ b/tests/unit/tasks/test_image_query_task.py @@ -73,4 +73,4 @@ def test_run(self, image_query_engine, text_artifact, image_artifact): def test_bad_run(self, image_query_engine, text_artifact, image_artifact): with pytest.raises(ValueError, match="All inputs"): - ImageQueryTask(("foo", [image_artifact, text_artifact]), image_query_engine=image_query_engine).run() + ImageQueryTask(("foo", [image_artifact, text_artifact]), image_query_engine=image_query_engine).try_run() diff --git a/tests/unit/tasks/test_inpainting_image_generation_task.py b/tests/unit/tasks/test_inpainting_image_generation_task.py index 5c4507d49..94f2d69a8 100644 --- a/tests/unit/tasks/test_inpainting_image_generation_task.py +++ b/tests/unit/tasks/test_inpainting_image_generation_task.py @@ -43,10 +43,10 @@ def test_list_input(self, text_artifact: TextArtifact, image_artifact: ImageArti def test_bad_input(self, image_artifact): with pytest.raises(ValueError): - InpaintingImageGenerationTask(("foo", "bar", image_artifact)).run() # pyright: ignore[reportArgumentType] + InpaintingImageGenerationTask(("foo", "bar", image_artifact)).try_run() # pyright: ignore[reportArgumentType] with pytest.raises(ValueError): - InpaintingImageGenerationTask(("foo", image_artifact, "baz")).run() # pyright: ignore[reportArgumentType] + InpaintingImageGenerationTask(("foo", image_artifact, "baz")).try_run() # pyright: ignore[reportArgumentType] def 
test_config_image_generation_engine(self, text_artifact, image_artifact): task = InpaintingImageGenerationTask((text_artifact, image_artifact, image_artifact)) diff --git a/tests/unit/tasks/test_outpainting_image_generation_task.py b/tests/unit/tasks/test_outpainting_image_generation_task.py index ba5e52a82..6218c4a60 100644 --- a/tests/unit/tasks/test_outpainting_image_generation_task.py +++ b/tests/unit/tasks/test_outpainting_image_generation_task.py @@ -43,10 +43,10 @@ def test_list_input(self, text_artifact: TextArtifact, image_artifact: ImageArti def test_bad_input(self, image_artifact): with pytest.raises(ValueError): - OutpaintingImageGenerationTask(("foo", "bar", image_artifact)).run() # pyright: ignore[reportArgumentType] + OutpaintingImageGenerationTask(("foo", "bar", image_artifact)).try_run() # pyright: ignore[reportArgumentType] with pytest.raises(ValueError): - OutpaintingImageGenerationTask(("foo", image_artifact, "baz")).run() # pyright: ignore[reportArgumentType] + OutpaintingImageGenerationTask(("foo", image_artifact, "baz")).try_run() # pyright: ignore[reportArgumentType] def test_config_image_generation_engine(self, text_artifact, image_artifact): task = OutpaintingImageGenerationTask((text_artifact, image_artifact, image_artifact)) diff --git a/tests/unit/tasks/test_prompt_task.py b/tests/unit/tasks/test_prompt_task.py index cfe853226..0b60f09bd 100644 --- a/tests/unit/tasks/test_prompt_task.py +++ b/tests/unit/tasks/test_prompt_task.py @@ -32,6 +32,15 @@ def test_config_prompt_driver(self): assert isinstance(task.prompt_driver, MockPromptDriver) def test_input(self): + # Structure context + pipeline = Pipeline() + task = PromptTask() + pipeline.add_task(task) + pipeline._execution_args = ("foo", "bar") + assert task.input.value == "foo" + pipeline._execution_args = ("fizz", "buzz") + assert task.input.value == "fizz" + # Str task = PromptTask("test") @@ -118,6 +127,22 @@ def test_input(self): assert task.input.value == str({"default": "test"}) + def test_input_context(self): + pipeline = Pipeline( + tasks=[ + PromptTask( + "foo", + prompt_driver=MockPromptDriver(), + on_before_run=lambda task: task.children[0].input, + ), + PromptTask("{{ parent_output }}", prompt_driver=MockPromptDriver()), + ] + ) + + pipeline.run() + + assert pipeline.tasks[1].input.value == "mock output" + def test_prompt_stack(self): task = PromptTask("{{ test }}", context={"test": "test value"}, rules=[Rule("test rule")]) diff --git a/tests/unit/tasks/test_structure_run_task.py b/tests/unit/tasks/test_structure_run_task.py index 2973c4a05..1df8ca8bf 100644 --- a/tests/unit/tasks/test_structure_run_task.py +++ b/tests/unit/tasks/test_structure_run_task.py @@ -10,7 +10,7 @@ def test_run_single_input(self, mock_config): agent = Agent() mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="pipeline mock output") pipeline = Pipeline() - driver = LocalStructureRunDriver(structure_factory_fn=lambda: agent) + driver = LocalStructureRunDriver(create_structure=lambda: agent) task = StructureRunTask(driver=driver) @@ -23,7 +23,7 @@ def test_run_multiple_inputs(self, mock_config): agent = Agent() mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="pipeline mock output") pipeline = Pipeline() - driver = LocalStructureRunDriver(structure_factory_fn=lambda: agent) + driver = LocalStructureRunDriver(create_structure=lambda: agent) task = StructureRunTask(input=["foo", "bar", "baz"], driver=driver) diff --git a/tests/unit/tasks/test_tool_task.py 
b/tests/unit/tasks/test_tool_task.py index f92f6a887..cb2a6b341 100644 --- a/tests/unit/tasks/test_tool_task.py +++ b/tests/unit/tasks/test_tool_task.py @@ -237,3 +237,39 @@ def test_actions_schema(self): Agent().add_task(task) assert task.actions_schema().json_schema("Actions Schema") == self.TARGET_TOOLS_SCHEMA + + def test_to_dict(self): + tool = MockTool() + task = ToolTask("test", tool=tool) + + expected_tool_task_dict = { + "type": task.type, + "id": task.id, + "state": str(task.state), + "parent_ids": task.parent_ids, + "child_ids": task.child_ids, + "max_meta_memory_entries": task.max_meta_memory_entries, + "context": task.context, + "tool": { + "type": task.tool.type, + "name": task.tool.name, + "input_memory": task.tool.input_memory, + "output_memory": task.tool.output_memory, + "install_dependencies_on_init": task.tool.install_dependencies_on_init, + "dependencies_install_directory": task.tool.dependencies_install_directory, + "verbose": task.tool.verbose, + "off_prompt": task.tool.off_prompt, + }, + } + assert expected_tool_task_dict == task.to_dict() + + def test_from_dict(self): + tool = MockTool() + task = ToolTask("test", tool=tool) + + serialized_tool_task = task.to_dict() + serialized_tool_task["tool"]["module_name"] = "tests.mocks.mock_tool.tool" + assert isinstance(serialized_tool_task, dict) + + deserialized_tool_task = ToolTask.from_dict(serialized_tool_task) + assert isinstance(deserialized_tool_task, ToolTask) diff --git a/tests/unit/tasks/test_variation_image_generation_task.py b/tests/unit/tasks/test_variation_image_generation_task.py index f6afbf03e..4c471a4f7 100644 --- a/tests/unit/tasks/test_variation_image_generation_task.py +++ b/tests/unit/tasks/test_variation_image_generation_task.py @@ -43,7 +43,7 @@ def test_list_input(self, text_artifact: TextArtifact, image_artifact: ImageArti def test_bad_input(self, image_artifact): with pytest.raises(ValueError): - VariationImageGenerationTask(("foo", "bar")).run() # pyright: ignore[reportArgumentType] + VariationImageGenerationTask(("foo", "bar")).try_run() # pyright: ignore[reportArgumentType] def test_config_image_generation_engine(self, text_artifact, image_artifact): task = VariationImageGenerationTask((text_artifact, image_artifact)) diff --git a/tests/unit/tools/test_aws_s3_tool.py b/tests/unit/tools/test_aws_s3_tool.py index 9c4c34e0b..8be8ebc5c 100644 --- a/tests/unit/tools/test_aws_s3_tool.py +++ b/tests/unit/tools/test_aws_s3_tool.py @@ -1,63 +1,52 @@ import boto3 import pytest +from moto import mock_aws from griptape.tools import AwsS3Tool -from tests.utils.aws import mock_aws_credentials class TestAwsS3Tool: - @pytest.fixture(autouse=True) - def _run_before_and_after_tests(self): - mock_aws_credentials() - - def test_get_bucket_acl(self): + @pytest.fixture() + def session(self): + mock = mock_aws() + mock.start() + yield boto3.Session(region_name="us-east-1") + mock.stop() + + def test_get_bucket_acl(self, session): value = {"bucket_name": "bucket_test"} - assert "error getting bucket acl" in AwsS3Tool(session=boto3.Session()).get_bucket_acl({"values": value}).value + assert "error getting bucket acl" in AwsS3Tool(session=session).get_bucket_acl({"values": value}).value - def test_get_bucket_policy(self): + def test_get_bucket_policy(self, session): value = {"bucket_name": "bucket_test"} - assert ( - "error getting bucket policy" - in AwsS3Tool(session=boto3.Session()).get_bucket_policy({"values": value}).value - ) + assert "error getting bucket policy" in 
AwsS3Tool(session=session).get_bucket_policy({"values": value}).value
 
-    def test_get_object_acl(self):
+    def test_get_object_acl(self, session):
         value = {"bucket_name": "bucket_test", "object_key": "key_test"}
-        assert "error getting object acl" in AwsS3Tool(session=boto3.Session()).get_object_acl({"values": value}).value
+        assert "error getting object acl" in AwsS3Tool(session=session).get_object_acl({"values": value}).value
 
-    def test_list_s3_buckets(self):
-        assert "error listing s3 buckets" in AwsS3Tool(session=boto3.Session()).list_s3_buckets({}).value
+    def test_list_s3_buckets(self, session):
+        assert AwsS3Tool(session=session).list_s3_buckets({}).value == []
 
-    def test_list_objects(self):
+    def test_list_objects(self, session):
         value = {"bucket_name": "bucket_test"}
-        assert (
-            "error listing objects in bucket"
-            in AwsS3Tool(session=boto3.Session()).list_objects({"values": value}).value
-        )
+        assert "error listing objects in bucket" in AwsS3Tool(session=session).list_objects({"values": value}).value
 
-    def test_upload_memory_artifacts_to_s3(self):
+    def test_upload_memory_artifacts_to_s3(self, session):
         value = {
             "memory_name": "foobar",
             "bucket_name": "bucket_test",
             "artifact_namespace": "foo",
             "object_key": "test.txt",
         }
 
-        assert (
-            "memory not found"
-            in AwsS3Tool(session=boto3.Session()).upload_memory_artifacts_to_s3({"values": value}).value
-        )
+        assert "memory not found" in AwsS3Tool(session=session).upload_memory_artifacts_to_s3({"values": value}).value
 
-    def test_upload_content_to_s3(self):
+    def test_upload_content_to_s3(self, session):
         value = {"content": "foobar", "bucket_name": "bucket_test", "object_key": "test.txt"}
 
-        assert (
-            "error uploading objects"
-            in AwsS3Tool(session=boto3.Session()).upload_content_to_s3({"values": value}).value
-        )
+        assert "uploaded successfully" in AwsS3Tool(session=session).upload_content_to_s3({"values": value}).value
 
-    def test_download_objects(self):
+    def test_download_objects(self, session):
         value = {"objects": {"bucket_name": "bucket_test", "object_key": "test.txt"}}
 
-        assert (
-            "error downloading objects" in AwsS3Tool(session=boto3.Session()).download_objects({"values": value}).value
-        )
+        assert "error downloading objects" in AwsS3Tool(session=session).download_objects({"values": value}).value
diff --git a/tests/unit/tools/test_base_tool.py b/tests/unit/tools/test_base_tool.py
index 60c9f6825..4c6b1e587 100644
--- a/tests/unit/tools/test_base_tool.py
+++ b/tests/unit/tools/test_base_tool.py
@@ -1,12 +1,16 @@
 import inspect
 import os
+import tempfile
+from unittest.mock import Mock
 
 import pytest
 from schema import Or, Schema, SchemaMissingKeyError
 
 from griptape.common import ToolAction
 from griptape.tasks import ActionsSubtask, ToolkitTask
+from griptape.tools import BaseTool
 from tests.mocks.mock_tool.tool import MockTool
+from tests.mocks.mock_tool_kwargs.tool import MockToolKwargs
 from tests.utils import defaults
 
 
@@ -246,9 +250,9 @@ def test_find_input_memory(self):
         assert MockTool().find_input_memory("foo") is None
         assert MockTool(input_memory=[defaults.text_task_memory("foo")]).find_input_memory("foo") is not None
 
-    def test_execute(self, tool):
+    def test_run(self, tool):
         action = ToolAction(input={}, name="", tag="")
-        assert tool.execute(tool.test_list_output, ActionsSubtask("foo"), action).to_text() == "foo\n\nbar"
+        assert tool.run(tool.test_list_output, ActionsSubtask("foo"), action).to_text() == "foo\n\nbar"
 
     def test_schema(self, tool):
         tool = MockTool()
@@ -279,3 +283,75 @@ def test_to_native_tool_name(self, tool, mocker):
         tool.name = "MockTool"
         with pytest.raises(ValueError, match="Activity name"):
             tool.to_native_tool_name(tool.test)
+
+    def test_to_dict(self, tool):
+        tool = MockTool()
+
+        expected_tool_dict = {
+            "type": tool.type,
+            "name": tool.name,
+            "input_memory": tool.input_memory,
+            "output_memory": tool.output_memory,
+            "install_dependencies_on_init": tool.install_dependencies_on_init,
+            "dependencies_install_directory": tool.dependencies_install_directory,
+            "verbose": tool.verbose,
+            "off_prompt": tool.off_prompt,
+        }
+
+        assert expected_tool_dict == tool.to_dict()
+
+    def test_from_dict(self, tool):
+        tool = MockTool()
+        action = ToolAction(input={}, name="", tag="")
+
+        serialized_tool = tool.to_dict()
+        assert isinstance(serialized_tool, dict)
+
+        deserialized_tool = MockTool.from_dict(serialized_tool)
+        assert isinstance(deserialized_tool, BaseTool)
+
+        assert deserialized_tool.run(tool.test_list_output, ActionsSubtask("foo"), action).to_text() == "foo\n\nbar"
+
+    def test_method_kwargs_var_injection(self, tool):
+        tool = MockToolKwargs()
+
+        params = {"values": {"test_kwarg": "foo", "test_kwarg_kwargs": "bar"}}
+        assert tool.test_with_kwargs(params) == "ack foo"
+
+    def test_has_requirements(self, tool):
+        assert tool.has_requirements
+
+        class InlineTool(BaseTool):
+            pass
+
+        assert InlineTool().has_requirements is False
+
+    def test_are_requirements_met(self, tool):
+        assert tool.are_requirements_met(tool.requirements_path)
+
+        class InlineTool(BaseTool):
+            pass
+
+        # Temp file does not work on Github Actions Windows runner.
+        if os.name != "nt":
+            with tempfile.NamedTemporaryFile() as temp:
+                temp.write(b"nonexistent-package==1.0.0\nanother-package==2.0.0")
+                temp.seek(0)
+
+                assert InlineTool().are_requirements_met(temp.name) is False
+
+            with tempfile.NamedTemporaryFile() as temp:
+                temp.write(b"pip")
+                temp.seek(0)
+
+                assert InlineTool().are_requirements_met(temp.name) is True
+
+    def test_runnable_mixin(self, tool):
+        mock_on_before_run = Mock()
+        mock_after_run = Mock()
+        tool = MockTool(on_before_run=mock_on_before_run, on_after_run=mock_after_run)
+
+        tool.run(tool.test_list_output, ActionsSubtask("foo"), ToolAction(input={}, name="", tag="")).to_text()
+
+        mock_on_before_run.assert_called_once_with(tool)
+        mock_after_run.assert_called_once_with(tool)
diff --git a/tests/unit/tools/test_file_manager.py b/tests/unit/tools/test_file_manager.py
index 4e035bdee..9cbfe6859 100644
--- a/tests/unit/tools/test_file_manager.py
+++ b/tests/unit/tools/test_file_manager.py
@@ -111,7 +111,7 @@ def test_save_content_to_file(self, temp_dir):
         )
 
         assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar"
-        assert result.value == "Successfully saved file"
+        assert result.value.startswith("Successfully saved file at:")
 
     def test_save_content_to_file_with_encoding(self, temp_dir):
         file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(encoding="utf-8", workdir=temp_dir))
@@ -120,7 +120,7 @@ def test_save_content_to_file_with_encoding(self, temp_dir):
         )
 
         assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar"
-        assert result.value == "Successfully saved file"
+        assert result.value.startswith("Successfully saved file at:")
 
     def test_save_and_load_content_to_file_with_encoding(self, temp_dir):
         file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(encoding="ascii", workdir=temp_dir))
@@ -129,7 +129,7 @@ def test_save_and_load_content_to_file_with_encoding(self, temp_dir):
         )
 
         assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar"
-        assert result.value == "Successfully saved file"
+        assert result.value.startswith("Successfully saved file at:")
 
         file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(encoding="ascii", workdir=temp_dir))
         result = file_manager.load_files_from_disk({"values": {"paths": [os.path.join("test", "foobar.txt")]}})
diff --git a/tests/unit/tools/test_structure_run_tool.py b/tests/unit/tools/test_structure_run_tool.py
index f62cdeea7..8b581103e 100644
--- a/tests/unit/tools/test_structure_run_tool.py
+++ b/tests/unit/tools/test_structure_run_tool.py
@@ -10,9 +10,7 @@ class TestStructureRunTool:
     def client(self):
         agent = Agent()
 
-        return StructureRunTool(
-            description="foo bar", driver=LocalStructureRunDriver(structure_factory_fn=lambda: agent)
-        )
+        return StructureRunTool(description="foo bar", driver=LocalStructureRunDriver(create_structure=lambda: agent))
 
     def test_run_structure(self, client):
         assert client.run_structure({"values": {"args": "foo bar"}}).value == "mock output"
diff --git a/tests/unit/tools/test_vector_store_tool.py b/tests/unit/tools/test_vector_store_tool.py
index 30596f09f..a8896c757 100644
--- a/tests/unit/tools/test_vector_store_tool.py
+++ b/tests/unit/tools/test_vector_store_tool.py
@@ -23,12 +23,12 @@ def test_search_with_namespace(self):
         assert len(tool1.search({"values": {"query": "test"}})) == 2
         assert len(tool2.search({"values": {"query": "test"}})) == 0
 
-    def test_custom_process_query_output_fn(self):
+    def test_custom_process_query_output(self):
         driver = LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver())
         tool1 = VectorStoreTool(
             description="Test",
             vector_store_driver=driver,
-            process_query_output_fn=lambda es: ListArtifact([e.vector for e in es]),
+            process_query_output=lambda es: ListArtifact([e.vector for e in es]),
             query_params={"include_vectors": True},
         )
 
diff --git a/tests/unit/utils/test_chat.py b/tests/unit/utils/test_chat.py
index a8ffb1fff..4cb43e05e 100644
--- a/tests/unit/utils/test_chat.py
+++ b/tests/unit/utils/test_chat.py
@@ -1,5 +1,7 @@
 import logging
-from unittest.mock import patch
+from unittest.mock import Mock, call, patch
+
+import pytest
 
 from griptape.configs import Defaults
 from griptape.memory.structure import ConversationMemory
@@ -19,7 +21,8 @@ def test_init(self):
             intro_text="hello...",
             prompt_prefix="Question: ",
             response_prefix="Answer: ",
-            output_fn=logging.info,
+            handle_input=input,
+            handle_output=logging.info,
             logger_level=logging.INFO,
         )
         assert chat.structure == agent
@@ -28,11 +31,12 @@ def test_init(self):
         assert chat.intro_text == "hello..."
         assert chat.prompt_prefix == "Question: "
         assert chat.response_prefix == "Answer: "
-        assert callable(chat.output_fn)
+        assert callable(chat.handle_input)
+        assert callable(chat.handle_output)
         assert chat.logger_level == logging.INFO
 
     @patch("builtins.input", side_effect=["exit"])
-    def test_chat_logger_level(self, mock_input):
+    def test_start_chat_logger_level(self, mock_input):
         agent = Agent(conversation_memory=ConversationMemory())
 
         chat = Chat(agent)
@@ -46,3 +50,37 @@ def test_chat_logger_level(self, mock_input):
         assert logger.getEffectiveLevel() == logging.DEBUG
 
         assert mock_input.call_count == 1
+
+    def test_chat_prompt(self):
+        assert Chat.ChatPrompt.prompt_suffix == ""
+
+    @pytest.mark.parametrize("stream", [True, False])
+    @patch("builtins.input", side_effect=["foo", "exit"])
+    def test_start(self, mock_input, stream):
+        mock_handle_output = Mock()
+        agent = Agent(conversation_memory=ConversationMemory(), stream=stream)
+
+        chat = Chat(agent, intro_text="foo", handle_output=mock_handle_output)
+
+        chat.start()
+
+        mock_input.assert_has_calls([call(), call()])
+        if stream:
+            mock_handle_output.assert_has_calls(
+                [
+                    call("foo"),
+                    call("Thinking..."),
+                    call("Assistant: mock output", stream=True),
+                    call("\n", stream=True),
+                    call("Exiting..."),
+                ]
+            )
+        else:
+            mock_handle_output.assert_has_calls(
+                [
+                    call("foo"),
+                    call("Thinking..."),
+                    call("Assistant: mock output"),
+                    call("Exiting..."),
+                ]
+            )
diff --git a/tests/unit/utils/test_contextvars_utils.py b/tests/unit/utils/test_contextvars_utils.py
new file mode 100644
index 000000000..001f6200e
--- /dev/null
+++ b/tests/unit/utils/test_contextvars_utils.py
@@ -0,0 +1,31 @@
+import contextvars
+import threading
+
+from griptape.utils import with_contextvars
+
+context_var = contextvars.ContextVar("context_var")
+
+
+class TestContextvarsUtils:
+    def test_with_contextvars(self):
+        context_var.set("test")
+
+        def function(vals: list) -> None:
+            try:
+                vals.append(context_var.get())
+            except LookupError:
+                vals.append("fallback")
+
+        return_values = []
+        thread = threading.Thread(target=with_contextvars(function), args=(return_values,))
+        thread.start()
+        thread.join()
+
+        assert return_values == ["test"]
+
+        return_values = []
+        thread = threading.Thread(target=function, args=(return_values,))
+        thread.start()
+        thread.join()
+
+        assert return_values == ["fallback"]
diff --git a/tests/unit/utils/test_stream.py b/tests/unit/utils/test_stream.py
index caddbb1a3..e16403a06 100644
--- a/tests/unit/utils/test_stream.py
+++ b/tests/unit/utils/test_stream.py
@@ -1,15 +1,21 @@
+import json
 from collections.abc import Iterator
 
 import pytest
 
 from griptape.structures import Agent, Pipeline
 from griptape.utils import Stream
+from tests.mocks.mock_prompt_driver import MockPromptDriver
+from tests.mocks.mock_tool.tool import MockTool
 
 
 class TestStream:
     @pytest.fixture(params=[True, False])
     def agent(self, request):
-        return Agent(stream=request.param)
+        driver = MockPromptDriver(
+            use_native_tools=request.param,
+        )
+        return Agent(stream=request.param, tools=[MockTool()], prompt_driver=driver)
 
     def test_init(self, agent):
         if agent.stream:
@@ -18,9 +24,10 @@ def test_init(self, agent):
             assert chat_stream.structure == agent
             chat_stream_run = chat_stream.run()
             assert isinstance(chat_stream_run, Iterator)
-            chat_stream_artifact = next(chat_stream_run)
-            assert chat_stream_artifact.value == "mock output"
-
+            assert next(chat_stream_run).value == "MockTool.mock-tag (test)"
+            assert next(chat_stream_run).value == json.dumps({"values": {"test": "test-value"}}, indent=2)
+            next(chat_stream_run)
+            assert next(chat_stream_run).value == "Answer: mock output"
             next(chat_stream_run)
             with pytest.raises(StopIteration):
                 next(chat_stream_run)
diff --git a/tests/utils/structure_tester.py b/tests/utils/structure_tester.py
index c943525b6..a34871013 100644
--- a/tests/utils/structure_tester.py
+++ b/tests/utils/structure_tester.py
@@ -224,7 +224,7 @@ class TesterPromptDriverOption:
     structure: Structure = field()
 
     @classmethod
-    def prompt_driver_id_fn(cls, prompt_driver) -> str:
+    def generate_prompt_driver_id(cls, prompt_driver) -> str:
         return f"{prompt_driver.__class__.__name__}-{prompt_driver.model}"
 
    def verify_structure_output(self, structure) -> dict: