diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..a5066892 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,8 @@ +docs/ +.github/ +.mypy_cache/ +.pytest_cache/ +.ruff_cache/ +.tox/ +.scripts/ +.tests/ diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 53883d31..b8ba6630 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -1,5 +1,10 @@ on: pull_request: + paths-ignore: + - "docs/**" + - "**.md" + - "examples/**" + - "mkdocs.yml" branches: - "main" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 6aef44ce..d8128e93 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,5 +1,9 @@ on: push: + paths-ignore: + - "docs/**" + - "**.md" + - "examples/**" branches: - "main" - "rc" diff --git a/.gitignore b/.gitignore index 40695b3a..ee5beae4 100644 --- a/.gitignore +++ b/.gitignore @@ -152,5 +152,3 @@ cov.xml .DS_Store data/ - -example_bak/ diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index efa7eb7d..00000000 --- a/.pylintrc +++ /dev/null @@ -1,4 +0,0 @@ -[MASTER] -ignore-patterns=**/tests/magnus/*.py -disable= - C0114,W1202,W1203 diff --git a/.python-version b/.python-version new file mode 100644 index 00000000..bd28b9c5 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.9 diff --git a/README.md b/README.md index ddb61a2a..707c0377 100644 --- a/README.md +++ b/README.md @@ -1,52 +1,69 @@ -# Hello from magnus

- Logo
+
+ ,////,
+ /// 6|
+ // _|
+ _/_,-'
+ _.-/'/ \ ,/;,
+ ,-' /' \_ \ / _/
+ `\ / _/\ ` /
+ | /, `\_/
+ | \'
+ /\_ /` /\
+ /' /_``--.__/\ `,. / \
+ |_/` `-._ `\/ `\ `.
+ `-.__/' `\ |
+ `\ \
+ `\ \
+ \_\__
+ \___)


[badge row: python version, PyPI, Code style: black, MyPy Checked, Tests, Docs — badge image markup with links updated for the rename]


-Magnus is a simplified workflow definition language that helps in: +runnable is a simplified workflow definition language that helps in: -- **Streamlined Design Process:** Magnus enables users to efficiently plan their pipelines with -[stubbed nodes](https://astrazeneca.github.io/magnus-core/concepts/stub), along with offering support for various structures such as -[tasks](https://astrazeneca.github.io/magnus-core/concepts/task), [parallel branches](https://astrazeneca.github.io/magnus-core/concepts/parallel), and [loops or map branches](https://astrazeneca.github.io/magnus-core/concepts/map) -in both [yaml](https://astrazeneca.github.io/magnus-core/concepts/pipeline) or a [python SDK](https://astrazeneca.github.io/magnus-core/sdk) for maximum flexibility. +- **Streamlined Design Process:** runnable enables users to efficiently plan their pipelines with +[stubbed nodes](https://astrazeneca.github.io/runnable-core/concepts/stub), along with offering support for various structures such as +[tasks](https://astrazeneca.github.io/runnable-core/concepts/task), [parallel branches](https://astrazeneca.github.io/runnable-core/concepts/parallel), and [loops or map branches](https://astrazeneca.github.io/runnable-core/concepts/map) +in both [yaml](https://astrazeneca.github.io/runnable-core/concepts/pipeline) or a [python SDK](https://astrazeneca.github.io/runnable-core/sdk) for maximum flexibility. -- **Incremental Development:** Build your pipeline piece by piece with Magnus, which allows for the -implementation of tasks as [python functions](https://astrazeneca.github.io/magnus-core/concepts/task/#python_functions), -[notebooks](https://astrazeneca.github.io/magnus-core/concepts/task/#notebooks), or [shell scripts](https://astrazeneca.github.io/magnus-core/concepts/task/#shell), +- **Incremental Development:** Build your pipeline piece by piece with runnable, which allows for the +implementation of tasks as [python functions](https://astrazeneca.github.io/runnable-core/concepts/task/#python_functions), +[notebooks](https://astrazeneca.github.io/runnable-core/concepts/task/#notebooks), or [shell scripts](https://astrazeneca.github.io/runnable-core/concepts/task/#shell), adapting to the developer's preferred tools and methods. -- **Robust Testing:** Ensure your pipeline performs as expected with the ability to test using sampled data. Magnus -also provides the capability to [mock and patch tasks](https://astrazeneca.github.io/magnus-core/configurations/executors/mocked) +- **Robust Testing:** Ensure your pipeline performs as expected with the ability to test using sampled data. runnable +also provides the capability to [mock and patch tasks](https://astrazeneca.github.io/runnable-core/configurations/executors/mocked) for thorough evaluation before full-scale deployment. - **Seamless Deployment:** Transition from the development stage to production with ease. -Magnus simplifies the process by requiring [only configuration changes](https://astrazeneca.github.io/magnus-core/configurations/overview) -to adapt to different environments, including support for [argo workflows](https://astrazeneca.github.io/magnus-core/configurations/executors/argo). +runnable simplifies the process by requiring [only configuration changes](https://astrazeneca.github.io/runnable-core/configurations/overview) +to adapt to different environments, including support for [argo workflows](https://astrazeneca.github.io/runnable-core/configurations/executors/argo). 
-- **Efficient Debugging:** Quickly identify and resolve issues in pipeline execution with Magnus's local -debugging features. Retrieve data from failed tasks and [retry failures](https://astrazeneca.github.io/magnus-core/concepts/run-log/#retrying_failures) +- **Efficient Debugging:** Quickly identify and resolve issues in pipeline execution with runnable's local +debugging features. Retrieve data from failed tasks and [retry failures](https://astrazeneca.github.io/runnable-core/concepts/run-log/#retrying_failures) using your chosen debugging tools to maintain a smooth development experience. -Along with the developer friendly features, magnus also acts as an interface to production grade concepts -such as [data catalog](https://astrazeneca.github.io/magnus-core/concepts/catalog), [reproducibility](https://astrazeneca.github.io/magnus-core/concepts/run-log), -[experiment tracking](https://astrazeneca.github.io/magnus-core/concepts/experiment-tracking) -and secure [access to secrets](https://astrazeneca.github.io/magnus-core/concepts/secrets). +Along with the developer friendly features, runnable also acts as an interface to production grade concepts +such as [data catalog](https://astrazeneca.github.io/runnable-core/concepts/catalog), [reproducibility](https://astrazeneca.github.io/runnable-core/concepts/run-log), +[experiment tracking](https://astrazeneca.github.io/runnable-core/concepts/experiment-tracking) +and secure [access to secrets](https://astrazeneca.github.io/runnable-core/concepts/secrets).
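The production-grade interfaces listed above are all driven through a small API surface. As one concrete illustration, here is a hedged sketch of secrets access from inside a task — `get_secret` is the interaction name from the magnus-era docs and is assumed to survive the rename; the secret name is illustrative:

```python
from runnable import get_secret

def task_needing_credentials():
    # resolved by whichever secrets provider is configured;
    # the task code stays identical across environments
    api_token = get_secret("api_token")
    return api_token
```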
@@ -59,19 +76,19 @@ and secure [access to secrets](https://astrazeneca.github.io/magnus-core/concept

## Documentation

-[More details about the project and how to use it available here](https://astrazeneca.github.io/magnus-core/).
+[More details about the project and how to use it are available here](https://astrazeneca.github.io/runnable-core/).
## Installation -The minimum python version that magnus supports is 3.8 +The minimum python version that runnable supports is 3.8 ```shell -pip install magnus +pip install runnable ``` -Please look at the [installation guide](https://astrazeneca.github.io/magnus-core/usage) +Please look at the [installation guide](https://astrazeneca.github.io/runnable-core/usage) for more information.
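A minimal post-install smoke test (a sketch; it assumes only the 3.8 floor stated above and the top-level `Pipeline`/`Task` exports used in the SDK example below):

```python
import sys

# the README above states 3.8 as the minimum supported version
assert sys.version_info >= (3, 8), "runnable supports python >= 3.8"

# the SDK entry points used by the examples that follow
from runnable import Pipeline, Task

print(Pipeline, Task)
```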
@@ -117,7 +134,7 @@ def return_parameter() -> Parameter: def display_parameter(x: int, y: InnerModel): """ Annotating the arguments of the function is important for - magnus to understand the type of parameters you want. + runnable to understand the type of parameters you want. Input args can be a pydantic model or the individual attributes. """ @@ -138,7 +155,7 @@ my_param = return_parameter() display_parameter(my_param.x, my_param.y) ``` -### Orchestration using magnus +### Orchestration using runnable @@ -153,7 +170,7 @@ Example present at: ```examples/python-tasks.py``` Run it as: ```python examples/python-tasks.py``` ```python -from magnus import Pipeline, Task +from runnable import Pipeline, Task def main(): step1 = Task( @@ -188,7 +205,7 @@ if __name__ == "__main__": Example present at: ```examples/python-tasks.yaml``` -Execute via the cli: ```magnus execute -f examples/python-tasks.yaml``` +Execute via the cli: ```runnable execute -f examples/python-tasks.yaml``` ```yaml dag: @@ -231,9 +248,9 @@ No code change, just change the configuration. executor: type: "argo" config: - image: magnus:demo + image: runnable:demo persistent_volumes: - - name: magnus-volume + - name: runnable-volume mount_path: /mnt run_log_store: @@ -242,9 +259,9 @@ run_log_store: log_folder: /mnt/run_log_store ``` -More details can be found in [argo configuration](https://astrazeneca.github.io/magnus-core/configurations/executors/argo). +More details can be found in [argo configuration](https://astrazeneca.github.io/runnable-core/configurations/executors/argo). -Execute the code as ```magnus execute -f examples/python-tasks.yaml -c examples/configs/argo-config.yam``` +Execute the code as ```runnable execute -f examples/python-tasks.yaml -c examples/configs/argo-config.yam```
Expand @@ -253,12 +270,12 @@ Execute the code as ```magnus execute -f examples/python-tasks.yaml -c examples/ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: magnus-dag- + generateName: runnable-dag- annotations: {} labels: {} spec: activeDeadlineSeconds: 172800 - entrypoint: magnus-dag + entrypoint: runnable-dag podGC: strategy: OnPodCompletion retryStrategy: @@ -270,7 +287,7 @@ spec: maxDuration: '3600' serviceAccountName: default-editor templates: - - name: magnus-dag + - name: runnable-dag failFast: true dag: tasks: @@ -285,9 +302,9 @@ spec: depends: step-2-task-772vg3.Succeeded - name: step-1-task-uvdp7h container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - step%1 @@ -310,9 +327,9 @@ spec: cpu: 250m - name: step-2-task-772vg3 container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - step%2 @@ -335,9 +352,9 @@ spec: cpu: 250m - name: success-success-igzq2e container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - success @@ -368,7 +385,7 @@ spec: volumes: - name: executor-0 persistentVolumeClaim: - claimName: magnus-volume + claimName: runnable-volume ``` @@ -379,22 +396,22 @@ spec: ### Linear A simple linear pipeline with tasks either -[python functions](https://astrazeneca.github.io/magnus-core/concepts/task/#python_functions), -[notebooks](https://astrazeneca.github.io/magnus-core/concepts/task/#notebooks), or [shell scripts](https://astrazeneca.github.io/magnus-core/concepts/task/#shell) +[python functions](https://astrazeneca.github.io/runnable-core/concepts/task/#python_functions), +[notebooks](https://astrazeneca.github.io/runnable-core/concepts/task/#notebooks), or [shell scripts](https://astrazeneca.github.io/runnable-core/concepts/task/#shell) [![](https://mermaid.ink/img/pako:eNpl0bFuwyAQBuBXQVdZTqTESpxMDJ0ytkszhgwnOCcoNo4OaFVZfvcSx20tGSQ4fn0wHB3o1hBIyLJOWGeDFJ3Iq7r90lfkkA9HHfmTUpnX1hFyLvrHzDLl_qB4-1BOOZGGD3TfSikvTDSNFqdj2sT2vBTr9euQlXNWjqycsN2c7UZWFMUE7udwP0L3y6JenNKiyfvz8t8_b-gavT9QJYY0PcDtjeTLptrAChriBq1JzeoeWkG4UkMKZCoN8k2Bcn1yGEN7_HYaZOBIK4h3g4EOFi-MDcgKa59SMja0_P7s_vAJ_Q_YOH6o?type=png)](https://mermaid.live/edit#pako:eNpl0bFuwyAQBuBXQVdZTqTESpxMDJ0ytkszhgwnOCcoNo4OaFVZfvcSx20tGSQ4fn0wHB3o1hBIyLJOWGeDFJ3Iq7r90lfkkA9HHfmTUpnX1hFyLvrHzDLl_qB4-1BOOZGGD3TfSikvTDSNFqdj2sT2vBTr9euQlXNWjqycsN2c7UZWFMUE7udwP0L3y6JenNKiyfvz8t8_b-gavT9QJYY0PcDtjeTLptrAChriBq1JzeoeWkG4UkMKZCoN8k2Bcn1yGEN7_HYaZOBIK4h3g4EOFi-MDcgKa59SMja0_P7s_vAJ_Q_YOH6o) -### [Parallel branches](https://astrazeneca.github.io/magnus-core/concepts/parallel) +### [Parallel branches](https://astrazeneca.github.io/runnable-core/concepts/parallel) Execute branches in parallel 
[![](https://mermaid.ink/img/pako:eNp9k01rwzAMhv-K8S4ZtJCzDzuMLmWwwkh2KMQ7eImShiZ2sB1KKf3vs52PpsWNT7LySHqlyBeciRwwwUUtTtmBSY2-YsopR8MpQUfAdCdBBekWNBpvv6-EkFICzGAtWcUTDW3wYy20M7lr5QGBK2j-anBAkH4M1z6grnjpy17xAiTwDII07jj6HK8-VnVZBspITnpjztyoVkLLJOy3Qfrdm6gQEu2370Io7WLORo84PbRoA_oOl9BBg4UHbHR58UkMWq_fxjrOnhLRx1nH0SgkjlBjh7ekxNKGc0NelDLknhePI8qf7MVNr_31nm1wwNTeM2Ao6pmf-3y3Mp7WlqA7twOnXfKs17zt-6azmim1gQL1A0NKS3EE8hKZE4Yezm3chIVFiFe4AdmwKjdv7mIjKNYHaIBiYsycySPFlF8NxzotkjPPMNGygxXu2pxp2FSslKzBpGC1Ml7IKy3krn_E7i1f_wEayTcn?type=png)](https://mermaid.live/edit#pako:eNp9k01rwzAMhv-K8S4ZtJCzDzuMLmWwwkh2KMQ7eImShiZ2sB1KKf3vs52PpsWNT7LySHqlyBeciRwwwUUtTtmBSY2-YsopR8MpQUfAdCdBBekWNBpvv6-EkFICzGAtWcUTDW3wYy20M7lr5QGBK2j-anBAkH4M1z6grnjpy17xAiTwDII07jj6HK8-VnVZBspITnpjztyoVkLLJOy3Qfrdm6gQEu2370Io7WLORo84PbRoA_oOl9BBg4UHbHR58UkMWq_fxjrOnhLRx1nH0SgkjlBjh7ekxNKGc0NelDLknhePI8qf7MVNr_31nm1wwNTeM2Ao6pmf-3y3Mp7WlqA7twOnXfKs17zt-6azmim1gQL1A0NKS3EE8hKZE4Yezm3chIVFiFe4AdmwKjdv7mIjKNYHaIBiYsycySPFlF8NxzotkjPPMNGygxXu2pxp2FSslKzBpGC1Ml7IKy3krn_E7i1f_wEayTcn) -### [loops or map](https://astrazeneca.github.io/magnus-core/concepts/map) +### [loops or map](https://astrazeneca.github.io/runnable-core/concepts/map) Execute a pipeline over an iterable parameter. [![](https://mermaid.ink/img/pako:eNqVlF1rwjAUhv9KyG4qKNR-3AS2m8nuBgN3Z0Sy5tQG20SSdE7E_76kVVEr2CY3Ied9Tx6Sk3PAmeKACc5LtcsKpi36nlGZFbXciHwfLN79CuWiBLMcEULWGkBSaeosA2OCxbxdXMd89Get2bZASsLiSyuvQE2mJZXIjW27t2rOmQZ3Gp9rD6UjatWnwy7q6zPPukd50WTydmemEiS_QbQ79RwxGoQY9UaMuojRA8TCXexzyHgQZNwbMu5Cxl3IXNX6OWMyiDHpzZh0GZMHjOK3xz2mgxjT3oxplzG9MPp5_nVOhwJjteDwOg3HyFj3L1dCcvh7DUc-iftX18n6Waet1xX8cG908vpKHO6OW7cvkeHm5GR2b3drdvaSGTODHLW37mxabYC8fLgRhlfxpjNdwmEets-Dx7gCXTHBXQc8-D2KbQEVUEzckjO9oZjKo9Ox2qr5XmaYWF3DGNdbzizMBHOVVWGSs9K4XeDCKv3ZttSmsx7_AYa341E?type=png)](https://mermaid.live/edit#pako:eNqVlF1rwjAUhv9KyG4qKNR-3AS2m8nuBgN3Z0Sy5tQG20SSdE7E_76kVVEr2CY3Ied9Tx6Sk3PAmeKACc5LtcsKpi36nlGZFbXciHwfLN79CuWiBLMcEULWGkBSaeosA2OCxbxdXMd89Get2bZASsLiSyuvQE2mJZXIjW27t2rOmQZ3Gp9rD6UjatWnwy7q6zPPukd50WTydmemEiS_QbQ79RwxGoQY9UaMuojRA8TCXexzyHgQZNwbMu5Cxl3IXNX6OWMyiDHpzZh0GZMHjOK3xz2mgxjT3oxplzG9MPp5_nVOhwJjteDwOg3HyFj3L1dCcvh7DUc-iftX18n6Waet1xX8cG908vpKHO6OW7cvkeHm5GR2b3drdvaSGTODHLW37mxabYC8fLgRhlfxpjNdwmEets-Dx7gCXTHBXQc8-D2KbQEVUEzckjO9oZjKo9Ox2qr5XmaYWF3DGNdbzizMBHOVVWGSs9K4XeDCKv3ZttSmsx7_AYa341E) -### [Arbitrary nesting](https://astrazeneca.github.io/magnus-core/concepts/nesting/) +### [Arbitrary nesting](https://astrazeneca.github.io/runnable-core/concepts/nesting/) Any nesting of parallel within map and so on. 
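The SDK example above survives the diff only in fragments. Reassembled, the linear pattern reads roughly as below — the `>>` chaining, `terminate_with_success`, and `add_terminal_nodes` keywords are assumptions carried over from the magnus-era SDK and may differ after the rename:

```python
from runnable import Pipeline, Task

def main():
    step1 = Task(
        name="step 1",
        command="examples.functions.return_parameter",  # dotted path; assumed layout
    )
    step2 = Task(
        name="step 2",
        command="examples.functions.display_parameter",
        terminate_with_success=True,  # step 2 ends the happy path
    )
    step1 >> step2  # linear edge: step 2 runs after step 1 succeeds

    pipeline = Pipeline(
        steps=[step1, step2],
        start_at=step1,
        add_terminal_nodes=True,  # attach the implicit success/fail nodes
    )
    pipeline.execute()

if __name__ == "__main__":
    main()
```

The same dag, expressed in yaml, is what `runnable execute -f examples/python-tasks.yaml` consumes in the CLI example above.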
diff --git a/assets/favicon.png b/assets/favicon.png deleted file mode 100644 index 101427f5..00000000 Binary files a/assets/favicon.png and /dev/null differ diff --git a/assets/logo-readme.png b/assets/logo-readme.png deleted file mode 100644 index b859410d..00000000 Binary files a/assets/logo-readme.png and /dev/null differ diff --git a/assets/logo.png b/assets/logo.png deleted file mode 100644 index f170eb03..00000000 Binary files a/assets/logo.png and /dev/null differ diff --git a/assets/work.png b/assets/work.png deleted file mode 100644 index ca4f48cb..00000000 Binary files a/assets/work.png and /dev/null differ diff --git a/docs/.DS_Store b/docs/.DS_Store deleted file mode 100644 index b996586c..00000000 Binary files a/docs/.DS_Store and /dev/null differ diff --git a/docs/assets/cropped.png b/docs/assets/cropped.png new file mode 100644 index 00000000..ee79c445 Binary files /dev/null and b/docs/assets/cropped.png differ diff --git a/docs/assets/favicon.png b/docs/assets/favicon.png deleted file mode 100644 index 101427f5..00000000 Binary files a/docs/assets/favicon.png and /dev/null differ diff --git a/docs/assets/logo.png b/docs/assets/logo.png deleted file mode 100644 index f170eb03..00000000 Binary files a/docs/assets/logo.png and /dev/null differ diff --git a/docs/assets/logo1.png b/docs/assets/logo1.png deleted file mode 100644 index a29af02d..00000000 Binary files a/docs/assets/logo1.png and /dev/null differ diff --git a/docs/assets/speed.png b/docs/assets/speed.png new file mode 100644 index 00000000..fb02a09e Binary files /dev/null and b/docs/assets/speed.png differ diff --git a/docs/assets/sport.png b/docs/assets/sport.png new file mode 100644 index 00000000..800f8e15 Binary files /dev/null and b/docs/assets/sport.png differ diff --git a/docs/assets/whatdo.png b/docs/assets/whatdo.png deleted file mode 100644 index 77f2ce04..00000000 Binary files a/docs/assets/whatdo.png and /dev/null differ diff --git a/docs/assets/work.png b/docs/assets/work.png deleted file mode 100644 index ca4f48cb..00000000 Binary files a/docs/assets/work.png and /dev/null differ diff --git a/docs/concepts/catalog.md b/docs/concepts/catalog.md index af02b1af..384eb196 100644 --- a/docs/concepts/catalog.md +++ b/docs/concepts/catalog.md @@ -4,6 +4,8 @@ data between tasks. The default configuration of ```do-nothing``` is no-op by design. We kindly request to raise a feature request to make us aware of the eco-system. +# TODO: Simplify this + Catalog provides a way to store and retrieve data generated by the individual steps of the dag to downstream steps of the dag. It can be any storage system that indexes its data by a unique identifier. @@ -20,7 +22,7 @@ The directory structure within a partition is the same as the project directory get/put data in the catalog as if you are working with local directory structure. Every interaction with the catalog (either by API or configuration) results in an entry in the [```run log```](../concepts/run-log.md/#step_log) -Internally, magnus also uses the catalog to store execution logs of tasks i.e stdout and stderr from +Internally, runnable also uses the catalog to store execution logs of tasks i.e stdout and stderr from [python](../concepts/task.md/#python) or [shell](../concepts/task.md/#shell) and executed notebook from [notebook tasks](../concepts/task.md/#notebook). 
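The catalog passage above mentions access "either by API or configuration" without showing the API side. A hedged sketch — `put_in_catalog`/`get_from_catalog` are the magnus-era interaction names and are assumed to carry over to runnable:

```python
from runnable import get_from_catalog, put_in_catalog

def generate():
    with open("data/hello.txt", "w") as handle:
        handle.write("hello")
    # index the artifact against the current run_id; as described above,
    # this also records an entry in the run log
    put_in_catalog("data/hello.txt")

def consume():
    # a downstream step restores the artifact to the same relative path
    get_from_catalog("data/hello.txt")
    print(open("data/hello.txt").read())
```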
@@ -153,7 +155,7 @@ The execution results in the ```catalog``` populated with the artifacts and the "code_identifier": "6029841c3737fe1163e700b4324d22a469993bb0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -199,7 +201,7 @@ The execution results in the ```catalog``` populated with the artifacts and the "code_identifier": "6029841c3737fe1163e700b4324d22a469993bb0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -245,7 +247,7 @@ The execution results in the ```catalog``` populated with the artifacts and the "code_identifier": "6029841c3737fe1163e700b4324d22a469993bb0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -284,7 +286,7 @@ The execution results in the ```catalog``` populated with the artifacts and the "code_identifier": "6029841c3737fe1163e700b4324d22a469993bb0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -337,7 +339,7 @@ The execution results in the ```catalog``` populated with the artifacts and the "code_identifier": "6029841c3737fe1163e700b4324d22a469993bb0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -467,7 +469,7 @@ and [notebook](../concepts/task.md/#notebook) tasks. Data objects can be shared between [python](../concepts/task.md/#python_functions) or [notebook](../concepts/task.md/#notebook) tasks, instead of serializing data and deserializing to file structure, using -[get_object](../interactions.md/#magnus.get_object) and [put_object](../interactions.md/#magnus.put_object). +[get_object](../interactions.md/#runnable.get_object) and [put_object](../interactions.md/#runnable.put_object). Internally, we use [pickle](https:/docs.python.org/3/library/pickle.html) to serialize and deserialize python objects. Please ensure that the object can be serialized via pickle. diff --git a/docs/concepts/executor.md b/docs/concepts/executor.md index 307f7fe0..d9dc3208 100644 --- a/docs/concepts/executor.md +++ b/docs/concepts/executor.md @@ -1,4 +1,7 @@ -Executors are the heart of magnus, they traverse the workflow and execute the tasks within the + +## TODO: Simplify + +Executors are the heart of runnable, they traverse the workflow and execute the tasks within the workflow while coordinating with different services (eg. [run log](../concepts/run-log.md), [catalog](../concepts/catalog.md), [secrets](../concepts/secrets.md) etc) @@ -23,7 +26,7 @@ any workflow engine. 
## Graph Traversal -In magnus, the graph traversal can be performed by magnus itself or can be handed over to other +In runnable, the graph traversal can be performed by runnable itself or can be handed over to other orchestration frameworks (e.g Argo workflows, AWS step functions). ### Example @@ -44,7 +47,7 @@ translated to argo specification just by changing the configuration. You can execute the pipeline in default configuration by: - ```magnus execute -f examples/concepts/task_shell_simple.yaml``` + ```runnable execute -f examples/concepts/task_shell_simple.yaml``` ``` yaml linenums="1" --8<-- "examples/configs/default.yaml" @@ -60,16 +63,16 @@ translated to argo specification just by changing the configuration. In this configuration, we are using [argo workflows](https://argoproj.github.io/argo-workflows/) as our workflow engine. We are also instructing the workflow engine to use a docker image, - ```magnus:demo``` defined in line #4, as our execution environment. Please read + ```runnable:demo``` defined in line #4, as our execution environment. Please read [containerised environments](../configurations/executors/container-environments.md) for more information. - Since magnus needs to track the execution status of the workflow, we are using a ```run log``` + Since runnable needs to track the execution status of the workflow, we are using a ```run log``` which is persistent and available in for jobs in kubernetes environment. You can execute the pipeline in argo configuration by: - ```magnus execute -f examples/concepts/task_shell_simple.yaml -c examples/configs/argo-config.yaml``` + ```runnable execute -f examples/concepts/task_shell_simple.yaml -c examples/configs/argo-config.yaml``` ``` yaml linenums="1" --8<-- "examples/configs/argo-config.yaml" @@ -78,7 +81,7 @@ translated to argo specification just by changing the configuration. 1. Use argo workflows as the execution engine to run the pipeline. 2. Run this docker image for every step of the pipeline. The docker image should have the same directory structure as the project directory. - 3. Mount the volume from Kubernetes persistent volumes (magnus-volume) to /mnt directory. + 3. Mount the volume from Kubernetes persistent volumes (runnable-volume) to /mnt directory. 4. Resource constraints for the container runtime. 5. Since every step runs in a container, the run log should be persisted. Here we are using the file-system as our run log store. @@ -94,7 +97,7 @@ translated to argo specification just by changing the configuration. - The graph traversal rules follow the the same rules as our workflow. The step ```success-success-ou7qlf``` in line #15 only happens if the step ```shell-task-dz3l3t``` defined in line #12 succeeds. - - The execution fails if any of the tasks fail. Both argo workflows and magnus ```run log``` + - The execution fails if any of the tasks fail. Both argo workflows and runnable ```run log``` mark the execution as failed. @@ -102,12 +105,12 @@ translated to argo specification just by changing the configuration. apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: magnus-dag- + generateName: runnable-dag- annotations: {} labels: {} spec: activeDeadlineSeconds: 172800 - entrypoint: magnus-dag + entrypoint: runnable-dag podGC: strategy: OnPodCompletion retryStrategy: @@ -119,7 +122,7 @@ translated to argo specification just by changing the configuration. 
maxDuration: '3600' serviceAccountName: default-editor templates: - - name: magnus-dag + - name: runnable-dag failFast: true dag: tasks: @@ -131,9 +134,9 @@ translated to argo specification just by changing the configuration. depends: shell-task-4jy8pl.Succeeded - name: shell-task-4jy8pl container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - shell @@ -156,9 +159,9 @@ translated to argo specification just by changing the configuration. cpu: 250m - name: success-success-djhm6j container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - success @@ -189,13 +192,13 @@ translated to argo specification just by changing the configuration. volumes: - name: executor-0 persistentVolumeClaim: - claimName: magnus-volume + claimName: runnable-volume ``` -As seen from the above example, once a [pipeline is defined in magnus](../concepts/pipeline.md) either via yaml or SDK, we can +As seen from the above example, once a [pipeline is defined in runnable](../concepts/pipeline.md) either via yaml or SDK, we can run the pipeline in different environments just by providing a different configuration. Most often, there is no need to change the code or deviate from standard best practices while coding. @@ -204,11 +207,11 @@ no need to change the code or deviate from standard best practices while coding. !!! note - This section is to understand the internal mechanism of magnus and not required if you just want to + This section is to understand the internal mechanism of runnable and not required if you just want to use different executors. -Independent of traversal, all the tasks are executed within the ```context``` of magnus. +Independent of traversal, all the tasks are executed within the ```context``` of runnable. A closer look at the actual task implemented as part of transpiled workflow in argo specification details the inner workings. Below is a snippet of the argo specification from @@ -217,9 +220,9 @@ lines 18 to 34. ```yaml linenums="18" - name: shell-task-dz3l3t container: - image: magnus-example:latest + image: runnable-example:latest command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - shell @@ -235,17 +238,17 @@ lines 18 to 34. ``` The actual ```command``` to run is not the ```command``` defined in the workflow, -i.e ```echo hello world```, but a command in the CLI of magnus which specifies the workflow file, +i.e ```echo hello world```, but a command in the CLI of runnable which specifies the workflow file, the step name and the configuration file. -### Context of magnus +### Context of runnable Any ```task``` defined by the user as part of the workflow always runs as a *sub-command* of -magnus. In that sense, magnus follows the +runnable. In that sense, runnable follows the [decorator pattern](https://en.wikipedia.org/wiki/Decorator_pattern) without being part of the application codebase. -In a very simplistic sense, the below stubbed-code explains the context of magnus during +In a very simplistic sense, the below stubbed-code explains the context of runnable during execution of a task. ```python linenums="1" diff --git a/docs/concepts/experiment-tracking.md b/docs/concepts/experiment-tracking.md index 4f72d125..9c47ff93 100644 --- a/docs/concepts/experiment-tracking.md +++ b/docs/concepts/experiment-tracking.md @@ -9,7 +9,7 @@ during the execution of the pipeline. 
=== "Using the API" - The highlighted lines in the below example show how to [use the API](../interactions.md/#magnus.track_this) + The highlighted lines in the below example show how to [use the API](../interactions.md/#runnable.track_this) Any pydantic model as a value would be dumped as a dict, respecting the alias, before tracking it. @@ -61,7 +61,7 @@ during the execution of the pipeline. "code_identifier": "793b052b8b603760ff1eb843597361219832b61c", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -106,7 +106,7 @@ during the execution of the pipeline. "code_identifier": "793b052b8b603760ff1eb843597361219832b61c", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -162,7 +162,7 @@ during the execution of the pipeline. "start_at": "shell", "name": "", "description": "An example pipeline to demonstrate setting experiment tracking metrics\nusing environment variables. Any environment variable with - prefix\n'MAGNUS_TRACK_' will be recorded as a metric captured during the step.\n\nYou can run this pipeline as:\n magnus execute -f + prefix\n'runnable_TRACK_' will be recorded as a metric captured during the step.\n\nYou can run this pipeline as:\n runnable execute -f examples/concepts/experiment_tracking_env.yaml\n", "internal_branch_name": "", "steps": { @@ -207,7 +207,7 @@ The step is defaulted to be 0. === "Using the API" - The highlighted lines in the below example show how to [use the API](../interactions.md/#magnus.track_this) with + The highlighted lines in the below example show how to [use the API](../interactions.md/#runnable.track_this) with the step parameter. You can run this example by ```python run examples/concepts/experiment_tracking_step.py``` @@ -247,7 +247,7 @@ The step is defaulted to be 0. "code_identifier": "858c4df44f15d81139341641c63ead45042e0d89", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -301,7 +301,7 @@ The step is defaulted to be 0. "code_identifier": "858c4df44f15d81139341641c63ead45042e0d89", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -393,14 +393,14 @@ The step is defaulted to be 0. !!! note "Opt out" Pipelines need not use the ```experiment-tracking``` if the preferred tools of choice is - not implemented in magnus. The default configuration of ```do-nothing``` is no-op by design. + not implemented in runnable. The default configuration of ```do-nothing``` is no-op by design. We kindly request to raise a feature request to make us aware of the eco-system. -The default experiment tracking tool of magnus is a no-op as the ```run log``` captures all the +The default experiment tracking tool of runnable is a no-op as the ```run log``` captures all the required details. 
To make it compatible with other experiment tracking tools like [mlflow](https://mlflow.org/docs/latest/tracking.html) or -[Weights and Biases](https://wandb.ai/site/experiment-tracking), we map attributes of magnus +[Weights and Biases](https://wandb.ai/site/experiment-tracking), we map attributes of runnable to the underlying tool. For example, for mlflow: @@ -420,7 +420,7 @@ Since mlflow does not support step wise logging of parameters, the key name is f !!! note inline end "Shortcomings" - Experiment tracking capabilities of magnus are inferior in integration with + Experiment tracking capabilities of runnable are inferior in integration with popular python frameworks like pytorch and tensorflow as compared to other experiment tracking tools. @@ -453,7 +453,7 @@ Since mlflow does not support step wise logging of parameters, the key name is f
![Image](../assets/screenshots/mlflow.png){ width="800" height="600"}
-mlflow UI for the execution. The run_id remains the same as the run_id of magnus
+mlflow UI for the execution. The run_id remains the same as the run_id of runnable
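For reference alongside the screenshot, a hedged sketch of the tracking API linked in this section (`runnable.track_this`; the keyword-argument form and the `step` parameter follow the step-wise logging discussion above, but the exact signature is an assumption):

```python
from runnable import track_this

def train_model():
    # key/value metrics land in the run log and are forwarded to the
    # configured tracker (mlflow here); step defaults to 0
    track_this(number_of_epochs=10)
    track_this(step=1, loss=0.9)
    track_this(step=2, loss=0.6)
```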
@@ -464,5 +464,5 @@ Since mlflow does not support step wise logging of parameters, the key name is f To provide implementation specific capabilities, we also provide a -[python API](../interactions.md/#magnus.get_experiment_tracker_context) to obtain the client context. The default +[python API](../interactions.md/#runnable.get_experiment_tracker_context) to obtain the client context. The default client context is a [null context manager](https://docs.python.org/3/library/contextlib.html#contextlib.nullcontext). diff --git a/docs/concepts/map.md b/docs/concepts/map.md index 7024bb49..a98f96dd 100644 --- a/docs/concepts/map.md +++ b/docs/concepts/map.md @@ -1,4 +1,4 @@ -```map``` nodes in magnus allows you to execute a sequence of nodes (i.e a pipeline) for all the items in a list. This is similar to +```map``` nodes in runnable allows you to execute a sequence of nodes (i.e a pipeline) for all the items in a list. This is similar to [Map state of AWS Step functions](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-map-state.html) or [loops in Argo workflows](https://argo-workflows.readthedocs.io/en/latest/walk-through/loops/). @@ -87,8 +87,8 @@ of the files to process. over ```chunks```. If the argument ```start_index``` is not provided, you can still access the current - value by ```MAGNUS_MAP_VARIABLE``` environment variable. - The environment variable ```MAGNUS_MAP_VARIABLE``` is a dictionary with keys as + value by ```runnable_MAP_VARIABLE``` environment variable. + The environment variable ```runnable_MAP_VARIABLE``` is a dictionary with keys as ```iterate_as``` This instruction is set while defining the map node. @@ -108,7 +108,7 @@ of the files to process. This instruction is set while defining the map node. Note that the ```branch``` of the map node has a similar schema of the pipeline. - You can run this example by ```magnus execute examples/concepts/map.yaml``` + You can run this example by ```runnable execute examples/concepts/map.yaml``` ```yaml linenums="1" hl_lines="23-26" --8<-- "examples/concepts/map.yaml" @@ -120,11 +120,11 @@ of the files to process. functions. The map branch "iterate and execute" iterates over chunks and exposes the current start_index of - as environment variable ```MAGNUS_MAP_VARIABLE```. + as environment variable ```runnable_MAP_VARIABLE```. - The environment variable ```MAGNUS_MAP_VARIABLE``` is a json string with keys of the ```iterate_as```. + The environment variable ```runnable_MAP_VARIABLE``` is a json string with keys of the ```iterate_as```. - You can run this example by ```magnus execute examples/concepts/map_shell.yaml``` + You can run this example by ```runnable execute examples/concepts/map_shell.yaml``` ```yaml linenums="1" hl_lines="26-27 29-32" --8<-- "examples/concepts/map_shell.yaml" @@ -156,7 +156,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -195,7 +195,7 @@ of the files to process. 
"code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -218,7 +218,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -266,7 +266,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -312,7 +312,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -360,7 +360,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -406,7 +406,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -454,7 +454,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -500,7 +500,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -548,7 +548,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -594,7 +594,7 @@ of the files to process. 
"code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -642,7 +642,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -687,7 +687,7 @@ of the files to process. "code_identifier": "30ca73bb01ac45db08b1ca75460029da142b53fa", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], diff --git a/docs/concepts/nesting.md b/docs/concepts/nesting.md index cfeedaea..05a152a7 100644 --- a/docs/concepts/nesting.md +++ b/docs/concepts/nesting.md @@ -1,11 +1,11 @@ As seen from the definitions of [parallel](../concepts/parallel.md) or [map](../concepts/map.md), the branches are pipelines -themselves. This allows for deeply nested workflows in **magnus**. +themselves. This allows for deeply nested workflows in **runnable**. Technically there is no limit in the depth of nesting but there are some practical considerations. -- Not all workflow engines that magnus can transpile the workflow to support deeply nested workflows. +- Not all workflow engines that runnable can transpile the workflow to support deeply nested workflows. AWS Step functions and Argo workflows support them. - Deeply nested workflows are complex to understand and debug during errors. @@ -25,7 +25,7 @@ AWS Step functions and Argo workflows support them. === "yaml" - You can run this pipeline by ```magnus execute examples/concepts/nesting.yaml``` + You can run this pipeline by ```runnable execute examples/concepts/nesting.yaml``` ```yaml linenums="1" --8<-- "examples/concepts/nesting.yaml" @@ -57,7 +57,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -96,7 +96,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -119,7 +119,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -142,7 +142,7 @@ AWS Step functions and Argo workflows support them. 
"code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -165,7 +165,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -201,7 +201,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -243,7 +243,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -279,7 +279,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -320,7 +320,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -362,7 +362,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -385,7 +385,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -421,7 +421,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -463,7 +463,7 @@ AWS Step functions and Argo workflows support them. 
"code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -499,7 +499,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -540,7 +540,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -581,7 +581,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -623,7 +623,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -646,7 +646,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -669,7 +669,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -705,7 +705,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -747,7 +747,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -783,7 +783,7 @@ AWS Step functions and Argo workflows support them. 
"code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -824,7 +824,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -866,7 +866,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -889,7 +889,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -925,7 +925,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -967,7 +967,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -1003,7 +1003,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -1044,7 +1044,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -1085,7 +1085,7 @@ AWS Step functions and Argo workflows support them. "code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -1126,7 +1126,7 @@ AWS Step functions and Argo workflows support them. 
"code_identifier": "99139c3507898c60932ad5d35c08b395399a19f6", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], diff --git a/docs/concepts/parallel.md b/docs/concepts/parallel.md index 112b5ea1..1c2f882c 100644 --- a/docs/concepts/parallel.md +++ b/docs/concepts/parallel.md @@ -1,4 +1,4 @@ -Parallel nodes in magnus allows you to run multiple pipelines in parallel and use your compute resources efficiently. +Parallel nodes in runnable allows you to run multiple pipelines in parallel and use your compute resources efficiently. ## Example @@ -98,7 +98,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -129,7 +129,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -152,7 +152,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -183,7 +183,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -214,7 +214,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -251,7 +251,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -282,7 +282,7 @@ ensemble model happens only after both models are (successfully) trained. 
"code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -318,7 +318,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -349,7 +349,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -380,7 +380,7 @@ ensemble model happens only after both models are (successfully) trained. "code_identifier": "f0a2719001de9be30c27069933e4b4a64a065e2b", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], diff --git a/docs/concepts/parameters.md b/docs/concepts/parameters.md index dbdc2ae0..1de3b83e 100644 --- a/docs/concepts/parameters.md +++ b/docs/concepts/parameters.md @@ -1,4 +1,6 @@ -In magnus, ```parameters``` are python data types that can be passed from one ```task``` +## TODO: Concretly show an example! + +In runnable, ```parameters``` are python data types that can be passed from one ```task``` to the next ```task```. These parameters can be accessed by the ```task``` either as environment variables, arguments of the ```python function``` or using the [API](../interactions.md). @@ -8,9 +10,9 @@ environment variables, arguments of the ```python function``` or using the The initial parameters of the pipeline can set by using a ```yaml``` file and presented during execution -```--parameters-file, -parameters``` while using the [magnus CLI](../usage.md/#usage) +```--parameters-file, -parameters``` while using the [runnable CLI](../usage.md/#usage) -or by using ```parameters_file``` with [the sdk](..//sdk.md/#magnus.Pipeline.execute). +or by using ```parameters_file``` with [the sdk](..//sdk.md/#runnable.Pipeline.execute). They can also be set using environment variables which override the parameters defined by the file. @@ -25,14 +27,14 @@ They can also be set using environment variables which override the parameters d === "environment variables" - Any environment variables prefixed with ```MAGNUS_PRM_ ``` are interpreted as + Any environment variables prefixed with ```runnable_PRM_ ``` are interpreted as parameters by the ```tasks```. 
The yaml formatted parameters can also be defined as: ```shell - export MAGNUS_PRM_spam="hello" - export MAGNUS_PRM_eggs='{"ham": "Yes, please!!"}' + export runnable_PRM_spam="hello" + export runnable_PRM_eggs='{"ham": "Yes, please!!"}' ``` Parameters defined by environment variables override parameters defined by diff --git a/docs/concepts/pipeline.md b/docs/concepts/pipeline.md index 4aadf3dd..5398eaaa 100644 --- a/docs/concepts/pipeline.md +++ b/docs/concepts/pipeline.md @@ -1,6 +1,6 @@ ???+ tip inline end "Steps" - In magnus, a step can be a simple ```task``` or ```stub``` or complex nested pipelines like + In runnable, a step can be a simple ```task``` or ```stub``` or complex nested pipelines like ```parallel``` branches, embedded ```dags``` or dynamic workflows. In this section, we use ```stub``` for convenience. For more in depth information about other types, @@ -8,7 +8,7 @@ -In **magnus**, we use the words +In **runnable**, we use the words - ```dag```, ```workflows``` and ```pipeline``` interchangeably. - ```nodes```, ```steps``` interchangeably. @@ -79,7 +79,7 @@ one more node. ???+ warning inline end "Step names" - In magnus, the names of steps should not have ```%``` or ```.``` in them. + In runnable, the names of steps should not have ```%``` or ```.``` in them. You can name them as descriptive as you want. @@ -223,7 +223,7 @@ Reaching one of these states as part of traversal indicates the status of the pi You can, alternatively, create a ```success``` and ```fail``` state and link them together. ```python - from magnus import Success, Fail + from runnable import Success, Fail success = Success(name="Custom Success") fail = Fail(name="Custom Failure") diff --git a/docs/concepts/run-log.md b/docs/concepts/run-log.md index 8f677fb9..b18b0be5 100644 --- a/docs/concepts/run-log.md +++ b/docs/concepts/run-log.md @@ -1,6 +1,6 @@ # Run Log -Internally, magnus uses a ```run log``` to keep track of the execution of the pipeline. It +Internally, runnable uses a ```run log``` to keep track of the execution of the pipeline. It also stores the parameters, experiment tracking metrics and reproducibility information captured during the execution. It should not be confused with application logs generated during the execution of a ```task``` i.e the stdout and stderr @@ -43,7 +43,7 @@ when running the ```command``` of a task. "code_identifier": "ca4c5fbff4148d3862a4738942d4607a9c4f0d88", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -87,7 +87,7 @@ when running the ```command``` of a task. "code_identifier": "ca4c5fbff4148d3862a4738942d4607a9c4f0d88", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -131,7 +131,7 @@ when running the ```command``` of a task. "code_identifier": "ca4c5fbff4148d3862a4738942d4607a9c4f0d88", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -175,7 +175,7 @@ when running the ```command``` of a task. 
"code_identifier": "ca4c5fbff4148d3862a4738942d4607a9c4f0d88", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -247,7 +247,7 @@ when running the ```command``` of a task. the parameters and sets them back as environment variables.\nThe step display_again displays the updated parameters from modify_initial and updates them.\n\n - You can run this pipeline as:\n magnus execute -f + You can run this pipeline as:\n runnable execute -f examples/concepts/task_shell_parameters.yaml -p examples/concepts/parameters. yaml\n", "internal_branch_name": "", @@ -350,7 +350,7 @@ A snippet from the above example: "code_identifier": "ca4c5fbff4148d3862a4738942d4607a9c4f0d88", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -509,7 +509,7 @@ reproduced in local environments and fixed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -548,7 +548,7 @@ reproduced in local environments and fixed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -594,7 +594,7 @@ reproduced in local environments and fixed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -640,7 +640,7 @@ reproduced in local environments and fixed. "expose_parameters_as_inputs": true, "secrets_from_k8s": [], "output_file": "argo-pipeline.yaml", - "name": "magnus-dag-", + "name": "runnable-dag-", "annotations": {}, "labels": {}, "activeDeadlineSeconds": 172800, @@ -661,7 +661,7 @@ reproduced in local environments and fixed. "service_account_name": "default-editor", "persistent_volumes": [ { - "name": "magnus-volume", + "name": "runnable-volume", "mount_path": "/mnt" } ], @@ -696,7 +696,7 @@ reproduced in local environments and fixed. "dag": { "start_at": "Setup", "name": "", - "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n magnus execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", + "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. 
Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n runnable execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", "steps": { "Setup": { "type": "task", @@ -724,7 +724,7 @@ reproduced in local environments and fixed. }, "max_attempts": 1, "command_type": "shell", - "command": "echo \"Hello from magnus\" >> data/hello.txt\n", + "command": "echo \"Hello from runnable\" >> data/hello.txt\n", "node_name": "Create Content" }, "Retrieve Content": { @@ -794,7 +794,7 @@ reproduced in local environments and fixed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -815,7 +815,7 @@ reproduced in local environments and fixed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -836,7 +836,7 @@ reproduced in local environments and fixed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -882,7 +882,7 @@ reproduced in local environments and fixed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -934,7 +934,7 @@ reproduced in local environments and fixed. "tag": "", "run_id": "polynomial-bartik-2226", "variables": { - "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/magnus:latest" + "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/runnable:latest" }, "use_cached": true, "original_run_id": "toFail", @@ -945,7 +945,7 @@ reproduced in local environments and fixed. present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Clean up to get again: We remove the data folder. Note that this is stubbed to prevent\n accidental deletion of your contents. You can change type to task to make really run.\n4. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n - magnus execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", + runnable execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", "steps": { "Setup": { "type": "task", @@ -973,7 +973,7 @@ reproduced in local environments and fixed. 
}, "max_attempts": 1, "command_type": "shell", - "command": "echo \"Hello from magnus\" >> data/hello.txt\n", + "command": "echo \"Hello from runnable\" >> data/hello.txt\n", "node_name": "Create Content" }, "Retrieve Content": { @@ -1065,7 +1065,7 @@ reproduced in local environments and fixed. > "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", > "code_identifier_type": "git", > "code_identifier_dependable": true, - > "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + > "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", > "code_identifier_message": "" > } > ], @@ -1163,7 +1163,7 @@ reproduced in local environments and fixed. < "expose_parameters_as_inputs": true, < "secrets_from_k8s": [], < "output_file": "argo-pipeline.yaml", - < "name": "magnus-dag-", + < "name": "runnable-dag-", < "annotations": {}, < "labels": {}, < "activeDeadlineSeconds": 172800, @@ -1184,7 +1184,7 @@ reproduced in local environments and fixed. < "service_account_name": "default-editor", < "persistent_volumes": [ < { - < "name": "magnus-volume", + < "name": "runnable-volume", < "mount_path": "/mnt" < } < ], @@ -1215,14 +1215,14 @@ reproduced in local environments and fixed. --- > "run_id": "polynomial-bartik-2226", > "variables": { - > "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/magnus:latest" + > "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/runnable:latest" > }, > "use_cached": true, > "original_run_id": "toFail", 208c168 - < "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n magnus execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", + < "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n runnable execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", --- - > "description": "This is a simple pipeline that demonstrates passing data between steps.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Clean up to get again: We remove the data folder. Note that this is stubbed to prevent\n accidental deletion of your contents. You can change type to task to make really run.\n4. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n magnus execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", + > "description": "This is a simple pipeline that demonstrates passing data between steps.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. 
Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Clean up to get again: We remove the data folder. Note that this is stubbed to prevent\n accidental deletion of your contents. You can change type to task to make really run.\n4. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n runnable execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", 253c213 < "command": "cat data/hello1.txt", --- @@ -1237,10 +1237,10 @@ reproduced in local environments and fixed. ## API Tasks can access the ```run log``` during the execution of the step -[using the API](../interactions.md/#magnus.get_run_log). The run log returned by this method is a deep copy +[using the API](../interactions.md/#runnable.get_run_log). The run log returned by this method is a deep copy to prevent any modifications. Tasks can also access the ```run_id``` of the current execution either by -[using the API](../interactions.md/#magnus.get_run_id) or by the environment -variable ```MAGNUS_RUN_ID```. +[using the API](../interactions.md/#runnable.get_run_id) or by the environment +variable ```runnable_RUN_ID```. diff --git a/docs/concepts/secrets.md b/docs/concepts/secrets.md index b6ee7fb6..8d5b01ed 100644 --- a/docs/concepts/secrets.md +++ b/docs/concepts/secrets.md @@ -3,7 +3,7 @@ !!! note "Opt out" Pipelines need not use the ```secrets``` if the preferred tools of choice is - not implemented in magnus. The default configuration of ```do-nothing``` is no-op by design. + not implemented in runnable. The default configuration of ```do-nothing``` is no-op by design. We kindly request to raise a feature request to make us aware of the eco-system. @@ -11,7 +11,7 @@ Most complex pipelines require secrets to hold sensitive information during task They could be database credentials, API keys or any information that need to present at the run-time but invisible at all other times. -Magnus provides a [clean API](../interactions.md/#magnus.get_secret) to access secrets +runnable provides a [clean API](../interactions.md/#runnable.get_secret) to access secrets and independent of the actual secret provider, the interface remains the same. A typical example would be a task requiring the database connection string to connect @@ -24,7 +24,7 @@ class CustomObject: @property def connection_object(self): - from magnus import get_secret + from runnable import get_secret connection_string = get_secret("connection_string") # Do something with the secrets ``` diff --git a/docs/concepts/stub.md b/docs/concepts/stub.md index a01d9cac..f7e83b30 100644 --- a/docs/concepts/stub.md +++ b/docs/concepts/stub.md @@ -1,4 +1,4 @@ -Stub nodes in magnus are just like +Stub nodes in runnable are just like [```Pass``` state](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html) in AWS Step Functions or ```pass``` in python code. It is a placeholder and useful when you want to debug or design your pipeline. diff --git a/docs/concepts/task.md b/docs/concepts/task.md index 3cfcae84..ced25339 100644 --- a/docs/concepts/task.md +++ b/docs/concepts/task.md @@ -1,6 +1,6 @@ Task nodes are the execution units of the pipeline. 
-In magnus, a ```command``` in a task node can be [python functions](#python_functions), +In runnable, a ```command``` in a task node can be [python functions](#python_functions), [Jupyter notebooks](#notebook) or a [shell scripts](#shell). All task nodes can take arguments, retrieve and create files/objects and return arguments, though their access patterns are different. @@ -57,7 +57,7 @@ should be the dotted path to the python function. === "yaml" - You can execute this by magnus execute -f examples/concepts/simple.yaml + You can execute this by runnable execute -f examples/concepts/simple.yaml ```yaml linenums="1" --8<-- "examples/concepts/simple.yaml" @@ -88,10 +88,10 @@ is to execute this function. !!! tip "Mutability" - Functions mutating the input parameters is idiomatic is python. However, functions as part of magnus + Mutating the input parameters is idiomatic in python. However, functions as part of a runnable pipeline should return the mutated parameters for downstream steps to have access to them. - For example, unless the function ```mutating_function``` returns the updated parameters, magnus will + For example, unless the function ```mutating_function``` returns the updated parameters, runnable will not know about the change. @@ -125,7 +125,7 @@ Lets assume that the initial parameters are: === "Natively" - Internally, magnus stores the parameters in serialised json format. + Internally, runnable stores the parameters in serialised json format. ### ^^Input arguments to the function^^ @@ -139,7 +139,7 @@ Lets assume that the initial parameters are: !!! warning "Annotation" - Without annotations, magnus cannot determine the type and can cause unexpected behavior. + Without annotations, runnable cannot determine the type, which can cause unexpected behavior. This is especially true in distributed executors (eg: argo workflows). @@ -177,14 +177,14 @@ Lets assume that the initial parameters are: === "Using the API" - Magnus also has [python API](../interactions.md) to access parameters. + runnable also has a [python API](../interactions.md) to access parameters. - Use [get_parameter](../interactions.md/#magnus.get_parameter) to access a parameter at the root level. + Use [get_parameter](../interactions.md/#runnable.get_parameter) to access a parameter at the root level. You can optionally specify the ```type``` by using ```cast_as``` argument to the API. For example, line 19 would cast ```eggs```parameter into ```EggsModel```. Native python types do not need any explicit ```cast_as``` argument. - Use [set_parameter](../interactions.md/#magnus.set_parameter) to set parameters at the root level. + Use [set_parameter](../interactions.md/#runnable.set_parameter) to set parameters at the root level. Multiple parameters can be set at the same time, for example, line 26 would set both the ```spam``` and ```eggs``` in a single call. @@ -200,14 +200,14 @@ Lets assume that the initial parameters are: === "Using environment variables" - Any environment variable with ```MAGNUS_PRM_``` is understood to be a parameter in magnus. + Any environment variable with ```runnable_PRM_``` is understood to be a parameter in runnable. Before the execution of the ```command```, all the parameters at the root level are set as environment variables - with the key prefixed by ```MAGNUS_PRM_```. Python functions that are called during the execution of the command + with the key prefixed by ```runnable_PRM_```.
Python functions that are called during the execution of the command can also access them as environment variables. After the execution of the ```command```, the environment is "scanned" again to identify changes to the existing - variables prefixed by ```MAGNUS_PRM_```. All updated variables are stored at the root level. + variables prefixed by ```runnable_PRM_```. All updated variables are stored at the root level. Parameters set by environment variables over-ride the parameters defined by the initial parameters which can be handy to quickly experiment without modifying code or to dynamically adjust behavior when running in @@ -373,7 +373,7 @@ Assume that the initial parameters are: Similar to the input parameters, outputs from the notebook ca be indicated by tagging the cell. Please ensure The tagged cell should ```print``` the dictionary as the output and nothing else. - The default ```tag``` to indicate output parameters is ```magnus_output``` but can be configured by + The default ```tag``` to indicate output parameters is ```runnable_output``` but can be configured by ```output_cell_tag``` while defining the task in both SDK and yaml. @@ -417,7 +417,7 @@ Assume that the initial parameters are: === "Using environment variables" As seen in [python tasks](#python_functions), you can get/set the parameters by using environment variables. - Any variable with prefix ```MAGNUS_PRM_``` is identified to be a parameter. + Any variable with prefix ```runnable_PRM_``` is identified as a parameter. === "pipeline definition" @@ -484,10 +484,10 @@ Assuming the initial parameters are: - [x] Passing parameters between steps The only way ```shell``` commands can pass parameters between steps is via the ```environment``` variables. -Any environment variable with prefix ```MAGNUS_PRM_``` should be understood as a parameter inside the shell +Any environment variable with prefix ```runnable_PRM_``` should be understood as a parameter inside the shell script/command. Nested parameters are set in json string format. -To pass parameter to downstream steps, set/update environment variables with ```MAGNUS_PRM_``` prefix. The +To pass parameters to downstream steps, set/update environment variables with ```runnable_PRM_``` prefix. The execution environment is "scanned" for updated environment variables and stored for downstream steps. ```yaml linenums="1" @@ -496,7 +496,7 @@ execution environment is "scanned" for updated environment variables and stored In the above example, the execution is specified with initial parameters by the ```-p``` option. -In line 18, we just display the parameters prefixed by ```MAGNUS_PRM_``. The next step ```modify_initial``` +In line 18, we just display the parameters prefixed by ```runnable_PRM_```. The next step ```modify_initial``` updates the parameters by setting new environment variables in line 26 and 27. The next step ```display_again``` displays the updated parameters and updates them for downstream steps in @@ -505,8 +505,8 @@ lines 33-35. !!! note "Output" - You might notice that the output might have a few extra lines starting with ```MAGNUS```. You can ignore - them as they are generated by internal mechanisms of magnus. + You might notice that the output has a few extra lines starting with ```runnable```. You can ignore + them as they are generated by internal mechanisms of runnable.
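The ```runnable_PRM_``` round-trip described above is easy to emulate. Below is a minimal sketch of how such environment scanning could work for a shell task, assuming a POSIX shell; ```run_shell_task``` is a hypothetical helper for illustration, not runnable's actual implementation.

```python
import json
import os
import subprocess

PREFIX = "runnable_PRM_"  # parameter prefix described in the docs above


def run_shell_task(command: str, params: dict) -> dict:
    """Inject parameters as environment variables, run the command,
    then scan the shell's environment for updated parameters."""
    env = os.environ.copy()
    for key, value in params.items():
        # Nested parameters travel as json strings.
        env[PREFIX + key] = value if isinstance(value, str) else json.dumps(value)

    # Run `env` in the same shell session so we can observe the final
    # state of the environment after the user command has executed.
    proc = subprocess.run(
        f"{command}\nenv",
        shell=True,
        env=env,
        capture_output=True,
        text=True,
        check=True,
    )

    updated = {}
    for line in proc.stdout.splitlines():
        if line.startswith(PREFIX) and "=" in line:
            key, _, raw = line.partition("=")
            try:
                updated[key[len(PREFIX):]] = json.loads(raw)  # typed/nested values
            except json.JSONDecodeError:
                updated[key[len(PREFIX):]] = raw  # plain strings
    return updated


print(run_shell_task('export runnable_PRM_spam="world"', {"eggs": {"ham": "Yes, please!!"}}))
```

Parameters picked up this way override the file-based ones, which matches the precedence described earlier.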
diff --git a/docs/concepts/the-big-picture.md b/docs/concepts/the-big-picture.md index 60a6eb2f..cb62f5ff 100644 --- a/docs/concepts/the-big-picture.md +++ b/docs/concepts/the-big-picture.md @@ -1,4 +1,4 @@ -Magnus revolves around the concept of pipelines or workflows and tasks that happen within them. +runnable revolves around the concept of pipelines or workflows and tasks that happen within them. --- @@ -21,7 +21,7 @@ flowchart LR To define a workflow, we need: - [List of steps](pipeline.md/#steps) -- [starting step](pipeline.md/#start_at) +- a [starting step](pipeline.md/#start_at) - Next step - [In case of success](pipeline.md/#linking) @@ -63,7 +63,7 @@ A step in the workflow can be: === "parallel" - A step that has a defined number of [parallel workflows](parallel.md) executing + A step that has a definite number of [parallel workflows](parallel.md) executing simultaneously. In the below visualisation, the green lined steps happen in sequence and wait for the previous step to @@ -183,24 +183,17 @@ To be useful, it can: - Or by [upstream steps](parameters.md/#parameters_flow) - [Publish or retrieve artifacts](catalog.md) from/to other steps. -- [Publish metrics](experiment-tracking.md) that are interesting. + - Have [access to secrets](secrets.md). -All the above functionality is possible either via: - -- Non intrusive ways: Your code does not have anything specific to magnus. - - - Application native way. - - Or via environment variables. - -- Or via the [python API](../interactions.md) which involves ```importing magnus``` in your code. +All the above functionality is possible naturally, with no intrusion into the code base. --- All executions of the pipeline should be: - [Reproducible](run-log.md) for audit and data lineage purposes. -- Runnable at local environments for +- Runnable in local environments for [debugging failed runs](run-log.md/#retrying_failures). --- @@ -208,17 +201,5 @@ All executions of the pipeline should be: Executions of pipeline should be scalable and use the infrastructure at your disposal efficiently. -Pipelines should be portable between different infrastructure patterns. -Infrastructure patterns change all the time and -so are the demands from the infrastructure. - -We achieve this by [changing configurations](../configurations/overview.md), rather than +We achieve this by adding [one configuration file](../configurations/overview.md), rather than changing the application code. - -For example a pipeline should be able to run: - -- Local machines + local file system for data + database for logs + mlflow for experiment -tracking. -- argo executor + cloud accessible storage for data and logs + mlflow for experiment tracking - -without any change in the code.
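Since the big picture above leans on the python SDK, here is a rough sketch of the stub-first design flow it describes. Treat the class and argument names (```Stub```, ```Pipeline```, ```terminate_with_success```) as assumptions based on the documented concepts rather than a verbatim API reference; check the SDK docs for the exact signatures in your version.

```python
# A stub-first pipeline: design the traversal now, fill in tasks later.
# Names and signatures below are assumptions for illustration only.
from runnable import Pipeline, Stub

extract = Stub(name="extract")
train = Stub(name="train", terminate_with_success=True)

pipeline = Pipeline(steps=[extract, train])
pipeline.execute()  # runs locally by default; the configuration decides everything else
```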
diff --git a/docs/configurations/catalog.md b/docs/configurations/catalog.md index 2d20191d..dcfe6885 100644 --- a/docs/configurations/catalog.md +++ b/docs/configurations/catalog.md @@ -91,7 +91,7 @@ catalog: "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -130,7 +130,7 @@ catalog: "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -176,7 +176,7 @@ catalog: "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -258,7 +258,7 @@ catalog: ] }, "max_attempts": 1, - "command": "echo \"Hello from magnus\" >> data/hello.txt", + "command": "echo \"Hello from runnable\" >> data/hello.txt", "command_type": "shell", "node_name": "Create Content" }, diff --git a/docs/configurations/executors/argo.md b/docs/configurations/executors/argo.md index 4898eff8..62b21b5c 100644 --- a/docs/configurations/executors/argo.md +++ b/docs/configurations/executors/argo.md @@ -1,7 +1,7 @@ [Argo workflows](https://argo-workflows.readthedocs.io/en/latest/) is a powerful container orchestration framework for Kubernetes and it can run on any Kubernetes environment. -**magnus** will transpile pipeline definition to argo specification during the pipeline execution which +**runnable** will transpile the pipeline definition to an argo specification during pipeline execution, which you can then upload to the cluster either manually or via CICD (recommended). - [x] Execute the pipeline in any cloud environment. @@ -9,7 +9,7 @@ you can then upload to the cluster either manually or via CICD (recommended). - [x] Ability to provide specialized compute environments for different steps of the pipeline. - [ ] Expects a mature cloud kubernetes environment and expertise. -Magnus provides *sensible* defaults to most of the configuration variables but it is highly advised +runnable provides *sensible* defaults for most of the configuration variables, but it is highly advised to get inputs from infrastructure teams or ML engineers in defining the configuration.
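To make the transpilation flow concrete, the sketch below drives the documented two-step process from python: point the SDK run at the argo configuration via ```runnable_CONFIGURATION_FILE``` and read back the generated ```argo-pipeline.yaml``` (the default ```output_file```). The paths mirror the examples referenced in these docs; adjust them to your project.

```python
import os
import subprocess

# Step 1: executing the SDK pipeline definition under the argo config
# transpiles it instead of running it locally.
env = dict(os.environ, runnable_CONFIGURATION_FILE="examples/configs/argo-config.yaml")
subprocess.run(["python", "examples/concepts/simple.py"], env=env, check=True)

# Step 2: the transpiled specification lands in `output_file`
# (argo-pipeline.yaml by default), ready for manual upload or CICD.
with open("argo-pipeline.yaml") as spec:
    print(spec.read())
```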
@@ -56,7 +56,7 @@ executor: | Parameter | Default | Argo Field | | :-----------: | :-------------: | :------------: | -| name | ```magnus-dag-``` | ```generateName``` | +| name | ```runnable-dag-``` | ```generateName``` | | annotations | ```{}``` | ```annotations``` of ```metadata``` | | labels | ```{}``` | ```labels``` | | pod_gc | ```OnPodCompletion``` | ```podGC``` | @@ -109,11 +109,11 @@ You can attach multiple persistent volumes to the pods as long as there are no c -The following adds the volume ```magnus-volume``` to every container of the workflow at ```/mnt``` +The following adds the volume ```runnable-volume``` to every container of the workflow at ```/mnt``` ```yaml persistent_volumes: - - name: magnus-volume + - name: runnable-volume mount_path: /mnt ``` @@ -154,7 +154,7 @@ as inputs to the workflow. This allows for changing the parameters at runtime. === "pipeline" Execute the pipeline as: - ```magnus execute -f examples/concepts/task_shell_parameters.yaml -p examples/concepts/parameters.yaml -c examples/configs/argo-config.yaml``` + ```runnable execute -f examples/concepts/task_shell_parameters.yaml -p examples/concepts/parameters.yaml -c examples/configs/argo-config.yaml``` ```yaml linenums="1" --8<-- "examples/concepts/task_shell_parameters.yaml" @@ -169,12 +169,12 @@ as inputs to the workflow. This allows for changing the parameters at runtime. apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: magnus-dag- + generateName: runnable-dag- annotations: {} labels: {} spec: activeDeadlineSeconds: 172800 - entrypoint: magnus-dag + entrypoint: runnable-dag podGC: strategy: OnPodCompletion retryStrategy: @@ -186,7 +186,7 @@ as inputs to the workflow. This allows for changing the parameters at runtime. maxDuration: '3600' serviceAccountName: default-editor templates: - - name: magnus-dag + - name: runnable-dag failFast: true dag: tasks: @@ -204,9 +204,9 @@ as inputs to the workflow. This allows for changing the parameters at runtime. depends: display-again-task-6d1ofy.Succeeded - name: access-initial-task-cybkoa container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - access%initial @@ -230,13 +230,13 @@ as inputs to the workflow. This allows for changing the parameters at runtime. memory: 1Gi cpu: 250m env: - - name: MAGNUS_PRM_spam + - name: runnable_PRM_spam value: '{{workflow.parameters.spam}}' - name: modify-initial-task-6lka8g container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - modify%initial @@ -261,9 +261,9 @@ as inputs to the workflow. This allows for changing the parameters at runtime. cpu: 250m - name: display-again-task-6d1ofy container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - display%again @@ -288,9 +288,9 @@ as inputs to the workflow. This allows for changing the parameters at runtime. 
cpu: 250m - name: success-success-igw6ct container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - '{{workflow.parameters.run_id}}' - success @@ -325,7 +325,7 @@ as inputs to the workflow. This allows for changing the parameters at runtime. volumes: - name: executor-0 persistentVolumeClaim: - claimName: magnus-volume + claimName: runnable-volume ``` @@ -357,7 +357,7 @@ as inputs to the workflow. This allows for changing the parameters at runtime. "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -548,8 +548,8 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur 1. Use ```argo``` executor type to execute the pipeline. 2. By default, all the tasks are executed in the docker image . Please refer to [building docker images](container-environments.md) - 3. Mount the persistent volume ```magnus-volume``` to all the containers as ```/mnt```. - 4. Store the run logs in the file-system. As all containers have access to ```magnus-volume``` + 3. Mount the persistent volume ```runnable-volume``` to all the containers as ```/mnt```. + 4. Store the run logs in the file-system. As all containers have access to ```runnable-volume``` as ```/mnt```. We use that to mounted folder as run log store. @@ -559,10 +559,10 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur multi-stage process](container-environments.md). 1. Generate the ```yaml``` definition file by: - ```MAGNUS_CONFIGURATION_FILE=examples/configs/argo-config.yaml python examples/concepts/simple.py``` - 2. Build the docker image with yaml definition in it, called magnus:latest in current example. - 3. Execute the pipeline via the magnus CLI, - ```MAGNUS_VAR_argo_docker_image=magnus:latest magnus execute -f magnus-pipeline.yaml -c examples/configs/argo-config.yaml``` + ```runnable_CONFIGURATION_FILE=examples/configs/argo-config.yaml python examples/concepts/simple.py``` + 2. Build the docker image with yaml definition in it, called runnable:latest in current example. + 3. Execute the pipeline via the runnable CLI, + ```runnable_VAR_argo_docker_image=runnable:latest runnable execute -f runnable-pipeline.yaml -c examples/configs/argo-config.yaml``` ```python linenums="1" hl_lines="24" @@ -570,16 +570,16 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur ``` 1. You can provide a configuration file dynamically by using the environment - variable ```MAGNUS_CONFIGURATION_FILE```. Please see [SDK for more details](../../sdk.md). + variable ```runnable_CONFIGURATION_FILE```. Please see [SDK for more details](../../sdk.md). === "yaml" For yaml based definitions, the execution order is to: - 1. Build the docker image with the yaml definition in it, called magnus:latest in current example. - 2. Execute the pipeline via the magnus CLI: - ```MAGNUS_VAR_argo_docker_image=magnus:latest magnus execute -f examples/concepts/simple.yaml -c examples/configs/argo-config.yaml``` + 1. Build the docker image with the yaml definition in it, called runnable:latest in current example. + 2. 
Execute the pipeline via the runnable CLI: + ```runnable_VAR_argo_docker_image=runnable:latest runnable execute -f examples/concepts/simple.yaml -c examples/configs/argo-config.yaml``` ```yaml linenums="1" --8<-- "examples/concepts/simple.yaml" @@ -630,7 +630,7 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -661,7 +661,7 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": false, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -691,7 +691,7 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur "image": "$argo_docker_image", "expose_parameters_as_inputs": true, "output_file": "argo-pipeline.yaml", - "name": "magnus-dag-", + "name": "runnable-dag-", "annotations": {}, "labels": {}, "namespace": null, @@ -715,7 +715,7 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur "secrets_from_k8s": [], "persistent_volumes": [ { - "name": "magnus-volume", + "name": "runnable-volume", "mount_path": "/mnt" } ], @@ -782,7 +782,7 @@ code versioning tools. We recommend using ```secrets_from_k8s``` in the configur ## Nesting -Magnus compiled argo workflows support deeply nested workflows. +runnable compiled argo workflows support deeply nested workflows. ### Example @@ -809,8 +809,8 @@ Magnus compiled argo workflows support deeply nested workflows. 1. Use ```argo``` executor type to execute the pipeline. 2. By default, all the tasks are executed in the docker image . Please refer to [building docker images](container-environments.md) - 3. Mount the persistent volume ```magnus-volume``` to all the containers as ```/mnt```. - 4. Store the run logs in the file-system. As all containers have access to ```magnus-volume``` + 3. Mount the persistent volume ```runnable-volume``` to all the containers as ```/mnt```. + 4. Store the run logs in the file-system. As all containers have access to ```runnable-volume``` as ```/mnt```. We use that to mounted folder as run log store. @@ -820,12 +820,12 @@ Magnus compiled argo workflows support deeply nested workflows. apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: magnus-dag- + generateName: runnable-dag- annotations: {} labels: {} spec: activeDeadlineSeconds: 172800 - entrypoint: magnus-dag + entrypoint: runnable-dag retryStrategy: limit: "0" retryPolicy: Always @@ -1072,7 +1072,7 @@ Magnus compiled argo workflows support deeply nested workflows. - name: outer-most-map-map-0ukhr5-fan-in template: outer-most-map-map-0ukhr5-fan-in depends: outer-most-map-map-0ukhr5-map.Succeeded || outer-most-map-map-0ukhr5-map.Failed - - name: magnus-dag + - name: runnable-dag failFast: true dag: tasks: @@ -1087,9 +1087,9 @@ Magnus compiled argo workflows support deeply nested workflows. 
depends: outer-most-map-map-0ukhr5.Succeeded - name: generate-list-task-s7za4e container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - generate_list @@ -1112,9 +1112,9 @@ Magnus compiled argo workflows support deeply nested workflows. cpu: 250m - name: outer-most-map-map-0ukhr5-fan-out container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map @@ -1144,9 +1144,9 @@ Magnus compiled argo workflows support deeply nested workflows. path: /tmp/output.txt - name: outer-most-map-map-0ukhr5-fan-in container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map @@ -1171,9 +1171,9 @@ Magnus compiled argo workflows support deeply nested workflows. cpu: 250m - name: nested-parallel-parallel-wje1o4-fan-out container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel @@ -1203,9 +1203,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: nested-parallel-parallel-wje1o4-fan-in container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel @@ -1235,9 +1235,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: inner-most-map-map-yeslqe-fan-out container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.a.inner%most%map @@ -1272,9 +1272,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: inner-most-map-map-yeslqe-fan-in container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.a.inner%most%map @@ -1304,9 +1304,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: executable-stub-blnf25 container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.a.inner%most%map.map_variable_placeholder.executable @@ -1335,9 +1335,9 @@ Magnus compiled argo workflows support deeply nested workflows. 
- name: yarg - name: success-success-trvgst container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.a.inner%most%map.map_variable_placeholder.success @@ -1366,9 +1366,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: yarg - name: success-success-y1yr7v container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.a.success @@ -1396,9 +1396,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: inner-most-map-map-b206p5-fan-out container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.b.inner%most%map @@ -1433,9 +1433,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: inner-most-map-map-b206p5-fan-in container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - fan - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.b.inner%most%map @@ -1465,9 +1465,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: executable-stub-8ui1yv container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.b.inner%most%map.map_variable_placeholder.executable @@ -1496,9 +1496,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: yarg - name: success-success-h4j0k9 container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.b.inner%most%map.map_variable_placeholder.success @@ -1527,9 +1527,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: yarg - name: success-success-dvma7h container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.nested%parallel.b.success @@ -1557,9 +1557,9 @@ Magnus compiled argo workflows support deeply nested workflows. - name: xarg - name: success-success-e4lb2k container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - outer%most%map.map_variable_placeholder.success @@ -1587,9 +1587,9 @@ Magnus compiled argo workflows support deeply nested workflows. 
- name: xarg - name: success-success-2v62uq container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - success @@ -1620,7 +1620,7 @@ Magnus compiled argo workflows support deeply nested workflows. volumes: - name: executor-0 persistentVolumeClaim: - claimName: magnus-volume + claimName: runnable-volume ``` diff --git a/docs/configurations/executors/container-environments.md b/docs/configurations/executors/container-environments.md index 2bb965fe..bf27f530 100644 --- a/docs/configurations/executors/container-environments.md +++ b/docs/configurations/executors/container-environments.md @@ -20,7 +20,7 @@ Follow the below steps to execute the pipeline defined by SDK. tagging the docker image with the short git sha to uniquely identify the docker image (1). 3. Define a [variable to temporarily hold](https://docs.python.org/3/library/string.html#template-strings) the docker image name in the pipeline definition, if the docker image name is not known. -4. Execute the pipeline using the [magnus CLI](../../usage.md/#usage). +4. Execute the pipeline using the [runnable CLI](../../usage.md/#usage). @@ -40,29 +40,29 @@ All containerized executors have a circular dependency problem. Not providing the required environment variable will raise an exception. -To resolve this, magnus supports ```variables``` in the configuration of executors, both global and in step +To resolve this, runnable supports ```variables``` in the configuration of executors, both global and in step overrides. Variables should follow the [python template strings](https://docs.python.org/3/library/string.html#template-strings) -syntax and are replaced with environment variable prefixed by ```MAGNUS_VAR_```. +syntax and are replaced with environment variables prefixed by ```runnable_VAR_```. -Concretely, ```$identifier``` is replaced by ```MAGNUS_VAR_```. +Concretely, ```$identifier``` is replaced by the value of ```runnable_VAR_identifier```. ## Dockerfile -magnus should be installed in the docker image and available in the path. An example dockerfile is provided +runnable should be installed in the docker image and available in the path. An example dockerfile is provided below. !!! note inline end "non-native orchestration" - Having magnus to be part of the docker image adds additional dependencies for python to be present in the docker - image. In that sense, magnus is technically non-native container orchestration tool. + Having runnable be part of the docker image adds additional dependencies for python to be present in the docker + image. In that sense, runnable is technically a non-native container orchestration tool. - Facilitating native container orchestration, without magnus as part of the docker image, results in a complicated + Facilitating native container orchestration, without runnable as part of the docker image, results in a complicated specification of files/parameters/experiment tracking losing the value of native interfaces to these essential orchestration concepts. - With the improvements in python packaging ecosystem, it should be possible to distribute magnus as a + With the improvements in the python packaging ecosystem, it should be possible to distribute runnable as a self-contained binary and reducing the dependency on the docker image. #### TODO: Change this to a proper example.
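As a quick illustration of the substitution described above, the sketch below resolves ```$identifier``` placeholders from ```runnable_VAR_``` environment variables using python's own ```string.Template```. It is illustrative only, not runnable's internal code.

```python
import os
from string import Template

VAR_PREFIX = "runnable_VAR_"

# A fragment of an executor configuration with a placeholder.
config_fragment = "image: $default_docker_image"

# Collect every variable carrying the prefix from the environment.
variables = {
    key[len(VAR_PREFIX):]: value
    for key, value in os.environ.items()
    if key.startswith(VAR_PREFIX)
}

# substitute() raises KeyError when a placeholder has no matching
# variable, mirroring the documented behaviour of raising an exception
# for a missing environment variable.
print(Template(config_fragment).substitute(variables))
```

Running this with ```runnable_VAR_default_docker_image=runnable:latest``` in the environment prints ```image: runnable:latest```.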
diff --git a/docs/configurations/executors/local-container.md b/docs/configurations/executors/local-container.md index 94819c3d..c9fde8bc 100644 --- a/docs/configurations/executors/local-container.md +++ b/docs/configurations/executors/local-container.md @@ -74,7 +74,7 @@ the patterns. 2. By default, all the tasks are executed in the docker image . Please refer to [building docker images](container-environments.md/#dynamic_name_of_the_image) 3. Pass any environment variables that are needed for the container. - 4. Store the run logs in the file-system. Magnus will handle the access to them + 4. Store the run logs in the file-system. runnable will handle the access to them by mounting the file system into the container. @@ -84,10 +84,10 @@ the patterns. multi-stage process](container-environments.md). 1. Generate the ```yaml``` definition file by: - ```MAGNUS_CONFIGURATION_FILE=examples/configs/local-container.yaml python examples/concepts/simple.py``` - 2. Build the docker image with yaml definition in it, called magnus:demo in current example. - 3. Execute the pipeline via the magnus CLI, - ```MAGNUS_VAR_default_docker_image=magnus:demo magnus execute -f magnus-pipeline.yaml -c examples/configs/local-container.yaml``` + ```runnable_CONFIGURATION_FILE=examples/configs/local-container.yaml python examples/concepts/simple.py``` + 2. Build the docker image with yaml definition in it, called runnable:demo in current example. + 3. Execute the pipeline via the runnable CLI, + ```runnable_VAR_default_docker_image=runnable:demo runnable execute -f runnable-pipeline.yaml -c examples/configs/local-container.yaml``` ```python linenums="1" hl_lines="24" @@ -95,7 +95,7 @@ the patterns. ``` 1. You can provide a configuration file dynamically by using the environment - variable ```MAGNUS_CONFIGURATION_FILE```. Please see [SDK for more details](../../sdk.md). + variable ```runnable_CONFIGURATION_FILE```. Please see [SDK for more details](../../sdk.md). @@ -103,9 +103,9 @@ the patterns. For yaml based definitions, the execution order is to: - 1. Build the docker image with the yaml definition in it, called magnus:demo in current example. - 2. Execute the pipeline via the magnus CLI: - ```MAGNUS_VAR_default_docker_image=magnus:demo magnus execute -f examples/concepts/simple.yaml -c examples/configs/local-container.yaml``` + 1. Build the docker image with the yaml definition in it, called runnable:demo in current example. + 2. Execute the pipeline via the runnable CLI: + ```runnable_VAR_default_docker_image=runnable:demo runnable execute -f examples/concepts/simple.yaml -c examples/configs/local-container.yaml``` ```yaml linenums="1" --8<-- "examples/concepts/simple.yaml" @@ -138,7 +138,7 @@ the patterns. "code_identifier": "ef142998dc315ddbd9aa10e016128c872de6e6e1", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" }, { @@ -184,7 +184,7 @@ the patterns. "code_identifier": "ef142998dc315ddbd9aa10e016128c872de6e6e1", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -234,7 +234,7 @@ the patterns. 
"tag": "", "run_id": "shortest-stallman-2113", "variables": { - "default_docker_image": "magnus:demo" + "default_docker_image": "runnable:demo" }, "use_cached": false, "original_run_id": "", @@ -305,17 +305,17 @@ executor. multi-stage process](container-environments.md). 1. Generate the ```yaml``` definition file by: - ```MAGNUS_CONFIGURATION_FILE=examples/executors/local-container-override.yaml python examples/executors/step_overrides_container.py``` + ```runnable_CONFIGURATION_FILE=examples/executors/local-container-override.yaml python examples/executors/step_overrides_container.py``` 2. Build the docker image with yaml definition in it. In this example, we build two docker images. - * magnus:3.8 as the default_docker_image. - * magnus:3.9 as the custom_docker_image. + * runnable:3.8 as the default_docker_image. + * runnable:3.9 as the custom_docker_image. Both the docker images are same except for the python version. - 3. Execute the pipeline via the magnus CLI, - ```MAGNUS_VAR_default_docker_image=magnus:3.8 MAGNUS_VAR_custom_docker_image=magnus:3.9 magnus execute -f magnus-pipeline.yaml -c examples/executors/local-container-override.yaml``` + 3. Execute the pipeline via the runnable CLI, + ```runnable_VAR_default_docker_image=runnable:3.8 runnable_VAR_custom_docker_image=runnable:3.9 runnable execute -f runnable-pipeline.yaml -c examples/executors/local-container-override.yaml``` You should see the console output of the ```step 1``` to be ```3.8``` and key to be "value" @@ -334,15 +334,15 @@ executor. two docker images. - * magnus:3.8 as the default_docker_image. - * magnus:3.9 as the custom_docker_image. + * runnable:3.8 as the default_docker_image. + * runnable:3.9 as the custom_docker_image. Both the docker images are same except for the python version. - 2. Execute the pipeline via the magnus CLI: - ```MAGNUS_VAR_default_docker_image=magnus:3.8 MAGNUS_VAR_custom_docker_image=magnus:3.9 magnus execute -f examples/executors/step_overrides_container.yaml -c examples/executors/local-container-override.yaml``` + 2. Execute the pipeline via the runnable CLI: + ```runnable_VAR_default_docker_image=runnable:3.8 runnable_VAR_custom_docker_image=runnable:3.9 runnable execute -f examples/executors/step_overrides_container.yaml -c examples/executors/local-container-override.yaml``` You should see the console output of the ```step 1``` to be ```3.8``` and key to be "value" while the python version for ```step 2``` to be 3.9 and key to be "not-value". 
diff --git a/docs/configurations/executors/mocked.md b/docs/configurations/executors/mocked.md index 79070547..04ccf38b 100644 --- a/docs/configurations/executors/mocked.md +++ b/docs/configurations/executors/mocked.md @@ -36,7 +36,7 @@ The following example shows the simple case of mocking all the steps of the pipe === "pipeline in yaml" You can execute the mocked pipeline by: - ```magnus execute -f examples/concepts/simple.yaml -c examples/configs/mocked-config-simple.yaml``` + ```runnable execute -f examples/concepts/simple.yaml -c examples/configs/mocked-config-simple.yaml``` ```yaml linenums="1" --8<-- "examples/concepts/simple.yaml" @@ -46,7 +46,7 @@ The following example shows the simple case of mocking all the steps of the pipe You can execute the mocked pipeline by: - ```MAGNUS_CONFIGURATION_FILE=examples/configs/mocked-config-simple.yaml python examples/concepts/simple.py``` + ```runnable_CONFIGURATION_FILE=examples/configs/mocked-config-simple.yaml python examples/concepts/simple.py``` ```python linenums="1" --8<-- "examples/concepts/simple.py" @@ -84,7 +84,7 @@ The following example shows the simple case of mocking all the steps of the pipe "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -115,7 +115,7 @@ The following example shows the simple case of mocking all the steps of the pipe "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -218,7 +218,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is The run log is a simple json file that can be parsed and validated against designed behaviors. You can also create the ```RunLog``` object by deserializing - ```magnus.datastore.RunLog``` from the json. + ```runnable.datastore.RunLog``` from the json. This can be handy when validating complex pipelines. @@ -262,7 +262,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -301,7 +301,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -332,7 +332,7 @@ and ```step 1 >> step3``` in case of failure. 
The traversal is "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -444,9 +444,9 @@ and ```step 1 >> step3``` in case of failure. The traversal is Running the pipeline with mocked configuration: - for yaml: ```magnus execute -f examples/on-failure.yaml -c examples/configs/mocked-config-unittest.yaml``` + for yaml: ```runnable execute -f examples/on-failure.yaml -c examples/configs/mocked-config-unittest.yaml``` - for python: ```MAGNUS_CONFIGURATION_FILE=examples/configs/mocked-config-unittest.yaml python examples/on_failure.py``` + for python: ```RUNNABLE_CONFIGURATION_FILE=examples/configs/mocked-config-unittest.yaml python examples/on_failure.py``` ```yaml linenums="1" --8<-- "examples/configs/mocked-config-unittest.yaml" @@ -479,7 +479,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -518,7 +518,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -549,7 +549,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -580,7 +580,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -644,7 +644,7 @@ and ```step 1 >> step3``` in case of failure. The traversal is "name": "", "description": "This is a simple pipeline to demonstrate failure in a step.\n\nThe default behavior is to traverse to step type fail and mark the run as failed.\nBut you can control it by providing on_failure.\n\nIn this example: step 1 fails and moves to step 3 skipping step 2. The pipeline status\nis considered to be - success.\n\nstep 1 (FAIL) >> step 3 >> success\n\nYou can run this pipeline by magnus execute -f examples/on-failure.yaml\n", + success.\n\nstep 1 (FAIL) >> step 3 >> success\n\nYou can run this pipeline by runnable execute -f examples/on-failure.yaml\n", "steps": { "step 1": { "type": "task", @@ -742,7 +742,7 @@ failure which are captured by the ```run log``` and ```catalog```.
"code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -781,7 +781,7 @@ failure which are captured by the ```run log``` and ```catalog```. "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -827,7 +827,7 @@ failure which are captured by the ```run log``` and ```catalog```. "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -873,7 +873,7 @@ failure which are captured by the ```run log``` and ```catalog```. "code_identifier": "d76cf865af2f8e03b6c1205403351cbe42e6cdc4", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -930,7 +930,7 @@ failure which are captured by the ```run log``` and ```catalog```. "dag": { "start_at": "Setup", "name": "", - "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n magnus execute -f examples/retry-fail.yaml -c examples/configs/fs-catalog-run_log.yaml \\\n --run-id wrong-file-name\n", + "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n runnable execute -f examples/retry-fail.yaml -c examples/configs/fs-catalog-run_log.yaml \\\n --run-id wrong-file-name\n", "steps": { "Setup": { "type": "task", @@ -958,7 +958,7 @@ failure which are captured by the ```run log``` and ```catalog```. }, "max_attempts": 1, "command_type": "shell", - "command": "echo \"Hello from magnus\" >> data/hello.txt\n", + "command": "echo \"Hello from runnable\" >> data/hello.txt\n", "node_name": "Create Content" }, "Retrieve Content": { @@ -1011,5 +1011,5 @@ failure which are captured by the ```run log``` and ```catalog```. 
and retry with the fix: - ```magnus execute -f examples/retry-fail.yaml -c examples/configs/mocked-config-debug.yaml + ```runnable execute -f examples/retry-fail.yaml -c examples/configs/mocked-config-debug.yaml --run-id debug-pipeline``` diff --git a/docs/configurations/overview.md b/docs/configurations/overview.md index 29f1acdc..6ff83205 100644 --- a/docs/configurations/overview.md +++ b/docs/configurations/overview.md @@ -1,4 +1,4 @@ -**Magnus** is designed to make effective collaborations between data scientists/researchers +**runnable** is designed to enable effective collaboration between data scientists/researchers and infrastructure engineers. All the features described in the [concepts](../concepts/the-big-picture.md) are @@ -7,10 +7,10 @@ aimed at the *research* side of data science projects while configurations add * Configurations are presented during the execution: -For ```yaml``` based pipeline, use the ```--config-file, -c``` option in the [magnus CLI](../usage.md/#usage). +For ```yaml``` based pipeline, use the ```--config-file, -c``` option in the [runnable CLI](../usage.md/#usage). -For [python SDK](../sdk.md/#magnus.Pipeline.execute), use the ```configuration_file``` option or via -environment variable ```MAGNUS_CONFIGURATION_FILE``` +For [python SDK](../sdk.md/#runnable.Pipeline.execute), use the ```configuration_file``` option or the +environment variable ```RUNNABLE_CONFIGURATION_FILE``` ## Default configuration diff --git a/docs/configurations/run-log.md b/docs/configurations/run-log.md index 118ab11c..211cfb21 100644 --- a/docs/configurations/run-log.md +++ b/docs/configurations/run-log.md @@ -62,11 +62,11 @@ run_log_store: === "sdk pipeline" The configuration can be provided dynamically by setting the environment variable - ```MAGNUS_CONFIGURATION_FILE```. + ```RUNNABLE_CONFIGURATION_FILE```. Executing the pipeline with: - ```MAGNUS_CONFIGURATION_FILE=examples/configs/fs-run_log.yaml python examples/concepts/simple.py``` + ```RUNNABLE_CONFIGURATION_FILE=examples/configs/fs-run_log.yaml python examples/concepts/simple.py``` ```python linenums="1" --8<-- "examples/concepts/simple.py" @@ -97,7 +97,7 @@ run_log_store: "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -136,7 +136,7 @@ run_log_store: "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -264,11 +264,11 @@ run_log_store: === "sdk pipeline" The configuration can be provided dynamically by setting the environment variable - ```MAGNUS_CONFIGURATION_FILE```. + ```RUNNABLE_CONFIGURATION_FILE```. Executing the pipeline with: - ```MAGNUS_CONFIGURATION_FILE=examples/configs/chunked-fs-run_log.yaml python examples/concepts/simple.py``` + ```RUNNABLE_CONFIGURATION_FILE=examples/configs/chunked-fs-run_log.yaml python examples/concepts/simple.py``` ```python linenums="1" --8<-- "examples/concepts/simple.py" @@ -363,7 +363,7 @@ run_log_store: Contains only the information of the single step ```simple```. The name of the file follows the pattern: - ```StepLog--.json```.
The timestamp allows magnus to infer + ```StepLog--.json```. The timestamp allows runnable to infer the order of execution of the steps. ```json linenums="1" @@ -379,7 +379,7 @@ run_log_store: "code_identifier": "39cd98770cb2fd6994d8ac08ae4c5506e5ce694a", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], diff --git a/docs/configurations/secrets.md b/docs/configurations/secrets.md index edba9a11..ad99d171 100644 --- a/docs/configurations/secrets.md +++ b/docs/configurations/secrets.md @@ -1,5 +1,5 @@ -**Magnus** provides an interface to secrets managers -[via the API](../interactions.md/#magnus.get_secret). +**runnable** provides an interface to secrets managers +[via the API](../interactions.md/#runnable.get_secret). Please refer to [Secrets in concepts](../concepts/secrets.md) for more information. @@ -49,7 +49,7 @@ The actual key while calling the secrets manager via the API, ```get_secret(secr Below is a simple pipeline to demonstrate the use of secrets. The configuration file to use can be dynamically specified via the environment variable - ```MAGNUS_CONFIGURATION_FILE```. + ```RUNNABLE_CONFIGURATION_FILE```. The example can be found in ```examples/secrets_env.py``` @@ -60,7 +60,7 @@ The actual key while calling the secrets manager via the API, ```get_secret(secr === "Default Configuration" We can execute the pipeline using this configuration by: - ```secret="secret_value" MAGNUS_CONFIGURATION_FILE=examples/configs/secrets-env-default.yaml python examples/secrets_env.py``` + ```secret="secret_value" RUNNABLE_CONFIGURATION_FILE=examples/configs/secrets-env-default.yaml python examples/secrets_env.py``` The configuration file is located at ```examples/configs/secrets-env-default.yaml``` @@ -71,7 +71,7 @@ The actual key while calling the secrets manager via the API, ```get_secret(secr === "Prefixed and Suffixed Configuration" We can execute the pipeline using this configuration by: - ```magnus_secret="secret_value" MAGNUS_CONFIGURATION_FILE=examples/configs/secrets-env-ps.yaml python examples/secrets_env.py``` + ```runnable_secret="secret_value" RUNNABLE_CONFIGURATION_FILE=examples/configs/secrets-env-ps.yaml python examples/secrets_env.py``` The configuration file is located at ```examples/configs/secrets-env-ps.yaml``` @@ -83,7 +83,7 @@ The actual key while calling the secrets manager via the API, ```get_secret(secr ## dotenv -```.env``` files are routinely used to provide configuration parameters and secrets during development phase. Magnus can dotenv files as a secret store and can surface them to tasks. +```.env``` files are routinely used to provide configuration parameters and secrets during the development phase. runnable can use dotenv files as a secret store and surface them to tasks. ### Configuration diff --git a/docs/example/dataflow.md b/docs/example/dataflow.md index 37a9177f..9a24aadf 100644 --- a/docs/example/dataflow.md +++ b/docs/example/dataflow.md @@ -1,8 +1,8 @@ -In **magnus**, we distinguish between 2 types of data that steps can communicate with each other. +In **runnable**, we distinguish between two types of data that steps can exchange with each other. [`Parameters`](#flow_of_parameters) -: Parameters can be thought of input and output arguments of functions. Magnus supports +: Parameters can be thought of as input and output arguments of functions. 
runnable supports pydantic models both as input and return types of functions. [`Files`](#flow_of_files) @@ -16,9 +16,9 @@ using catalog. This can be controlled either by the configuration or by python A The [initial parameters](../concepts/parameters.md) of the pipeline can set by using a ```yaml``` file and presented during execution -```--parameters-file, -parameters``` while using the [magnus CLI](../usage.md/#usage) +```--parameters-file, -parameters``` while using the [runnable CLI](../usage.md/#usage) -or by using ```parameters_file``` with [the sdk](../sdk.md/#magnus.Pipeline.execute). +or by using ```parameters_file``` with [the sdk](../sdk.md/#runnable.Pipeline.execute). === "Initial Parameters" @@ -60,10 +60,10 @@ or by using ```parameters_file``` with [the sdk](../sdk.md/#magnus.Pipeline.exec === "Application native way" - !!! info annotate inline end "No ```import magnus``` !!!" + !!! info annotate inline end "No ```import runnable``` !!!" - A lot of design emphasis is to avoid "import magnus" and keep the function signature native to the application. - Magnus also has API's get_parameter and set_parameter if they are handy. + Much of the design emphasis is on avoiding "import runnable" and keeping the function signature native to the application. + runnable also has the APIs get_parameter and set_parameter if they are handy. @@ -102,7 +102,7 @@ or by using ```parameters_file``` with [the sdk](../sdk.md/#magnus.Pipeline.exec Tasks of type shell use this mechanism to access parameters. - There are richer ways to pass parameters in magnus if you are using only + There are richer ways to pass parameters in runnable if you are using only python in your application. This mechanism helps when you have non-python code as part of your application. @@ -111,8 +111,8 @@ or by using ```parameters_file``` with [the sdk](../sdk.md/#magnus.Pipeline.exec --8<-- "examples/parameters_env.yaml" ``` - 1. Show all the parameters prefixed by MAGNUS_PRM_ - 2. Set new values of the parameters as environment variables prefixed by MAGNUS_PRM_ + 1. Show all the parameters prefixed by RUNNABLE_PRM_ + 2. Set new values of the parameters as environment variables prefixed by RUNNABLE_PRM_ 3. Consume the parameters like you would using python. @@ -120,7 +120,7 @@ or by using ```parameters_file``` with [the sdk](../sdk.md/#magnus.Pipeline.exec ## Flow of Files -**Magnus** stores all the artifacts/files/logs generated by ```task``` nodes in a central storage called +**runnable** stores all the artifacts/files/logs generated by ```task``` nodes in a central storage called [catalog](../concepts/catalog.md). The catalog is indexed by the ```run_id``` of the pipeline and is unique for every execution of the pipeline. @@ -162,7 +162,7 @@ You can access content as if you are accessing files relative to the project roo ``` 1. Use local file system as a central catalog, defaults to ```.catalog``` - 2. By default, magnus uses ```data``` folder as the directory containing the user data. + 2. By default, runnable uses ```data``` folder as the directory containing the user data. === "pipeline in yaml" @@ -175,7 +175,7 @@ You can access content as if you are accessing files relative to the project roo ```python def create_content(): with open("data/hello.txt") as f: - f.write("hello from magnus") + f.write("hello from runnable") def retrieve_content(): with open("data/hello.txt") as f: @@ -190,7 +190,7 @@ You can access content as if you are accessing files relative to the project roo 1. 
Make a ```data``` folder if it does not already exist. 2. As the ```compute_data_folder``` is defined to ```.```, all paths should be relative to ```.```. Put the file ```hello.txt``` in ```data``` folder into the catalog. 3. We have intentionally made this ```stub``` node to prevent accidentally deleting your content. Please make it a ```task``` to actually delete the ```data``` folder. - 4. Should print "Hello from magnus" as the content of the ```hello.txt```. + 4. Should print "Hello from runnable" as the content of the ```hello.txt```. 5. Override the default ```.``` as ```compute_data_folder``` to ```data```. All interactions should then be relative to ```data``` folder. 6. Same as above, make it a ```task``` to actually delete the ```data``` folder @@ -205,7 +205,7 @@ You can access content as if you are accessing files relative to the project roo ```python def create_content(): with open("data/hello.txt") as f: - f.write("hello from magnus") + f.write("hello from runnable") def retrieve_content(): with open("data/hello.txt") as f: diff --git a/docs/example/example.md b/docs/example/example.md index d96bde7c..585b4b4a 100644 --- a/docs/example/example.md +++ b/docs/example/example.md @@ -1,7 +1,7 @@ -Magnus revolves around the concept of [pipelines or workflows](../concepts/pipeline.md). -Pipelines defined in magnus are translated into +runnable revolves around the concept of [pipelines or workflows](../concepts/pipeline.md). +Pipelines defined in runnable are translated into other workflow engine definitions like [Argo workflows](https://argoproj.github.io/workflows/) or [AWS step functions](https://aws.amazon.com/step-functions/). @@ -30,7 +30,7 @@ flowchart TD ``` -This pipeline can be represented in **magnus** as below: +This pipeline can be represented in **runnable** as below: === "yaml" @@ -83,7 +83,7 @@ This pipeline can be represented in **magnus** as below: "code_identifier": "399b0d42f4f28aaeeb2e062bb0b938d50ff1595c", // (4) "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -114,7 +114,7 @@ This pipeline can be represented in **magnus** as below: "code_identifier": "399b0d42f4f28aaeeb2e062bb0b938d50ff1595c", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -145,7 +145,7 @@ This pipeline can be represented in **magnus** as below: "code_identifier": "399b0d42f4f28aaeeb2e062bb0b938d50ff1595c", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -176,7 +176,7 @@ This pipeline can be represented in **magnus** as below: "code_identifier": "399b0d42f4f28aaeeb2e062bb0b938d50ff1595c", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -207,7 +207,7 @@ This pipeline can be represented in **magnus** as below: "code_identifier": "399b0d42f4f28aaeeb2e062bb0b938d50ff1595c", 
"code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -365,14 +365,14 @@ To run the pipeline in different environments, we just provide the yaml: - ```magnus execute -f examples/contrived.yaml -c examples/configs/argo-config.yaml``` + ```runnable execute -f examples/contrived.yaml -c examples/configs/argo-config.yaml``` python: Please refer to [containerised environments](../configurations/executors/container-environments.md) for more information. - MAGNUS_CONFIGURATION_FILE=examples/configs/argo-config.yaml python examples/contrived.py && magnus execute -f magnus-pipeline.yaml -c examples/configs/argo-config.yaml + runnable_CONFIGURATION_FILE=examples/configs/argo-config.yaml python examples/contrived.py && runnable execute -f runnable-pipeline.yaml -c examples/configs/argo-config.yaml ``` yaml linenums="1" title="Argo Configuration" --8<-- "examples/configs/argo-config.yaml" @@ -381,7 +381,7 @@ To run the pipeline in different environments, we just provide the 1. Use argo workflows as the execution engine to run the pipeline. 2. Run this docker image for every step of the pipeline. Please refer to [containerised environments](../configurations/executors/container-environments.md) for more details. - 3. Mount the volume from Kubernetes persistent volumes (magnus-volume) to /mnt directory. + 3. Mount the volume from Kubernetes persistent volumes (runnable-volume) to /mnt directory. 4. Resource constraints for the container runtime. 5. Since every step runs in a container, the run log should be persisted. Here we are using the file-system as our run log store. diff --git a/docs/example/experiment-tracking.md b/docs/example/experiment-tracking.md index 03109a5c..70ae1be9 100644 --- a/docs/example/experiment-tracking.md +++ b/docs/example/experiment-tracking.md @@ -1,7 +1,7 @@ Metrics in data science projects summarize important information about the execution and performance of the experiment. -Magnus captures [this information as part of the run log](../concepts/experiment-tracking.md) and also provides +runnable captures [this information as part of the run log](../concepts/experiment-tracking.md) and also provides an [interface to experiment tracking tools](../concepts/experiment-tracking.md/#experiment_tracking_tools) like [mlflow](https://mlflow.org/docs/latest/tracking.html) or [Weights and Biases](https://wandb.ai/site/experiment-tracking). @@ -58,7 +58,7 @@ like [mlflow](https://mlflow.org/docs/latest/tracking.html) or "code_identifier": "0b62e4c661a4b4a2187afdf44a7c64520374202d", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -104,7 +104,7 @@ like [mlflow](https://mlflow.org/docs/latest/tracking.html) or "code_identifier": "0b62e4c661a4b4a2187afdf44a7c64520374202d", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -198,5 +198,5 @@ like [mlflow](https://mlflow.org/docs/latest/tracking.html) or
![Image](../assets/screenshots/mlflow_example.png){ width="800" height="600"} -
mlflow UI for the execution. The run_id remains the same as the run_id of magnus
+
mlflow UI for the execution. The run_id remains the same as the run_id of the runnable execution.
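Because the run_id carries over, the execution can also be located programmatically in mlflow. A hedged sketch follows; the tracking URI, experiment name, and run name below are placeholders, and the ```search_runs``` usage assumes mlflow 2.x.

```python
import mlflow

# Placeholder values: substitute your tracking server and the runnable run_id.
mlflow.set_tracking_uri("http://127.0.0.1:8080")

# mlflow 2.x filter syntax; the run name is assumed to match the runnable run_id.
runs = mlflow.search_runs(
    experiment_names=["Default"],
    filter_string="attributes.run_name = 'shortest-stallman-2113'",
)
print(runs[["run_id", "status"]])
```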
diff --git a/docs/example/reproducibility.md b/docs/example/reproducibility.md index 8b31e6fa..39bdfd31 100644 --- a/docs/example/reproducibility.md +++ b/docs/example/reproducibility.md @@ -1,4 +1,4 @@ -Magnus stores a variety of information about the current execution in [run log](../concepts/run-log.md). +runnable stores a variety of information about the current execution in [run log](../concepts/run-log.md). The run log is internally used for keeping track of the execution (status of different steps, parameters, etc) but also has rich information for reproducing the state at the time of pipeline execution. @@ -85,7 +85,7 @@ Below we show an example pipeline and the different layers of the run log. "code_identifier": "ff60e7fa379c38adaa03755977057cd10acc4baa", // (3) "code_identifier_type": "git", "code_identifier_dependable": true, // (4) - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], diff --git a/docs/example/retry-after-failure.md b/docs/example/retry-after-failure.md index 1d3c5441..7f83bff3 100644 --- a/docs/example/retry-after-failure.md +++ b/docs/example/retry-after-failure.md @@ -1,4 +1,4 @@ -Magnus allows you to [debug and recover](../concepts/run-log.md/#retrying_failures) from a +runnable allows you to [debug and recover](../concepts/run-log.md/#retrying_failures) from a failure during the execution of pipeline. The pipeline can be restarted in any suitable environment for debugging. @@ -23,7 +23,7 @@ Below is an example of retrying a pipeline that failed. You can run this pipeline on your local machine by - ```magnus execute -f examples/retry-fail.yaml -c examples/configs/fs-catalog-run_log.yaml --run-id wrong-file-name``` + ```runnable execute -f examples/retry-fail.yaml -c examples/configs/fs-catalog-run_log.yaml --run-id wrong-file-name``` Note that we have specified the ```run_id``` to be something we can use later. The execution logs of the steps in the catalog will show the reason of the failure. @@ -64,7 +64,7 @@ Below is an example of retrying a pipeline that failed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -103,7 +103,7 @@ Below is an example of retrying a pipeline that failed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -149,7 +149,7 @@ Below is an example of retrying a pipeline that failed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -195,7 +195,7 @@ Below is an example of retrying a pipeline that failed. 
"code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -247,14 +247,14 @@ Below is an example of retrying a pipeline that failed. "tag": "", "run_id": "wrong-file-name", "variables": { - "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/magnus:latest" + "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/runnable:latest" }, "use_cached": false, "original_run_id": "", "dag": { "start_at": "Setup", "name": "", - "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n magnus execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", + "description": "This is a simple pipeline that demonstrates retrying failures.\n\n1. Setup: We setup a data folder, we ignore if it is already present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n runnable execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", "steps": { "Setup": { "type": "task", @@ -282,7 +282,7 @@ Below is an example of retrying a pipeline that failed. }, "max_attempts": 1, "command_type": "shell", - "command": "echo \"Hello from magnus\" >> data/hello.txt\n", + "command": "echo \"Hello from runnable\" >> data/hello.txt\n", "node_name": "Create Content" }, "Retrieve Content": { @@ -325,7 +325,7 @@ Below is an example of retrying a pipeline that failed. You can run this pipeline on your local machine by - ```magnus execute -f examples/retry-fixed.yaml -c examples/configs/fs-catalog-run_log.yaml --use-cached wrong-file-name``` + ```runnable execute -f examples/retry-fixed.yaml -c examples/configs/fs-catalog-run_log.yaml --use-cached wrong-file-name``` Note that we have specified the run_id of the failed execution to be ```use-cached``` for the new execution. @@ -371,7 +371,7 @@ Below is an example of retrying a pipeline that failed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -392,7 +392,7 @@ Below is an example of retrying a pipeline that failed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -413,7 +413,7 @@ Below is an example of retrying a pipeline that failed. 
"code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -459,7 +459,7 @@ Below is an example of retrying a pipeline that failed. "code_identifier": "f94e49a4fcecebac4d5eecbb5b691561b08e45c0", "code_identifier_type": "git", "code_identifier_dependable": true, - "code_identifier_url": "https://github.com/AstraZeneca/magnus-core.git", + "code_identifier_url": "https://github.com/AstraZeneca/runnable-core.git", "code_identifier_message": "" } ], @@ -511,7 +511,7 @@ Below is an example of retrying a pipeline that failed. "tag": "", "run_id": "naive-wilson-0625", "variables": { - "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/magnus:latest" + "argo_docker_image": "harbor.csis.astrazeneca.net/mlops/runnable:latest" }, "use_cached": true, "original_run_id": "wrong-file-name", @@ -522,7 +522,7 @@ Below is an example of retrying a pipeline that failed. present\n2. Create Content: We create a \"hello.txt\" and \"put\" the file in catalog\n3. Clean up to get again: We remove the data folder. Note that this is stubbed to prevent\n accidental deletion of your contents. You can change type to task to make really run.\n4. Retrieve Content: We \"get\" the file \"hello.txt\" from the catalog and show the contents\n5. Cleanup: We remove the data folder. Note that this is stubbed to prevent accidental deletion.\n\n\nYou can run this pipeline by:\n - magnus execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", + runnable execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml\n", "steps": { "Setup": { "type": "stub", @@ -549,7 +549,7 @@ Below is an example of retrying a pipeline that failed. }, "max_attempts": 1, "command_type": "shell", - "command": "echo \"Hello from magnus\" >> data/hello.txt\n" + "command": "echo \"Hello from runnable\" >> data/hello.txt\n" }, "Retrieve Content": { "type": "task", @@ -585,7 +585,7 @@ Below is an example of retrying a pipeline that failed. ``` -Magnus also supports [```mocked``` executor](../configurations/executors/mocked.md) which can +runnable also supports [```mocked``` executor](../configurations/executors/mocked.md) which can patch and mock tasks to isolate and focus on the failed task. Since python functions and notebooks are run in the same shell, it is possible to use [python debugger](https://docs.python.org/3/library/pdb.html) and diff --git a/docs/example/secrets.md b/docs/example/secrets.md index 7edb3c91..0a651870 100644 --- a/docs/example/secrets.md +++ b/docs/example/secrets.md @@ -1,9 +1,9 @@ -Secrets are required assets as the complexity of the application increases. Magnus provides a -[python API](../interactions.md/#magnus.get_secret) to get secrets from various sources. +Secrets are required assets as the complexity of the application increases. runnable provides a +[python API](../interactions.md/#runnable.get_secret) to get secrets from various sources. -!!! info annotate inline end "from magnus import get_secret" +!!! info annotate inline end "from runnable import get_secret" - Secrets is the only interface that you are required to "import magnus" in your python application. + Secrets is the only interface that you are required to "import runnable" in your python application. Native python and Jupyter notebooks can use this API. 
We currently do not support shell tasks with secrets from this interface. (1) diff --git a/docs/example/steps.md b/docs/example/steps.md index 873d10ff..fd1d6175 100644 --- a/docs/example/steps.md +++ b/docs/example/steps.md @@ -1,4 +1,4 @@ -Magnus provides a rich definition of of step types. +runnable provides a rich set of step types.
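To make the step types concrete, here is a small sketch using the python SDK names documented in this changeset (```PythonTask```, ```ShellTask```, ```Stub```, ```Pipeline```); treat the exact keyword arguments as assumptions inferred from the examples above.

```python
# Sketch only: one python task, one shell task and a stubbed placeholder,
# chained sequentially into a pipeline.
from runnable import Pipeline, PythonTask, ShellTask, Stub


def say_hello():
    print("hello from runnable")


hello = PythonTask(function=say_hello, name="hello")
shell = ShellTask(command='echo "hello from runnable"', name="shell_hello")
todo = Stub(name="todo", terminate_with_success=True)  # placeholder step

pipeline = Pipeline(steps=[hello, shell, todo])
pipeline.execute()
```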
diff --git a/docs/extensions.md b/docs/extensions.md index 132c1482..93046913 100644 --- a/docs/extensions.md +++ b/docs/extensions.md @@ -1,45 +1,45 @@ ## General set up -Magnus is built around the idea to decouple the pipeline definition and pipeline execution. +runnable is built around the idea of decoupling the pipeline definition and pipeline execution. [All the concepts](concepts/the-big-picture.md/) are defined with this principle and therefore are extendible as long as the API is satisfied. We internally use [stevedore](https:/pypi.org/project/stevedore/) to manage extensions. -Our [pyproject.toml](https://github.com/AstraZeneca/magnus-core/blob/main/pyproject.toml) has +Our [pyproject.toml](https://github.com/AstraZeneca/runnable-core/blob/main/pyproject.toml) has plugin space for all the concepts. ```toml [tool.poetry.plugins."executor"] -"local" = "magnus.extensions.executor.local.implementation:LocalExecutor" -"local-container" = "magnus.extensions.executor.local_container.implementation:LocalContainerExecutor" -"argo" = "magnus.extensions.executor.argo.implementation:ArgoExecutor" +"local" = "runnable.extensions.executor.local.implementation:LocalExecutor" +"local-container" = "runnable.extensions.executor.local_container.implementation:LocalContainerExecutor" +"argo" = "runnable.extensions.executor.argo.implementation:ArgoExecutor" # Plugins for Catalog [tool.poetry.plugins."catalog"] -"do-nothing" = "magnus.catalog:DoNothingCatalog" -"file-system" = "magnus.extensions.catalog.file_system.implementation:FileSystemCatalog" +"do-nothing" = "runnable.catalog:DoNothingCatalog" +"file-system" = "runnable.extensions.catalog.file_system.implementation:FileSystemCatalog" # Plugins for Secrets [tool.poetry.plugins."secrets"] -"do-nothing" = "magnus.secrets:DoNothingSecretManager" -"dotenv" = "magnus.extensions.secrets.dotenv.implementation:DotEnvSecrets" -"env-secrets-manager" = "magnus.extensions.secrets.env_secrets.implementation:EnvSecretsManager" +"do-nothing" = "runnable.secrets:DoNothingSecretManager" +"dotenv" = "runnable.extensions.secrets.dotenv.implementation:DotEnvSecrets" +"env-secrets-manager" = "runnable.extensions.secrets.env_secrets.implementation:EnvSecretsManager" # Plugins for Run Log store [tool.poetry.plugins."run_log_store"] -"buffered" = "magnus.datastore:BufferRunLogstore" -"file-system" = "magnus.extensions.run_log_store.file_system.implementation:FileSystemRunLogstore" -"chunked-fs" = "magnus.extensions.run_log_store.chunked_file_system.implementation:ChunkedFileSystemRunLogStore" +"buffered" = "runnable.datastore:BufferRunLogstore" +"file-system" = "runnable.extensions.run_log_store.file_system.implementation:FileSystemRunLogstore" +"chunked-fs" = "runnable.extensions.run_log_store.chunked_file_system.implementation:ChunkedFileSystemRunLogStore" # Plugins for Experiment tracker [tool.poetry.plugins."experiment_tracker"] -"do-nothing" = "magnus.experiment_tracker:DoNothingTracker" -"mlflow" = "magnus.extensions.experiment_tracker.mlflow.implementation:MLFlowExperimentTracker" +"do-nothing" = "runnable.experiment_tracker:DoNothingTracker" +"mlflow" = "runnable.extensions.experiment_tracker.mlflow.implementation:MLFlowExperimentTracker" # Plugins for Pickler [tool.poetry.plugins."pickler"] -"pickle" = "magnus.pickler:NativePickler" +"pickle" = "runnable.pickler:NativePickler" # Plugins for Integration @@ -48,19 +48,19 @@ plugin space for all the concepts. 
# Plugins for Tasks [tool.poetry.plugins."tasks"] -"python" = "magnus.tasks:PythonTaskType" -"shell" = "magnus.tasks:ShellTaskType" -"notebook" = "magnus.tasks:NotebookTaskType" +"python" = "runnable.tasks:PythonTaskType" +"shell" = "runnable.tasks:ShellTaskType" +"notebook" = "runnable.tasks:NotebookTaskType" # Plugins for Nodes [tool.poetry.plugins."nodes"] -"task" = "magnus.extensions.nodes:TaskNode" -"fail" = "magnus.extensions.nodes:FailNode" -"success" = "magnus.extensions.nodes:SuccessNode" -"parallel" = "magnus.extensions.nodes:ParallelNode" -"map" = "magnus.extensions.nodes:MapNode" -"stub" = "magnus.extensions.nodes:StubNode" +"task" = "runnable.extensions.nodes:TaskNode" +"fail" = "runnable.extensions.nodes:FailNode" +"success" = "runnable.extensions.nodes:SuccessNode" +"parallel" = "runnable.extensions.nodes:ParallelNode" +"map" = "runnable.extensions.nodes:MapNode" +"stub" = "runnable.extensions.nodes:StubNode" ``` @@ -71,7 +71,7 @@ them, the complexity is mostly in having access to them. To write extensions for your project and are not useful for wider audience, include the plugin within your pyproject.toml or [setuptools entry points](https://setuptools.pypa.io/en/latest/ pkg_resources.html#entry-points). During the execution of the pipeline, -magnus would automatically pick up the extension if it registered to the correct namespace. +runnable would automatically pick up the extension if it is registered to the correct namespace. The below section shows the base class implementation for all the concepts. All the base classes @@ -86,7 +86,7 @@ Examples: [local](configurations/executors/local.md), [local-container](configurations/executors/local-container.md), [argo](configurations/executors/argo.md) -::: magnus.executor.BaseExecutor +::: runnable.executor.BaseExecutor options: show_root_heading: true show_source: true @@ -103,7 +103,7 @@ Examples: [buffered](configurations/run-log.md/#buffered), [file-system](configurations/run-log.md/#file-system), [chunked-fs](configurations/run-log.md/#chunked-fs) -::: magnus.datastore.BaseRunLogStore +::: runnable.datastore.BaseRunLogStore options: show_root_heading: true show_source: true @@ -111,7 +111,7 @@ Examples: [buffered](configurations/run-log.md/#buffered), members: None heading_level: 3 -The ```RunLog``` is a nested pydantic model and is located in ```magnus.datastore.RunLog```. +The ```RunLog``` is a nested pydantic model and is located in ```runnable.datastore.RunLog```. 
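Since ```RunLog``` is a pydantic model, validating a run log file reduces to a deserialization call. A sketch of that idea follows; the file location assumes the file-system run log store's default directory, and the ```model_validate_json``` call assumes pydantic v2 (use ```parse_raw``` on v1).

```python
from pathlib import Path

from runnable.datastore import RunLog

# Assumed default location of the file-system run log store.
raw = Path(".run_log_store/shortest-stallman-2113.json").read_text()

run_log = RunLog.model_validate_json(raw)  # pydantic v2 style
assert run_log.run_id == "shortest-stallman-2113"
print(run_log.status)
```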
@@ -123,7 +123,7 @@ Example: [do-nothing](configurations/catalog.md/#do-nothing), [file-system](configurations/catalog.md/#file-system) -::: magnus.catalog.BaseCatalog +::: runnable.catalog.BaseCatalog options: show_root_heading: true show_source: true @@ -141,7 +141,7 @@ Example: [env-secrets-manager](configurations/secrets.md/#environment_secret_manager), [dotenv](configurations/secrets.md/#dotenv) -::: magnus.secrets.BaseSecrets +::: runnable.secrets.BaseSecrets options: show_root_heading: true show_source: true @@ -157,7 +157,7 @@ Register to namespace: [tool.poetry.plugins."experiment_tracker"] Example: [do-nothing](configurations/experiment-tracking.md), ```mlflow``` -::: magnus.experiment_tracker.BaseExperimentTracker +::: runnable.experiment_tracker.BaseExperimentTracker options: show_root_heading: true show_source: true @@ -175,7 +175,7 @@ Example: [parallel](concepts/parallel.md), [map](concepts/map.md) -::: magnus.nodes.BaseNode +::: runnable.nodes.BaseNode options: show_root_heading: true show_source: true @@ -194,7 +194,7 @@ Example: [shell](concepts/task.md/#shell), [notebook](concepts/task.md/#notebook) -::: magnus.tasks.BaseTaskType +::: runnable.tasks.BaseTaskType options: show_root_heading: true show_source: true diff --git a/docs/index.md b/docs/index.md index b41efe86..f03189ae 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,87 +1,206 @@ ---- -title: Welcome -sidebarDepth: 0 ---- +# Runnable
- ![Image title](assets/logo1.png){ width="400" height="300"} -
+ ![Image title](assets/sport.png){ width="200" height="100"} +
Orchestrate your functions, notebooks, scripts anywhere!!
+ +Runner icons created by Leremy - Flaticon + --- -Magnus is a simplified workflow definition language that helps in: +## Example -- **Streamlined Design Process:** Magnus enables users to efficiently plan their pipelines with -[stubbed nodes](concepts/stub.md), along with offering support for various structures such as -[tasks](concepts/task.md), [parallel branches](concepts/parallel.md), and [loops or map branches](concepts/map.md) -in both [yaml](concepts/pipeline.md) or a [python SDK](sdk.md) for maximum flexibility. +The data science specific code is a well-known +[iris example from scikit-learn](https://scikit-learn.org/stable/auto_examples/linear_model/plot_iris_logistic.html). -- **Incremental Development:** Build your pipeline piece by piece with Magnus, which allows for the -implementation of tasks as [python functions](concepts/task.md/#python_functions), -[notebooks](concepts/task.md/#notebooks), or [shell scripts](concepts/task.md/#shell), -adapting to the developer's preferred tools and methods. -- **Robust Testing:** Ensure your pipeline performs as expected with the ability to test using sampled data. Magnus -also provides the capability to [mock and patch tasks](configurations/executors/mocked.md) -for thorough evaluation before full-scale deployment. +```python linenums="1" +--8<-- "examples/iris_demo.py" +``` -- **Seamless Deployment:** Transition from the development stage to production with ease. -Magnus simplifies the process by requiring -[only configuration changes](configurations/overview.md) -to adapt to different environments, including support for [argo workflows](configurations/executors/argo.md). -- **Efficient Debugging:** Quickly identify and resolve issues in pipeline execution with Magnus's local -debugging features. Retrieve data from failed tasks and [retry failures](concepts/run-log.md/#retrying_failures) -using your chosen debugging tools to maintain a smooth development experience. +1. Return objects X and Y. +2. Store the file `iris_logistic.png` for future reference. +3. Define the sequence of tasks. +4. Define a pipeline with the tasks +The difference between native driver and runnable orchestration: -Along with the developer friendly features, magnus also acts as an interface to production grade concepts -such as [data catalog](concepts/catalog.md), [reproducibility](concepts/run-log.md), -[experiment tracking](concepts/experiment-tracking.md) -and secure [access to secrets](concepts/secrets.md). +!!! tip inline end "Notebooks and Shell scripts" -## Motivation + You can execute notebooks and shell scripts too!! -Successful data science projects require a varied set of skills from data scientists, ML engineers, and infrastructure -teams. Often, the roles and responsibilities of these personas are blurred leading to projects that are difficult to -maintain, test, reproduce or run at scale. + They can be written just as you would want them, *plain old notebooks and scripts*. -We build __**Magnus**__ to separate those concerns and create a clear boundary of the personas. -## Design principles -- [x] Code should not be mixed with implementation details of underlying platform. -**Example**: Data and parameters are often shared between different steps of the pipeline. -The platform implementation should not add additional code to make this happen. +
+```diff +- X, Y = load_data() ++load_data_task = PythonTask( ++ function=load_data, ++ name="load_data", ++ returns=[pickled("X"), pickled("Y")], (1) ++ ) -- [x] Interactive development/debugging should be a first-class citizen. +-logreg = model_fit(X, Y, C=1.0) ++model_fit_task = PythonTask( ++ function=model_fit, ++ name="model_fit", ++ returns=[pickled("logreg")], ++ ) +-generate_plots(X, Y, logreg) ++generate_plots_task = PythonTask( ++ function=generate_plots, ++ name="generate_plots", ++ terminate_with_success=True, ++ catalog=Catalog(put=["iris_logistic.png"]), (2) ++ ) -**Example**: Data science teams thrive in environments with quick debug loop. Able to use their preferred tools -and iterate without constraints of the platform aids development/debugging. ++pipeline = Pipeline( ++ steps=[load_data_task, model_fit_task, generate_plots_task], (3) -- [x] Align the best practices even during development phase. +``` +
-
+
+1. Return objects X and Y.
+2. Store the file `iris_logistic.png` for future reference.
+3. Define the sequence of tasks.
+---
+- [x] Absolutely no change in data science code to make it `runnable`
+- [x] The ```driver``` function has an equivalent and intuitive runnable expression
+- [x] Reproducible by default: runnable stores metadata about code/data/config for every execution.
+- [x] The pipeline is `runnable` in any environment.
+## But why runnable?
+There are many orchestration tools in python. A well maintained and curated [list is
+available here](https://github.com/EthicalML/awesome-production-machine-learning/).
+
+Below is a rough comparison of `runnable` to others.
+
+
+|Feature|runnable|Airflow|Argo workflows|Metaflow|ZenML|Kedro|
+|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
+|Cross platform|:white_check_mark:|:white_check_mark:|:white_check_mark:|:white_check_mark:|:white_check_mark:|:white_check_mark:|
+|Bring your own infrastructure |:white_check_mark:|:x:|:x:|:x:|:x:|:white_check_mark:|
+|Local executions|:white_check_mark:|:x:|:x:|:white_check_mark:|:white_check_mark:|:white_check_mark:|
+|Bring your own code|:white_check_mark:|:x:|:x:|:x:|:x:|:x:|
+|Reproducibility of executions|:white_check_mark:|:x:|:x:|:white_check_mark:|:white_check_mark:|:white_check_mark:|
+|Easy to move on|:white_check_mark:|:x:|:x:|:x:|:x:|:white_check_mark:|
+|End to end platform|:x:|:white_check_mark:|:x:|:white_check_mark:|:white_check_mark:|:x:|
+|Task level orchestration|:x:|:white_check_mark:|:white_check_mark:|:x:|:x:|:x:|
+|Notebook as tasks|:white_check_mark:|:x:|:x:|:x:|:x:|:x:|
+|Unit testable pipelines|:white_check_mark:|:x:|:x:|:white_check_mark:|:white_check_mark:|:x:|
+|Multi language support|:white_check_mark:|:white_check_mark:|:white_check_mark:|:x:|:x:|:x:|
+
+
+
+They can be broadly classified into three categories:
+
+- __Native orchestrators__: These orchestrators are responsible for task level orchestration and
+resource management on the chosen infrastructure. Examples:
+
+    - Airflow
+    - Argo workflows
+    - AWS step functions
+
+
+#### runnable is complementary to these orchestrators and is designed to help data teams use them effectively.
+
+- __Platforms__: These are meant to provide an end to end platform for training, deploying and
+serving ML models. Examples:
+
+    - Dagster
+    - Prefect
+    - Flyte
+
+    They have specific infrastructure requirements and are great if the entire organization buys into
+    their philosophy and ways of working.
+
+#### runnable is designed to work with your infrastructure and ways of working instead of dictating them.
+
+
+
+- __Meta orchestrators__: Orchestrators built on the native orchestrators that provide a simplified
+SDK tailored for typical data oriented tasks. Examples include:
+
+    - Kedro: cross platform transpiler.
+    - Metaflow: A mix of platform and SDK.
+    - ZenML: A mix of platform and SDK.
+
+runnable is a _meta orchestrator_ with different design decisions.
+
+
+- :material-clock-fast:{ .lg .middle } __Easy to adopt, it's mostly your code__
+
+    ---
+
+    Your application code remains as it is. Runnable exists outside of it.
+
+    - No APIs, decorators, or imposed structure.
+    - Most often it is a single file.
+
+    [:octicons-arrow-right-24: Getting started](concepts/the-big-picture.md)
+
+- :building_construction:{ .lg .middle } __Bring your infrastructure__
+
+    ---
+
+    Runnable can be adapted to your infrastructure stack instead of dictating it.
+
+    - Intentionally minimal in scope as a composer of pipelines in native orchestrators.
+    - Every execution is ready to be deployed to production.
+
+    [:octicons-arrow-right-24: Infrastructure](configurations/overview.md)
+
+- :memo:{ .lg .middle } __Reproducibility__
+
+    ---
+
+    Runnable tracks key information to reproduce the execution. All this happens without
+    any additional code.
+
+    [:octicons-arrow-right-24: Run Log](concepts/run-log.md)
+
+
+
+- :repeat:{ .lg .middle } __Retry failures__
+
+    ---
+
+    Debug any failure in your local development environment.
+
+    [:octicons-arrow-right-24: Retry](#)
+
+- :microscope:{ .lg .middle } __Testing__
+
+    ---
+
+    Unit test your code and pipelines.
+
+    [:octicons-arrow-right-24: Test](#)
+
+
+
+- :broken_heart:{ .lg .middle } __Move on__
+
+    ---
+
+    Moving away from runnable is as simple as deleting relevant files.
-
- ![Image title](assets/whatdo.png){ width="1200" height="800"} -
-
-The scope of magnus is intentionally limited to aid during the model development phase. -It does not boast of end to end development. The heavy lifting is always done by the providers. +
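For contrast with the index example above, the imperative "driver" that the runnable pipeline replaces is just plain glue code. A sketch follows; the import path is assumed from the ```examples/iris_demo.py``` reference in the example.

```python
# The "native driver" glue that the pipeline above replaces; load_data,
# model_fit and generate_plots are the functions from the iris example
# (import path assumed).
from examples.iris_demo import generate_plots, load_data, model_fit


def driver() -> None:
    X, Y = load_data()               # step 1: fetch features and labels
    logreg = model_fit(X, Y, C=1.0)  # step 2: fit the classifier
    generate_plots(X, Y, logreg)     # step 3: write iris_logistic.png


if __name__ == "__main__":
    driver()
```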
diff --git a/docs/interactions.md b/docs/interactions.md deleted file mode 100644 index d296adec..00000000 --- a/docs/interactions.md +++ /dev/null @@ -1,71 +0,0 @@ -# API Reference of Magnus functions - - -::: magnus.get_parameter - options: - show_root_heading: true - -
- -::: magnus.set_parameter - options: - show_root_heading: true - -
- -::: magnus.get_from_catalog - options: - show_root_heading: true - -
- -::: magnus.put_in_catalog - options: - show_root_heading: true - -
- -::: magnus.get_object - options: - show_root_heading: true - -
- -::: magnus.put_object - options: - show_root_heading: true - - - -
- -::: magnus.get_secret - options: - show_root_heading: true - - -
- -::: magnus.get_run_log - options: - show_root_heading: true - - -
- -::: magnus.get_run_id - options: - show_root_heading: true - -
- -::: magnus.track_this - options: - show_root_heading: true - - -
- -::: magnus.get_experiment_tracker_context - options: - show_root_heading: true diff --git a/docs/roadmap.md b/docs/roadmap.md index eeaf8a40..25a85f72 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -16,7 +16,7 @@ Bring in native AWS services to orchestrate workflows. The stack should be: ## Better integrations with experiment tracking tools. -Currently, the implementation of experiment tracking tools within magnus is limited. It might be better to +Currently, the implementation of experiment tracking tools within runnable is limited. It might be better to choose a good open source implementation and stick with it. diff --git a/docs/sdk.md b/docs/sdk.md index bb345437..e989fc11 100644 --- a/docs/sdk.md +++ b/docs/sdk.md @@ -1,18 +1,18 @@ -::: magnus.Catalog +::: runnable.Catalog options: show_root_heading: true show_bases: false
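As a usage sketch for ```Catalog```: the ```put``` keyword mirrors the ```Catalog(put=["iris_logistic.png"])``` call in the index example, while ```get``` is the assumed counterpart for staging files before a step.

```python
from runnable import Catalog

# Files matching "put" globs are copied into the catalog after the step;
# files matching "get" globs are staged from the catalog before it runs.
catalog = Catalog(
    get=["data/hello.txt"],
    put=["iris_logistic.png"],
)
```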
-::: magnus.Stub +::: runnable.Stub options: show_root_heading: true show_bases: false
-::: magnus.Task +::: runnable.PythonTask options: show_root_heading: true show_bases: false @@ -20,7 +20,7 @@
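A usage sketch for ```PythonTask```, lifted from the iris example earlier in this changeset; the import path of the task function is assumed.

```python
from runnable import PythonTask, pickled

from examples.iris_demo import load_data  # assumed import path

# Runs load_data and pickles its two return objects for downstream steps.
load_data_task = PythonTask(
    function=load_data,
    name="load_data",
    returns=[pickled("X"), pickled("Y")],
)
```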
-::: magnus.Parallel +::: runnable.ShellTask options: show_root_heading: true show_bases: false @@ -28,7 +28,7 @@
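A usage sketch for ```ShellTask```, mirroring the "Create Content" shell step from the retry examples; the keyword names are assumptions.

```python
from runnable import ShellTask

# Equivalent of the shell step that writes data/hello.txt in the examples.
create_content = ShellTask(
    name="create_content",
    command='echo "Hello from runnable" >> data/hello.txt',
)
```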
-::: magnus.Map +::: runnable.NotebookTask options: show_root_heading: true show_bases: false @@ -36,7 +36,7 @@
-::: magnus.Success +::: runnable.Parallel options: show_root_heading: true show_bases: false @@ -44,7 +44,7 @@
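A hedged sketch for ```Parallel```: the ```branches``` mapping of names to sub-pipelines follows the parallel concept docs, but the exact signature here is an assumption.

```python
from runnable import Parallel, Pipeline, Stub

# Each branch is itself a small pipeline; names are illustrative.
branch_a = Pipeline(steps=[Stub(name="train", terminate_with_success=True)])
branch_b = Pipeline(steps=[Stub(name="evaluate", terminate_with_success=True)])

parallel_step = Parallel(
    name="parallel_step",
    branches={"a": branch_a, "b": branch_b},
    terminate_with_success=True,
)
```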
-::: magnus.Fail +::: runnable.Map options: show_root_heading: true show_bases: false @@ -52,7 +52,23 @@
-::: magnus.Pipeline +::: runnable.Success + options: + show_root_heading: true + show_bases: false + show_docstring_description: true + +
+ +::: runnable.Fail + options: + show_root_heading: true + show_bases: false + show_docstring_description: true + +
+ +::: runnable.Pipeline options: show_root_heading: true show_bases: false diff --git a/docs/usage.md b/docs/usage.md index deaaffb4..f6e45d2e 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -1,11 +1,11 @@ ## Installation -**magnus** is a python package and should be installed like any other python package. The minimum python version +**runnable** is a python package and should be installed like any other python package. The minimum python version is ```3.8``` ```shell -pip install magnus +pip install runnable ``` We recommend the installation in a virtual environment using ```poetry``` or any other package manager. @@ -14,35 +14,28 @@ We recommend the installation in a virtual environment using ```poetry``` or any #### Docker -To run the pipelines/functions/notebooks in a container, install magnus with docker functionality. +To run the pipelines/functions/notebooks in a container, install runnable with docker functionality. ```shell -pip install "magnus[docker]" +pip install "runnable[docker]" ``` #### Notebook -To use notebooks as tasks, install magnus with ```notebook``` functionality. +To use notebooks as tasks, install runnable with ```notebook``` functionality. ```shell -pip install "magnus[notebook]" +pip install "runnable[notebook]" ``` -#### mlflow - -To enable ```mlflow``` as experiment tracker, install magnus with ```mlflow``` functionality - -```shell -pip install "magnus[mlflow]" -```
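Tying the SDK reference above to installation: a minimal end-to-end smoke test might look like the sketch below. The ```configuration_file``` keyword comes from the configurations overview in this changeset; the config path is one of the repo's example files.

```python
from runnable import Pipeline, Stub

# A one-step placeholder pipeline, pointed at an environment purely
# through configuration.
pipeline = Pipeline(steps=[Stub(name="hello", terminate_with_success=True)])
pipeline.execute(configuration_file="examples/configs/fs-run_log.yaml")
```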
## Usage -Pipelines defined in **magnus** can be either via [python sdk](sdk.md) or ```yaml``` based definitions. +Pipelines in **runnable** can be defined either via the [python sdk](sdk.md) or ```yaml``` based definitions. -To execute a pipeline, defined in ```yaml```, use the **magnus** cli. +To execute a pipeline defined in ```yaml```, use the **runnable** cli. The options are detailed below: - ```-f, --file``` (str): The pipeline definition file, defaults to pipeline.yaml @@ -58,6 +51,6 @@ The options are detailed below: ## Examples All the examples in the documentation are present in the ```examples``` directory of -[the repo](https://github.com/AstraZeneca/magnus-core) with instructions on how to run them. +[the repo](https://github.com/AstraZeneca/runnable-core) with instructions on how to run them. All the examples are tested, with multiple configurations, as part of our CI test suite. diff --git a/docs/why-magnus.md b/docs/why-magnus.md deleted file mode 100644 index 6db6bcfc..00000000 --- a/docs/why-magnus.md +++ /dev/null @@ -1,139 +0,0 @@ -# Why Magnus - -The scope of **magnus** is intentionally limited as an aid to author workflows for -production grade orchestrators like AWS Step functions or Argo Workflows. It is designed -to complement them, **NOT** replace them. - -### Simplified flow of data - -Production-grade orchestrators excel at managing a series of independent tasks, offering -straightforward implementation for task orchestration. Nonetheless, due to their general-purpose -design, orchestrating the flow of data—whether parameters or artifacts—can introduce complexity and -require careful handling. - -Magnus simplifies this aspect by introducing an [intuitive mechanism for data flow](example/dataflow.md), -thereby streamlining data management. This approach allows the orchestrators to focus on their core -competency: allocating the necessary computational resources for task execution. - -### Local first - -In the context of the project's proof-of-concept (PoC) phase, the utilization of production-level - orchestration systems is not optimal due to their complexity and potential constraints on rapid - experimentation. Data scientists require an environment that aligns with their established workflows, - which is most effectively achieved through the use of local development tools. - -Magnus serves as an intermediary stage, simulating the production environment by offering [local -versions](configurations/overview.md/) of essential services—such as execution engines, data catalogs, secret management, and -experiment tracking—without necessitating intricate configuration. As the project transitions into the -production phase, these local stand-ins are replaced with their robust, production-grade counterparts. - -### Reduce refactoring - -Transitioning from the proof of concept (PoC) phase to production often necessitates extensive code -refactoring, which presents significant challenges: - -1. Refactoring demands considerable engineering resources to dissect the existing codebase and -reconstruct it in a form that is both maintainable and amenable to testing. - -2. The engineered solutions that result from this process tend to exclude researchers from further -experimentation, thus impeding iterative research and development. - - -Magnus is engineered to minimize the need for such extensive refactoring when operationalizing -projects.
It achieves this by allowing tasks to be defined as [simple Python functions](concepts/task.md/#python_functions) -or [Jupyter notebooks](concepts/task.md/#notebook). This means that the research-centric components of the code -can remain unchanged, avoiding -the need for immediate refactoring and allowing for the postponement of these efforts until they -become necessary for the long-term maintenance of the product. - -### Decouple implementations - -In the rapidly evolving realm of technology, the solutions and tools selected today can -quickly become the technical debt of tomorrow. Magnus addresses this inevitability by -abstracting the implementation details from the underlying concepts. This decoupling -enables a seamless transition to new technical architectures, reducing the process to a -mere modification of configuration settings. Thus, Magnus facilitates adaptability -in the face of changing technological landscapes, ensuring that updates or replacements -of the technical stack can be implemented with minimal disruption. - -### Non intrusive implementation - -A lof of design aspect of magnus is to let the task definitions, python functions or notebooks, -remain agnostic of the orchestration process. Most often, the task orchestration can be -achieved by writing native "driver" functions. This allows the implementation to be completely -within the control of data scientists. - -Most often, it should be possible to remove magnus from the tech stack if necessary. - -
-
-## Alternatives
-
-[Kedro](https://github.com/kedro-org/kedro) and [metaflow](https://metaflow.org/) are also
-based on similar ideas and have established presence in this field. We took a lot of
-inspiration from these excellent projects when writing magnus.
-
-!!! note "Caveat"
-
-    The scope of magnus is limited in comparison to metaflow. The below points are on
-    the design philosophy rather that implementation specifics.
-
-    The highlighted differences are subjective opinions and should be taken as preferences
-    rather than criticisms.
-
-
-
-### Infrastructure
-
-Metaflow stipulates [infrastructure prerequisites](https://docs.metaflow.org/getting-started/infrastructure) that are established and validated across numerous scenarios.
-
-In contrast, Magnus empowers engineering teams to define infrastructure specifications through a configuration file tailored to the stack they maintain. This versatility enables specialized teams to leverage their domain expertise, thereby enhancing the project's overall efficacy.
-
-As magnus is mostly responsible for translating workflows to infrastructure patterns, it can
-adapt to different environments.
-
-### Project structure
-
-Kedro and metaflow come with their own predefined project structures, which might be
-appealing to some users while others might find them restrictive.
-
-Magnus, on the other hand, offers a more flexible approach. It doesn't impose a specific
-structure on your project. Whether you're working with Python functions, Jupyter notebooks,
-or shell scripts, Magnus allows you to organize your work as you see fit. Even the location
-of the data folder can be tailored for each step, avoiding a one-size-fits-all design and
-providing the freedom to structure your project in a way that suits your preferences and
-requirements.
-
-
-### Notebook support
-
-Both metaflow and kedro do not support notebooks as tasks. Notebooks are great during the iterative
-phase of the project allowing for interactive development.
-
-Magnus supports notebooks as tasks and has the ability to pass data/parameters between them
-to allow orchestrating notebooks.
-
-### Testing pipelines
-
-Magnus supports patching and mocking tasks to test the end to end execution of the
-pipeline. It is not clear on how to achieve the same in kedro or metaflow.
-
-### Learning curve
-
-Magnus allows tasks to stand on their own, separate from the orchestration system. Explaining and
-understanding these tasks is made easy through the use of simple "driver" functions. This approach
-makes it easier for anyone working on the project to get up to speed and maintain it, as the
-orchestration part of Magnus remains distinct and straightforward.
-
-In contrast, learning to use Kedro and Metaflow can take more time because they have their own
-specific ways of structuring projects and code that users need to learn.
-
-### Language support
-
-Kedro and metaflow only support python based pipeline definitions. It is possible to
-run the non-python tasks as ```subprocesses``` in the pipeline tasks but the definition
-is only possible using the python API.
-
-Magnus supports ```yaml``` based pipeline definitions and has ```shell``` tasks which
-can be used for non-python tasks.
diff --git a/docs/why-runnable.md b/docs/why-runnable.md
new file mode 100644
index 00000000..1ec6e91a
--- /dev/null
+++ b/docs/why-runnable.md
@@ -0,0 +1,137 @@
+# Why runnable
+
+**runnable** allows data scientists/engineers to hook into the production stack without
+knowledge of it.
+It offers a simpler abstraction of the concepts
+found in the production stack, thereby aligning with production standards even during development.
+
+**runnable** is not an end-to-end deployment platform; it is limited to being an aid during
+the development phase, without modifying the production stack or application code.
+
+<div class="grid cards" markdown>
+
+- :material-clock-fast:{ .lg .middle } __Easy to adopt, it's mostly your code__
+
+    ---
+
+    Your application code remains as it is. Runnable exists outside of it.
+
+    [:octicons-arrow-right-24: Getting started](concepts/the-big-picture.md)
+
+- :building_construction:{ .lg .middle } __Bring your infrastructure__
+
+    ---
+
+    Runnable can be adapted to your infrastructure stack instead of dictating it.
+
+    [:octicons-arrow-right-24: Infrastructure](configurations/overview.md)
+
+- :memo:{ .lg .middle } __Reproducibility__
+
+    ---
+
+    Runnable tracks key information to reproduce the execution.
+
+    [:octicons-arrow-right-24: Run Log](concepts/run-log.md)
+
+- :repeat:{ .lg .middle } __Retry failures__
+
+    ---
+
+    Debug any failure in your local development environment.
+
+    [:octicons-arrow-right-24: Retry](#)
+
+- :microscope:{ .lg .middle } __Testing__
+
+    ---
+
+    Unit test your code and pipelines.
+
+    [:octicons-arrow-right-24: Test](#)
+
+- :broken_heart:{ .lg .middle } __Move on__
+
+    ---
+
+    Moving away from runnable is as simple as deleting relevant files.
+
+</div>
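+
+As a quick sketch of the "it's mostly your code" point above, the snippet below wraps a plain
+function with the python SDK without changing the function itself. The names here are
+illustrative; the primitives (```PythonTask```, ```Pipeline```) are the ones used throughout
+the ```examples``` directory:
+
+```python
+from runnable import Pipeline, PythonTask
+
+
+def say_hello():
+    # Plain project code; it knows nothing about runnable.
+    return "hello"
+
+
+def main():
+    # The orchestration lives outside the function, in a small driver.
+    hello = PythonTask(
+        name="say hello",
+        function=say_hello,
+        returns=["greeting"],  # exposed as a parameter to downstream steps
+        terminate_with_success=True,
+    )
+
+    pipeline = Pipeline(steps=[hello], add_terminal_nodes=True)
+    pipeline.execute()
+    return pipeline
+
+
+if __name__ == "__main__":
+    main()
+```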
+
+
+## Alternatives
+
+As an SDK, **runnable** competes with
+[Kedro](https://github.com/kedro-org/kedro) and [metaflow](https://metaflow.org/), which are
+based on similar ideas and have an established presence in this field. We took a lot of
+inspiration from these excellent projects when writing runnable.
+
+!!! note "Caveat"
+
+    The scope of runnable is limited in comparison to metaflow. The below points are on
+    the design philosophy rather than implementation specifics.
+
+    The highlighted differences are subjective opinions and should be taken as preferences
+    rather than criticisms.
+
+
+### Infrastructure
+
+Metaflow stipulates [infrastructure prerequisites](https://docs.metaflow.org/getting-started/infrastructure) that are established and validated across numerous scenarios.
+
+In contrast, runnable empowers engineering teams to define infrastructure specifications through a configuration file tailored to the stack they maintain. This versatility enables specialized teams to leverage their domain expertise, thereby enhancing the project's overall efficacy.
+
+As runnable is mostly responsible for translating workflows to infrastructure patterns, it can
+adapt to different environments.
+
+### Project structure
+
+Kedro and metaflow come with their own predefined project structures, which might be
+appealing to some users while others might find them restrictive.
+
+runnable, on the other hand, offers a more flexible approach. It doesn't impose a specific
+structure on your project. Whether you're working with Python functions, Jupyter notebooks,
+or shell scripts, runnable allows you to organize your work as you see fit. Even the location
+of the data folder can be tailored for each step, avoiding a one-size-fits-all design and
+providing the freedom to structure your project in a way that suits your preferences and
+requirements.
+
+
+### Notebook support
+
+Neither metaflow nor kedro supports notebooks as tasks. Notebooks are great during the iterative
+phase of the project, allowing for interactive development.
+
+runnable supports notebooks as tasks and has the ability to pass data/parameters between them
+to allow orchestrating notebooks.
+
+### Testing pipelines
+
+runnable supports patching and mocking tasks to test the end to end execution of the
+pipeline. It is not clear how to achieve the same in kedro or metaflow.
+
+### Learning curve
+
+runnable allows tasks to stand on their own, separate from the orchestration system. Explaining and
+understanding these tasks is made easy through the use of simple "driver" functions. This approach
+makes it easier for anyone working on the project to get up to speed and maintain it, as the
+orchestration part of runnable remains distinct and straightforward.
+
+In contrast, learning to use Kedro and Metaflow can take more time because they have their own
+specific ways of structuring projects and code that users need to learn.
+
+### Language support
+
+Kedro and metaflow only support python-based pipeline definitions. It is possible to
+run non-python tasks as ```subprocesses``` within the pipeline tasks, but the definition
+is only possible using the python API.
+
+runnable supports ```yaml``` based pipeline definitions and has ```shell``` tasks which
+can be used for non-python tasks.
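+
+To make the "driver" idea from the learning curve section concrete, a minimal plain-python
+sketch is shown below. The functions are borrowed from ```examples/concepts/map.py```; driving
+them directly like this is all it takes to run the same logic without runnable:
+
+```python
+# A plain-python "driver": the same functions a pipeline would orchestrate,
+# called directly. Deleting runnable leaves this code fully functional.
+def chunk_files():
+    # The stride and the start indexes of each batch.
+    return 10, list(range(0, 50, 10))
+
+
+def process_chunk(stride: int, start_index: int):
+    return f"processed {start_index} to {start_index + stride}"
+
+
+if __name__ == "__main__":
+    stride, chunks = chunk_files()
+    for start_index in chunks:
+        print(process_chunk(stride, start_index))
+```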
diff --git a/examples/Dockerfile b/examples/Dockerfile deleted file mode 100755 index 9d77f5f1..00000000 --- a/examples/Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -# Python 3.8 Image without Dependecies -FROM python:3.8-slim - -LABEL maintainer="vijay.vammi@astrazeneca.com" - -RUN apt-get update && apt-get install -y --no-install-recommends \ - git \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* - - -RUN pip config set global.trusted-host \ - "pypi.org files.pythonhosted.org pypi.python.org" \ - --trusted-host=pypi.python.org \ - --trusted-host=pypi.org \ - --trusted-host=files.pythonhosted.org - -RUN pip install --upgrade pip \ - && pip install poetry - -ENV VIRTUAL_ENV=/opt/venv -RUN python -m virtualenv --python=/usr/local/bin/python $VIRTUAL_ENV -ENV PATH="$VIRTUAL_ENV/bin:$PATH" - -COPY . /app -WORKDIR /app - -RUN poetry config repositories.FPHO https://files.pythonhosted.org \ - && poetry config certificates.FPHO.cert false - -RUN poetry install --all-extras --without dev diff --git a/examples/Dockerfile.39 b/examples/Dockerfile.39 index f116853d..8d772f2f 100755 --- a/examples/Dockerfile.39 +++ b/examples/Dockerfile.39 @@ -16,7 +16,7 @@ RUN pip config set global.trusted-host \ --trusted-host=files.pythonhosted.org RUN pip install --upgrade pip \ - && pip install poetry + && pip install poetry ENV VIRTUAL_ENV=/opt/venv RUN python -m virtualenv --python=/usr/local/bin/python $VIRTUAL_ENV @@ -28,4 +28,4 @@ WORKDIR /app RUN poetry config repositories.FPHO https://files.pythonhosted.org \ && poetry config certificates.FPHO.cert false -RUN poetry install --all-extras --without dev +RUN poetry install --all-extras --without dev --without tutorial diff --git a/examples/catalog.py b/examples/catalog.py index 1f502407..c94071f2 100644 --- a/examples/catalog.py +++ b/examples/catalog.py @@ -5,31 +5,32 @@ python run examples/catalog.py """ -from magnus import Catalog, Pipeline, Stub, Task +from runnable import Catalog, Pipeline, ShellTask, Stub def main(): # Make the data folder if it does not exist - set_up = Task(name="Setup", command="mkdir -p data", command_type="shell") + set_up = ShellTask( + name="Setup", + command="mkdir -p data", + ) # create a catalog instruction to put a file into the catalog create_catalog = Catalog(put=["data/hello.txt"]) # This task will create a file in the data folder and attaches the instruction # to put the file into the catalog. - create = Task( + create = ShellTask( name="Create Content", - command='echo "Hello from magnus" >> data/hello.txt', - command_type="shell", + command='echo "Hello from runnable" >> data/hello.txt', catalog=create_catalog, ) # We remove the data folder to ensure that the data folder is cleaned up. # This is to show that the retrieve step just does not read from existing data - # This step is stubbed to prevent any accidental deletion, make it a Task + # This step is stubbed to prevent any accidental deletion, make it a ShellTask first_clean = Stub( name="Clean up to get again", command="rm -rf data", - command_type="shell", ) # We create a catalog instruction to retrieve a file from the catalog @@ -39,34 +40,30 @@ def main(): get_catalog = Catalog(get=["data/hello.txt"]) # This task will retrieve the file from the catalog and attach the instruction # to retrieve the file from the catalog before execution. - retrieve = Task( + retrieve = ShellTask( name="Retrieve Content", command="cat data/hello.txt", - command_type="shell", catalog=get_catalog, ) # We clean up. 
Note that this step is stubbed to prevent any accidental deletion, - # Make it a Task to actually clean up. + # Make it a ShellTask to actually clean up. clean_up = Stub( name="Clean up", command="rm -rf data", - command_type="shell", terminate_with_success=True, ) - # link all the steps of the pipeline - set_up >> create >> first_clean >> retrieve >> clean_up - pipeline = Pipeline( steps=[set_up, create, first_clean, retrieve, clean_up], - start_at=set_up, add_terminal_nodes=True, ) # override the default configuration to use file-system catalog. pipeline.execute(configuration_file="examples/configs/fs-catalog.yaml") + return pipeline + if __name__ == "__main__": main() diff --git a/examples/catalog.yaml b/examples/catalog.yaml index 4f34e52d..b70489f1 100644 --- a/examples/catalog.yaml +++ b/examples/catalog.yaml @@ -11,7 +11,7 @@ dag: You can run this pipeline by: - magnus execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml + runnable execute -f examples/catalog.yaml -c examples/configs/fs-catalog.yaml start_at: Setup steps: Setup: @@ -23,7 +23,7 @@ dag: type: task command_type: shell command: | - echo "Hello from magnus" >> data/hello.txt + echo "Hello from runnable" >> data/hello.txt next: Clean up to get again catalog: # (2) put: diff --git a/examples/catalog_api.py b/examples/catalog_api.py deleted file mode 100644 index c670ad98..00000000 --- a/examples/catalog_api.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -This example demonstrates how to use the catalog API. -You can use the python API for fine grained control if configurational specification -does not fit your needs. - -You can run this pipeline by: python examples/catalog_api.py -""" - -from pathlib import Path - -from magnus import Pipeline, Task, get_from_catalog, put_in_catalog - - -def create_content(): - dir_path = Path("data") - dir_path.mkdir(parents=True, exist_ok=True) # Make data folder if it doesn't exist - - with open(dir_path / "hello.txt", "w") as f: - f.write("Hello from magnus!!") - - # Put the created file in the catalog - put_in_catalog("data/hello.txt") - - -def retrieve_content(): - # Get the file from the catalog - get_from_catalog("data/hello.txt") - - with open("data/hello.txt", "r") as f: - f.read() - - -def main(): - # This step creates a file and syncs it to the catalog. - create = Task(name="create_content", command="examples.catalog_api.create_content") - # This step retrieves the file from the catalog and prints its content. - retrieve = Task( - name="retrieve_content", - command="examples.catalog_api.retrieve_content", - terminate_with_success=True, - ) - - create >> retrieve - - pipeline = Pipeline(steps=[create, retrieve], start_at=create, add_terminal_nodes=True) - - # Override the default configuration file with the one that has file-system as the catalog. 
- run_log = pipeline.execute(configuration_file="examples/configs/fs-catalog.yaml") - print(run_log) - - -if __name__ == "__main__": - main() diff --git a/examples/catalog_simple.py b/examples/catalog_simple.py index b63bdb10..02c719a0 100644 --- a/examples/catalog_simple.py +++ b/examples/catalog_simple.py @@ -6,36 +6,34 @@ """ -from magnus import Catalog, Pipeline, Task +from runnable import Catalog, Pipeline, ShellTask def main(): # Make the data folder if it does not exist - set_up = Task(name="Setup", command="mkdir -p data", command_type="shell") + set_up = ShellTask(name="Setup", command="mkdir -p data") # create a catalog instruction to put a file into the catalog create_catalog = Catalog(put=["data/hello.txt"]) # This task will create a file in the data folder and attaches the instruction # to put the file into the catalog. - create = Task( + create = ShellTask( name="Create Content", - command='echo "Hello from magnus" >> data/hello.txt', - command_type="shell", + command='echo "Hello from runnable" >> data/hello.txt', catalog=create_catalog, terminate_with_success=True, ) - set_up >> create - pipeline = Pipeline( steps=[set_up, create], - start_at=set_up, add_terminal_nodes=True, ) # override the default configuration to use file-system catalog. pipeline.execute(configuration_file="examples/configs/fs-catalog.yaml") + return pipeline + if __name__ == "__main__": main() diff --git a/examples/concepts/catalog.py b/examples/concepts/catalog.py index db0aefe7..921732d1 100644 --- a/examples/concepts/catalog.py +++ b/examples/concepts/catalog.py @@ -38,52 +38,50 @@ def retrieve_content_from_both(): def main(): - from magnus import Catalog, Pipeline, Task + from runnable import Catalog, Pipeline, PythonTask, ShellTask # This step creates a file in the data folder and syncs it to the catalog. data_catalog = Catalog(put=["data/hello.txt"]) - data_create = Task( + data_create = PythonTask( name="create_content_in_data_folder", - command="examples.concepts.catalog.create_content_in_data_folder", + function=create_content_in_data_folder, catalog=data_catalog, ) # This step creates a file in the another folder and syncs it to the catalog. another_catalog = Catalog(put=["another/world.txt"]) - another_create = Task( + another_create = PythonTask( name="create_content_in_another_folder", - command="examples.concepts.catalog.create_content_in_another_folder", + function=create_content_in_another_folder, catalog=another_catalog, ) # Delete the another folder to showcase that the folder will be recreated # when we run the retrieve task. - delete_another_folder = Task( + delete_another_folder = ShellTask( name="delete_another_folder", command="rm -rf another/", - command_type="shell", ) # This step retrieves the file from the catalog and prints its content. all_catalog = Catalog(get=["**/*"]) - retrieve = Task( + retrieve = PythonTask( name="retrieve_content_from_both", - command="examples.concepts.catalog.retrieve_content_from_both", + function=retrieve_content_from_both, catalog=all_catalog, terminate_with_success=True, ) - data_create >> another_create >> delete_another_folder >> retrieve - pipeline = Pipeline( - steps=[data_create, another_create, retrieve, delete_another_folder], - start_at=data_create, + steps=[data_create, another_create, delete_another_folder, retrieve], add_terminal_nodes=True, ) # Override the default configuration file with the one that has file-system as the catalog. 
_ = pipeline.execute(configuration_file="examples/configs/fs-catalog.yaml") + return pipeline + if __name__ == "__main__": main() diff --git a/examples/concepts/catalog.yaml b/examples/concepts/catalog.yaml index 3a2b6a80..f671eca7 100644 --- a/examples/concepts/catalog.yaml +++ b/examples/concepts/catalog.yaml @@ -9,7 +9,7 @@ dag: retrieve_content_from_both: Retrieves the content from both "data" and "another You can run this pipeline by: - magnus execute -f examples/concepts/catalog.yaml -c examples/configs/fs-catalog.yaml + runnable execute -f examples/concepts/catalog.yaml -c examples/configs/fs-catalog.yaml start_at: create_content_in_data_folder steps: diff --git a/examples/concepts/catalog_api.py b/examples/concepts/catalog_api.py deleted file mode 100644 index a64077d6..00000000 --- a/examples/concepts/catalog_api.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -A pipeline to demonstrate using the catalog service to create and retrieve content. -Here we use the python API get and put in the catalog. - -You can run this pipeline by: - python run examples/concepts/catalog_api.py -""" - -from pathlib import Path - -from magnus import get_from_catalog, put_in_catalog - - -def create_content_in_data_folder(): - """ - Create a data directory and write a file "hello.txt" in the data folder. - Use the python API put_in_catalog to put the file in the catalog. - """ - Path("data").mkdir(parents=True, exist_ok=True) - with open(Path("data") / "hello.txt", "w") as f: - f.write("Hello from data folder!!") - - put_in_catalog("data/hello.txt") - - -def create_content_in_another_folder(): - """ - Create a "another" directory and write a file "world.txt" in it. - Use the python API put_in_catalog to put the file in the catalog. - """ - Path("another").mkdir(parents=True, exist_ok=True) - with open(Path("another") / "world.txt", "w") as f: - f.write("Hello from another folder!!") - - put_in_catalog("another/world.txt") - - -def retrieve_content_from_both(): - """ - Retrieve the contents of the files from the catalog using the python - API get_from_catalog. - Display the contents of the files in data and "another" folder - """ - - get_from_catalog("**/*") - - with open(Path("data") / "hello.txt", "r") as f: - print(f.read()) - - with open(Path("another") / "world.txt", "r") as f: - print(f.read()) - - -def main(): - from magnus import Pipeline, Task - - # This step creates a file in the data folder and syncs it to the catalog. - data_create = Task( - name="create_content_in_data_folder", - command="examples.concepts.catalog_api.create_content_in_data_folder", - ) - - # This step creates a file in the another folder and syncs it to the catalog. - another_create = Task( - name="create_content_in_another_folder", - command="examples.concepts.catalog_api.create_content_in_another_folder", - ) - - # Delete the another folder to showcase that the folder will be recreated - # when we run the retrieve task. - delete_another_folder = Task( - name="delete_another_folder", - command="rm -rf another/", - command_type="shell", - ) - - # This step retrieves the file from the catalog and prints its content. 
- retrieve = Task( - name="retrieve_content_from_both", - command="examples.concepts.catalog_api.retrieve_content_from_both", - terminate_with_success=True, - ) - - data_create >> another_create >> delete_another_folder >> retrieve - - pipeline = Pipeline( - steps=[data_create, another_create, retrieve, delete_another_folder], - start_at=data_create, - add_terminal_nodes=True, - ) - - # Override the default configuration file with the one that has file-system as the catalog. - _ = pipeline.execute(configuration_file="examples/configs/fs-catalog.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/catalog_object.py b/examples/concepts/catalog_object.py deleted file mode 100644 index 60b96001..00000000 --- a/examples/concepts/catalog_object.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -A simple example of using catalog service to create and retrieve objects. - -You can run this pipeline by: - python run examples/concepts/catalog_object.py -""" - -from pydantic import BaseModel - -from magnus import get_object, put_object - - -class EggsModel(BaseModel): - ham: str - - -class EverythingModel(BaseModel): - spam: str - eggs: EggsModel - - -def put_data_object(): - """ - Create a pydantic object that we want to pass between steps - Store the object in the catalog for downstream steps. - """ - - data_model = EverythingModel(spam="Hello", eggs=EggsModel(ham="Yes, please!!")) - - put_object(data_model, name="everything_model") - - -def retrieve_object(): - """ - Retrieve the pydantic object from the catalog. - """ - - data_model = get_object("everything_model") - - assert data_model == EverythingModel(spam="Hello", eggs=EggsModel(ham="Yes, please!!")) - - print(data_model) - ">>>spam='Hello' eggs=EggsModel(ham='Yes, please!!')" - - -def main(): - from magnus import Pipeline, Task - - # This step creates an object and stores it in the catalog. - object_put = Task( - name="create_content_in_data_folder", - command="examples.concepts.catalog_object.put_data_object", - ) - - # This step retrieves the object from the catalog and prints its content. - object_get = Task( - name="retrieve_content_from_both", - command="examples.concepts.catalog_object.retrieve_object", - terminate_with_success=True, - ) - - object_put >> object_get - - pipeline = Pipeline( - steps=[object_put, object_get], - start_at=object_put, - add_terminal_nodes=True, - ) - - # Override the default configuration file with the one that has file-system as the catalog. - _ = pipeline.execute(configuration_file="examples/configs/fs-catalog.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/experiment_tracking_api.py b/examples/concepts/experiment_tracking_api.py deleted file mode 100644 index 9a1cb8d9..00000000 --- a/examples/concepts/experiment_tracking_api.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -A simple example of using experiment tracking service to track experiments. - -You can run this pipeline by: - python run examples/concepts/experiment_tracking_api.py -""" - -from pydantic import BaseModel - -from magnus import Pipeline, Task, track_this - - -class EggsModel(BaseModel): - ham: str - - -def emit_metrics(): - """ - A function that populates experiment tracker with metrics. - - track_this can take many keyword arguments. - Nested structures are supported by pydantic models. 
- """ - track_this(spam="hello", eggs=EggsModel(ham="world")) - track_this(answer=42.0) - track_this(is_it_true=False) - - -def main(): - metrics = Task( - name="Emit Metrics", - command="examples.concepts.experiment_tracking_api.emit_metrics", - terminate_with_success=True, - ) - - pipeline = Pipeline( - steps=[metrics], - start_at=metrics, - add_terminal_nodes=True, - ) - - pipeline.execute() # (1) - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/experiment_tracking_env.py b/examples/concepts/experiment_tracking_env.py deleted file mode 100644 index 944b75ca..00000000 --- a/examples/concepts/experiment_tracking_env.py +++ /dev/null @@ -1,47 +0,0 @@ -import json -import os - -from pydantic import BaseModel - -from magnus import Pipeline, Task - - -class EggsModel(BaseModel): - ham: str - - -def emit_metrics(): - """ - A function that populates environment variables with metrics. - - Any environment variable with prefix "MAGNUS_TRACK_" will be - understood as a metric. - - Numeric metrics can be set as strings but would be stored to - int/float. Boolean metrics are not supported. - """ - os.environ["MAGNUS_TRACK_spam"] = "hello" - os.environ["MAGNUS_TRACK_eggs"] = json.dumps( - EggsModel(ham="world").model_dump(by_alias=True), - ) - os.environ["MAGNUS_TRACK_answer"] = "42.0" # Would be stored as float(42) - - -def main(): - metrics = Task( - name="Emit Metrics", - command="examples.concepts.experiment_tracking_env.emit_metrics", - terminate_with_success=True, - ) - - pipeline = Pipeline( - steps=[metrics], - start_at=metrics, - add_terminal_nodes=True, - ) - - pipeline.execute() # (1) - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/experiment_tracking_env.yaml b/examples/concepts/experiment_tracking_env.yaml deleted file mode 100644 index 0837bac1..00000000 --- a/examples/concepts/experiment_tracking_env.yaml +++ /dev/null @@ -1,23 +0,0 @@ -dag: - description: | - An example pipeline to demonstrate setting experiment tracking metrics - using environment variables. Any environment variable with prefix - 'MAGNUS_TRACK_' will be recorded as a metric captured during the step. - - You can run this pipeline as: - magnus execute -f examples/concepts/experiment_tracking_env.yaml - - start_at: shell - steps: - shell: - type: task - command_type: shell - command: | - export MAGNUS_TRACK_spam="hello" - export MAGNUS_TRACK_eggs='{"ham": "world"}' - export MAGNUS_TRACK_answer="42.0" - next: success - success: - type: success - fail: - type: fail diff --git a/examples/concepts/experiment_tracking_env_step.yaml b/examples/concepts/experiment_tracking_env_step.yaml deleted file mode 100644 index 7ec032d6..00000000 --- a/examples/concepts/experiment_tracking_env_step.yaml +++ /dev/null @@ -1,25 +0,0 @@ -dag: - description: | - An example pipeline to demonstrate setting experiment tracking metrics - using environment variables. Any environment variable with prefix - 'MAGNUS_TRACK_' will be recorded as a metric captured during the step. 
- - You can run this pipeline as: - magnus execute -f examples/concepts/experiment_tracking_env_step.yaml - - start_at: shell - steps: - shell: - type: task - command_type: shell - command: | - export MAGNUS_TRACK_spam="hello" - export MAGNUS_TRACK_eggs='{"ham": "world"}' - export MAGNUS_TRACK_spam_STEP_1="hey" - export MAGNUS_TRACK_eggs_STEP_1='{"ham": "universe"}' - export MAGNUS_TRACK_answer="42.0" - next: success - success: - type: success - fail: - type: fail diff --git a/examples/concepts/experiment_tracking_integration.py b/examples/concepts/experiment_tracking_integration.py deleted file mode 100644 index a5078f8e..00000000 --- a/examples/concepts/experiment_tracking_integration.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -A simple example of using experiment tracking service to track experiments. -In this example, we integrate with mlflow as our experiment tracking service. - -The mlflow server is expected to be running at: http://127.0.0.1:8080 - -You can run this pipeline by: - python run examples/concepts/experiment_tracking_integration.py -""" - -from pydantic import BaseModel - -from magnus import Pipeline, Task, track_this - - -class EggsModel(BaseModel): - ham: str - - -def emit_metrics(): - """ - A function that populates experiment tracker with metrics. - - track_this can take many keyword arguments. - Nested structures are supported by pydantic models. - """ - track_this(spam="hello", eggs=EggsModel(ham="world")) - track_this(is_it_true=False) - - track_this(answer=0.0) - track_this(step=1, answer=20.0) - track_this(step=2, answer=40.0) - track_this(step=3, answer=60.0) - - -def main(): - metrics = Task( - name="Emit Metrics", - command="examples.concepts.experiment_tracking_integration.emit_metrics", - terminate_with_success=True, - ) - - pipeline = Pipeline( - steps=[metrics], - start_at=metrics, - add_terminal_nodes=True, - ) - - pipeline.execute(configuration_file="examples/configs/mlflow-config.yaml", tag="demo-magnus") - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/experiment_tracking_step.py b/examples/concepts/experiment_tracking_step.py deleted file mode 100644 index f693c6fb..00000000 --- a/examples/concepts/experiment_tracking_step.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -A simple example of using experiment tracking service to track experiments. -In this example, we are emitting metrics per step. - -You can run this pipeline by: - python run examples/concepts/experiment_tracking_step.py -""" - -from pydantic import BaseModel - -from magnus import Pipeline, Task, track_this - - -class EggsModel(BaseModel): - ham: str - - -def emit_metrics(): - """ - A function that populates experiment tracker with metrics. - - track_this can take many keyword arguments. - Nested structures are supported by pydantic models. 
- """ - track_this(step=0, spam="hello", eggs=EggsModel(ham="world")) - track_this(step=1, spam="hey", eggs=EggsModel(ham="universe")) - track_this(answer=42.0) - track_this(is_it_true=False) - - -def main(): - metrics = Task( - name="Emit Metrics", - command="examples.concepts.experiment_tracking_step.emit_metrics", - terminate_with_success=True, - ) - - pipeline = Pipeline( - steps=[metrics], - start_at=metrics, - add_terminal_nodes=True, - ) - - pipeline.execute() # (1) - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/map.py b/examples/concepts/map.py index 769ff2fb..97b86efc 100644 --- a/examples/concepts/map.py +++ b/examples/concepts/map.py @@ -5,17 +5,13 @@ The start_index argument for the function process_chunk is dynamically set by iterating over chunks. If the argument start_index is not provided, you can still access the current value by -MAGNUS_MAP_VARIABLE environment variable. The environment variable MAGNUS_MAP_VARIABLE +runnable_MAP_VARIABLE environment variable. The environment variable runnable_MAP_VARIABLE is a dictionary with keys as iterate_as. Run this pipeline by: python examples/concepts/map.py """ -from typing import List - -from pydantic import create_model - def chunk_files(): """ @@ -25,11 +21,12 @@ def chunk_files(): Set the parameter "stride" to be the number of files to execute per batch. """ - return create_model( - "DynamicModel", - chunks=(List[int], list(range(0, 50, 10))), - stride=(int, 10), - )() + return 10, list(range(0, 50, 10)) + # create_model( + # "DynamicModel", + # chunks=(List[int], list(range(0, 50, 10))), + # stride=(int, 10), + # )() def process_chunk(stride: int, start_index: int): @@ -54,17 +51,21 @@ def main(): process_chunk(chunks.stride, start_index) """ - from magnus import Map, Pipeline, Task + from runnable import Map, Pipeline, PythonTask - execute = Task( + execute = PythonTask( name="execute", - command="examples.concepts.map.process_chunk", + function=process_chunk, terminate_with_success=True, ) - execute_branch = Pipeline(steps=[execute], start_at=execute, add_terminal_nodes=True) + execute_branch = Pipeline(steps=[execute], add_terminal_nodes=True) - generate = Task(name="chunk files", command="examples.concepts.map.chunk_files") + generate = PythonTask( + name="chunk files", + function=chunk_files, + returns=["stride", "chunks"], + ) iterate_and_execute = Map( name="iterate and execute", branch=execute_branch, @@ -73,12 +74,12 @@ def main(): terminate_with_success=True, ) - generate >> iterate_and_execute - - pipeline = Pipeline(steps=[generate, iterate_and_execute], start_at=generate, add_terminal_nodes=True) + pipeline = Pipeline(steps=[generate, iterate_and_execute], add_terminal_nodes=True) _ = pipeline.execute(configuration_file="examples/configs/fs-catalog-run_log.yaml") + return pipeline + if __name__ == "__main__": main() diff --git a/examples/concepts/map.yaml b/examples/concepts/map.yaml index b997e443..cee8e0b7 100644 --- a/examples/concepts/map.yaml +++ b/examples/concepts/map.yaml @@ -12,13 +12,18 @@ dag: parameter name per chunk is set to be "start_index". 
    Run this example by:
-      magnus execute -f examples/concepts/map.yaml
+      runnable execute -f examples/concepts/map.yaml

   start_at: chunk files
   steps:
     chunk files:
       type: task
       command_type: python
       command: "examples.concepts.map.chunk_files"
+      returns:
+        - name: stride
+          kind: json
+        - name: chunks
+          kind: json
       next: iterate and execute
     iterate and execute:
       type: map
diff --git a/examples/concepts/map_returns.py b/examples/concepts/map_returns.py
new file mode 100644
index 00000000..07a530fb
--- /dev/null
+++ b/examples/concepts/map_returns.py
@@ -0,0 +1,94 @@
+"""
+An example pipeline of using "map" to process a sequence of nodes repeatedly over an iterable
+parameter.
+
+The start_index argument for the function process_chunk is dynamically set by iterating over chunks.
+
+If the argument start_index is not provided, you can still access the current value via the
+runnable_MAP_VARIABLE environment variable. The environment variable runnable_MAP_VARIABLE
+is a dictionary with keys as iterate_as.
+
+Run this pipeline by:
+    python examples/concepts/map_returns.py
+"""
+
+
+def chunk_files():
+    """
+    Identify the number of chunks and files to execute per batch.
+
+    Set the parameter "chunks" to be the start indexes of each batch.
+    Set the parameter "stride" to be the number of files to
+    execute per batch.
+    """
+    return 10, list(range(0, 50, 10))
+
+
+def process_chunk(stride: int, start_index: int):
+    """
+    The function processes a chunk of files.
+    The files between the start_index and the start_index + stride
+    are processed per chunk.
+    """
+    for i in range(start_index, start_index + stride, stride):
+        pass
+
+    return f"processed {start_index} to {start_index + stride}"
+
+
+def read_processed_chunk(processed: str):
+    print(processed)
+
+
+def main():
+    """
+    The pythonic equivalent of the following pipeline.
+
+    chunks = chunk_files()
+
+    for start_index in chunks.chunks:
+        process_chunk(chunks.stride, start_index)
+
+    """
+    from runnable import Map, Pipeline, PythonTask
+
+    execute = PythonTask(
+        name="execute",
+        function=process_chunk,
+        returns=["processed"],
+    )
+
+    read_chunk = PythonTask(
+        name="read processed chunk",
+        function=read_processed_chunk,
+        terminate_with_success=True,
+    )
+
+    execute_branch = Pipeline(
+        steps=[execute, read_chunk],
+        add_terminal_nodes=True,
+    )
+
+    generate = PythonTask(
+        name="chunk files",
+        function=chunk_files,
+        returns=["stride", "chunks"],
+    )
+    iterate_and_execute = Map(
+        name="iterate and execute",
+        branch=execute_branch,
+        iterate_on="chunks",  # iterate on the chunks parameter set by the chunk files step
+        iterate_as="start_index",  # expose the current start_index as the iterate_as parameter
+        reducer="lambda *x: [f'reduced {y}' for y in x]",
+        terminate_with_success=True,
+    )
+
+    pipeline = Pipeline(steps=[generate, iterate_and_execute], add_terminal_nodes=True)
+
+    _ = pipeline.execute(configuration_file="examples/configs/fs-catalog-run_log.yaml")
+
+    return pipeline
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/concepts/map_shell.yaml b/examples/concepts/map_shell.yaml
index 64fe8f6e..1037b31f 100755
--- a/examples/concepts/map_shell.yaml
+++ b/examples/concepts/map_shell.yaml
@@ -12,19 +12,24 @@ dag:
     parameter name per chunk is set to be "start_index".

     The shell script can access the start_index as
-    MAGNUS_MAP_VARIABLE environment variable.
+    the runnable_MAP_VARIABLE environment variable.
    Run this pipeline by:
-      magnus execute -f examples/concepts/map_shell.yaml
+      runnable execute -f examples/concepts/map_shell.yaml

   start_at: chunk files
   steps:
     chunk files:
       type: task
       command_type: shell
+      returns:
+        - name: stride
+          kind: json
+        - name: chunks
+          kind: json
       command: |
-        export MAGNUS_PRM_stride=10 &&
-        export MAGNUS_PRM_chunks="[0, 10, 20, 30, 40]"
+        export stride=10 &&
+        export chunks="[0, 10, 20, 30, 40]"
       next: iterate and execute
     iterate and execute:
       type: map
@@ -38,9 +43,9 @@
       type: task
       command_type: shell
       command: |
-        echo $MAGNUS_PRM_stride
-        echo $MAGNUS_MAP_VARIABLE
-        # prints 10 and {"start_index": }
+        echo $stride
+        echo $start_index
+        # prints 10 and the current start_index (one of 0, 10, 20, 30, 40)
       next: success
     success:
       type: success
diff --git a/examples/concepts/nesting.py b/examples/concepts/nesting.py
index 8e2b0fea..7d1b85df 100644
--- a/examples/concepts/nesting.py
+++ b/examples/concepts/nesting.py
@@ -9,22 +9,17 @@

 from typing import List

-from pydantic import create_model
-
-from magnus import Map, Parallel, Pipeline, Stub, Task
+from runnable import Map, Parallel, Pipeline, PythonTask, Stub


 def generate_list():
-    return create_model(
-        "DynamicModel",
-        array=(List[int], list(range(2))),
-    )()
+    return list(range(2))


 def main():
     stub = Stub(name="executable", terminate_with_success=True)
     # A dummy pipeline that does nothing interesting
-    stubbed_pipeline = Pipeline(steps=[stub], start_at=stub, add_terminal_nodes=True)
+    stubbed_pipeline = Pipeline(steps=[stub], add_terminal_nodes=True)

     # A map step that executes the stubbed pipeline dynamically
     # This step represents 2 parallel workflows when executed.
@@ -37,7 +32,7 @@ def main():
     )

     # A pipeline with map state.
-    map_pipeline = Pipeline(steps=[inner_most_map], start_at=inner_most_map, add_terminal_nodes=True)
+    map_pipeline = Pipeline(steps=[inner_most_map], add_terminal_nodes=True)

     # A parallel step that executes a map_pipeline and stubbed pipeline
     # By nesting a map within the parallel step, the total number of workflows is 4 (2 X 2 = 4)
@@ -48,9 +43,9 @@ def main():
     )

     # A pipeline with one nested parallel step
-    nested_parallel_pipeline = Pipeline(steps=[nested_parallel], start_at=nested_parallel, add_terminal_nodes=True)
+    nested_parallel_pipeline = Pipeline(steps=[nested_parallel], add_terminal_nodes=True)

-    list_generator = Task(name="generate list", command="examples.concepts.nesting.generate_list")
+    list_generator = PythonTask(name="generate list", function=generate_list, returns=["array"])

     # A map step that iterates over array and executes nested_parallel_pipeline
     # The total number of workflows is 50 by this time (2 X 2 X 2 = 8)
@@ -62,12 +57,12 @@ def main():
         terminate_with_success=True,
     )

-    list_generator >> outer_most_map
-
-    root_pipeline = Pipeline(steps=[list_generator, outer_most_map], start_at=list_generator, add_terminal_nodes=True)
+    root_pipeline = Pipeline(steps=[list_generator, outer_most_map], add_terminal_nodes=True)

     _ = root_pipeline.execute(configuration_file="examples/configs/fs-catalog-run_log.yaml")

+    return root_pipeline
+

 if __name__ == "__main__":
     main()
diff --git a/examples/concepts/nesting.yaml b/examples/concepts/nesting.yaml
index b8ced8d0..93f594f9 100644
--- a/examples/concepts/nesting.yaml
+++ b/examples/concepts/nesting.yaml
@@ -3,14 +3,17 @@ dag:
     An example of nesting pipelines within pipelines.
Run this pipeline by: - magnus execute -f example/concepts/nesting.yaml + runnable execute -f examples/concepts/nesting.yaml start_at: generate_list steps: generate_list: type: task command_type: shell - command: export MAGNUS_PRM_array="[0, 1]" + returns: + - name: array + kind: json + command: export array="[0, 1]" next: outer most map outer most map: type: map diff --git a/examples/concepts/notebook_api_parameters.ipynb b/examples/concepts/notebook_api_parameters.ipynb deleted file mode 100644 index 1ee97a19..00000000 --- a/examples/concepts/notebook_api_parameters.ipynb +++ /dev/null @@ -1,90 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "4377a9c8", - "metadata": {}, - "outputs": [], - "source": [ - "from pydantic import BaseModel" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", - "metadata": {}, - "outputs": [], - "source": [ - "from magnus import get_parameter, set_parameter\n", - "\n", - "class EggsModel(BaseModel):\n", - " ham: str\n", - "\n", - "\n", - "class EverythingModel(BaseModel):\n", - " spam: str\n", - " eggs: EggsModel" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e04f11a", - "metadata": {}, - "outputs": [], - "source": [ - "eggs = get_parameter(\"eggs\", cast_as=EggsModel)\n", - "spam = get_parameter(\"spam\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", - "metadata": {}, - "outputs": [], - "source": [ - "def modify_from_native(spam: str, eggs: EggsModel):\n", - " \"\"\"\n", - " Access initial parameters by the keys.\n", - " Type annotation helps in casting to the right model type.\n", - " \"\"\"\n", - " print(spam) # as returned by native parameters notebook\n", - " \">>> World\"\n", - " print(eggs) # as returned by native parameters notebook\n", - " \">>> ham='No, Thank you!!'\"\n", - "\n", - " \n", - " spam =\"Universe?\"\n", - " eggs = EggsModel(ham=\"Maybe, one more!!\")\n", - " set_parameter(spam=spam, eggs=eggs)\n", - "\n", - "\n", - "modified = modify_from_native(spam=spam, eggs=eggs)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/concepts/notebook_api_parameters.yaml b/examples/concepts/notebook_api_parameters.yaml deleted file mode 100644 index 04836c0d..00000000 --- a/examples/concepts/notebook_api_parameters.yaml +++ /dev/null @@ -1,26 +0,0 @@ -dag: - description: | - This is a sample pipeline with two steps that executes a notebook. - - The notebook is executed in the same environment as the current - project, you can import any module that was installed for the project. 
- - You can run this pipeline as: - magnus execute -f examples/concepts/notebook_api_parameters.yaml -p examples/concepts/parameters.yaml - - start_at: native notebook - steps: - native notebook: - type: task - command_type: notebook - command: examples/concepts/notebook_native_parameters.ipynb - next: api notebook - api notebook: - type: task - command_type: notebook - command: examples/concepts/notebook_api_parameters.ipynb - next: success - success: - type: success - fail: - type: fail diff --git a/examples/concepts/notebook_api_parameters_out.ipynb b/examples/concepts/notebook_api_parameters_out.ipynb deleted file mode 100644 index 3d4446c5..00000000 --- a/examples/concepts/notebook_api_parameters_out.ipynb +++ /dev/null @@ -1,139 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "1b6e62ed", - "metadata": { - "ploomber": { - "timestamp_end": 1707918790.255937, - "timestamp_start": 1707918790.255617 - }, - "tags": [ - "injected-parameters" - ] - }, - "outputs": [], - "source": [ - "# Injected parameters\n", - "spam = \"World\"\n", - "eggs = {\"ham\": \"No, Thank you!!\"}\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "4377a9c8", - "metadata": { - "ploomber": { - "timestamp_end": 1707918790.256169, - "timestamp_start": 1707918790.25596 - } - }, - "outputs": [], - "source": [ - "from pydantic import BaseModel" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", - "metadata": { - "ploomber": { - "timestamp_end": 1707918790.257516, - "timestamp_start": 1707918790.256187 - } - }, - "outputs": [], - "source": [ - "from magnus import get_parameter, set_parameter\n", - "\n", - "class EggsModel(BaseModel):\n", - " ham: str\n", - "\n", - "\n", - "class EverythingModel(BaseModel):\n", - " spam: str\n", - " eggs: EggsModel" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "0e04f11a", - "metadata": { - "ploomber": { - "timestamp_end": 1707918790.257781, - "timestamp_start": 1707918790.257534 - } - }, - "outputs": [], - "source": [ - "eggs = get_parameter(\"eggs\", cast_as=EggsModel)\n", - "spam = get_parameter(\"spam\")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", - "metadata": { - "ploomber": { - "timestamp_end": 1707918790.25824, - "timestamp_start": 1707918790.257795 - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "World\n", - "ham='No, Thank you!!'\n" - ] - } - ], - "source": [ - "def modify_from_native(spam: str, eggs: EggsModel):\n", - " \"\"\"\n", - " Access initial parameters by the keys.\n", - " Type annotation helps in casting to the right model type.\n", - " \"\"\"\n", - " print(spam) # as returned by native parameters notebook\n", - " \">>> World\"\n", - " print(eggs) # as returned by native parameters notebook\n", - " \">>> ham='No, Thank you!!'\"\n", - "\n", - " \n", - " spam =\"Universe?\"\n", - " eggs = EggsModel(ham=\"Maybe, one more!!\")\n", - " set_parameter(spam=spam, eggs=eggs)\n", - "\n", - "\n", - "modified = modify_from_native(spam=spam, eggs=eggs)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - 
"nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/concepts/notebook_env_parameters.ipynb b/examples/concepts/notebook_env_parameters.ipynb deleted file mode 100644 index cd230c69..00000000 --- a/examples/concepts/notebook_env_parameters.ipynb +++ /dev/null @@ -1,86 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "4377a9c8", - "metadata": {}, - "outputs": [], - "source": [ - "from pydantic import BaseModel\n", - "import os\n", - "import json" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", - "metadata": {}, - "outputs": [], - "source": [ - "class EggsModel(BaseModel):\n", - " ham: str\n", - "\n", - "\n", - "class EverythingModel(BaseModel):\n", - " spam: str\n", - " eggs: EggsModel" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e04f11a", - "metadata": {}, - "outputs": [], - "source": [ - "spam = os.environ[\"MAGNUS_PRM_spam\"]\n", - "eggs = EggsModel.model_validate_json(os.environ[\"MAGNUS_PRM_eggs\"])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", - "metadata": {}, - "outputs": [], - "source": [ - "def modify_from_native(spam: str, eggs: EggsModel):\n", - " print(spam) # as returned by native parameters notebook\n", - " \">>> World\"\n", - " print(eggs) # as returned by native parameters notebook\n", - " \">>> ham='No, Thank you!!'\"\n", - "\n", - " \n", - " os.environ[\"MAGNUS_PRM_spam\"] = \"Universe\"\n", - " eggs = EggsModel(ham=\"Maybe, one more!!\")\n", - " os.environ[\"MAGNUS_PRM_eggs\"] = json.dumps(eggs.model_dump(by_alias=True))\n", - "\n", - "\n", - "modified = modify_from_native(spam=spam, eggs=eggs)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/concepts/notebook_env_parameters.yaml b/examples/concepts/notebook_env_parameters.yaml deleted file mode 100644 index 0df1d105..00000000 --- a/examples/concepts/notebook_env_parameters.yaml +++ /dev/null @@ -1,26 +0,0 @@ -dag: - description: | - This is a sample pipeline with two steps that executes a notebook. - - The notebook is executed in the same environment as the current - project, you can import any module that was installed for the project. 
- - You can run this pipeline as: - magnus execute -f examples/concepts/notebook_env_parameters.yaml -p examples/concepts/parameters.yaml - - start_at: native notebook - steps: - native notebook: - type: task - command_type: notebook - command: examples/concepts/notebook_native_parameters.ipynb - next: api notebook - api notebook: - type: task - command_type: notebook - command: examples/concepts/notebook_env_parameters.ipynb - next: success - success: - type: success - fail: - type: fail diff --git a/examples/concepts/notebook_native_parameters.ipynb b/examples/concepts/notebook_native_parameters.ipynb index 4d9f7833..33a29b75 100644 --- a/examples/concepts/notebook_native_parameters.ipynb +++ b/examples/concepts/notebook_native_parameters.ipynb @@ -1,111 +1,88 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", - "metadata": {}, - "outputs": [], - "source": [ - "from pydantic import BaseModel\n", - "\n", - "class EggsModel(BaseModel):\n", - " ham: str\n", - "\n", - "\n", - "class EverythingModel(BaseModel):\n", - " spam: str\n", - " eggs: EggsModel" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7f0aab2", - "metadata": { - "tags": [ - "parameters" - ] - }, - "outputs": [], - "source": [ - "# Note the tag parameters\n", - "spam = \"Change me\" \n", - "eggs =\"Change me\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e04f11a", - "metadata": {}, - "outputs": [], - "source": [ - "eggs = EggsModel.model_validate(eggs) # Cast the dict to EggsModel object" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", - "metadata": {}, - "outputs": [], - "source": [ - "def modify_initial(spam: str, eggs: EggsModel):\n", - " \"\"\"\n", - " Access initial parameters by the keys.\n", - " Type annotation helps in casting to the right model type.\n", - " \"\"\"\n", - " print(spam)\n", - " \">>> Hello\"\n", - " print(eggs)\n", - " \">>> ham='Yes, please!!'\"\n", - "\n", - " \n", - " spam =\"World\"\n", - " eggs = EggsModel(ham=\"No, Thank you!!\")\n", - " return EverythingModel(spam=spam, eggs=eggs)\n", - "\n", - "\n", - "modified = modify_initial(spam=spam, eggs=eggs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a58a4492", - "metadata": { - "tags": [ - "magnus_output" - ] - }, - "outputs": [], - "source": [ - "\n", - "# Note the tag magnus_output\n", - "print(modified.model_dump(by_alias=True))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", + "metadata": {}, + "outputs": [], + "source": [ + "from pydantic import BaseModel\n", + "\n", + "\n", + "class EggsModel(BaseModel):\n", + " ham: str\n", + "\n", + "\n", + "class EverythingModel(BaseModel):\n", + " spam: str\n", + " eggs: EggsModel\n", + "\n", + "\n", + "\n", + "class CustomObject:\n", + " def __init__(self, value):\n", + " self.value = 42" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7f0aab2", + "metadata": { + "tags": [ + 
"parameters" + ] + }, + "outputs": [], + "source": [ + "# Note the tag parameters\n", + "spam = \"Change me\" \n", + "eggs =\"Change me\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0e04f11a", + "metadata": {}, + "outputs": [], + "source": [ + "eggs = EggsModel.model_validate(eggs) # Cast the dict to EggsModel object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", + "metadata": {}, + "outputs": [], + "source": [ + "spam =\"World\"\n", + "eggs = EggsModel(ham=\"No, Thank you!!\")\n", + "custom = CustomObject(value=42)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/examples/concepts/notebook_native_parameters.yaml b/examples/concepts/notebook_native_parameters.yaml index e4d08b72..0144d119 100644 --- a/examples/concepts/notebook_native_parameters.yaml +++ b/examples/concepts/notebook_native_parameters.yaml @@ -3,14 +3,14 @@ dag: This is a sample pipeline with one step that executes a notebook. The step name "notebook" has the "command_type" to be notebook to - let magnus know to execute a notebook while the command is the + let runnable know to execute a notebook while the command is the path to the notebook relative to the project root. The notebook is executed in the same environment as the current project, you can import any module that was installed for the project. 
You can run this pipeline as: - magnus execute -f examples/concepts/notebook_native_parameters.yaml -p examples/concepts/parameters.yaml + runnable execute -f examples/concepts/notebook_native_parameters.yaml -p examples/concepts/parameters.yaml start_at: notebook steps: @@ -18,6 +18,18 @@ dag: type: task command_type: notebook command: examples/concepts/notebook_native_parameters.ipynb + returns: + - name: spam + kind: json + - name: eggs + kind: json + - name: custom + kind: object + next: consume_notebook + consume_notebook: + type: task + command_type: notebook + command: examples/concepts/notebook_native_parameters_consume.ipynb next: success success: type: success diff --git a/examples/concepts/notebook_native_parameters_consume.ipynb b/examples/concepts/notebook_native_parameters_consume.ipynb new file mode 100644 index 00000000..ae5ca22e --- /dev/null +++ b/examples/concepts/notebook_native_parameters_consume.ipynb @@ -0,0 +1,82 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", + "metadata": {}, + "outputs": [], + "source": [ + "from pydantic import BaseModel\n", + "\n", + "\n", + "class EggsModel(BaseModel):\n", + " ham: str\n", + "\n", + "\n", + "class EverythingModel(BaseModel):\n", + " spam: str\n", + " eggs: EggsModel\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7f0aab2", + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# Note the tag parameters\n", + "spam = \"Change me\" \n", + "eggs =\"Change me\"\n", + "custom = \"change me\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0e04f11a", + "metadata": {}, + "outputs": [], + "source": [ + "eggs = EggsModel.model_validate(eggs) # Cast the dict to EggsModel object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", + "metadata": {}, + "outputs": [], + "source": [ + "spam =\"World\"\n", + "eggs = EggsModel(ham=\"No, Thank you!!\")\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/concepts/notebook_env_parameters_out.ipynb b/examples/concepts/notebook_native_parameters_consume_out.ipynb similarity index 54% rename from examples/concepts/notebook_env_parameters_out.ipynb rename to examples/concepts/notebook_native_parameters_consume_out.ipynb index 52d75e9d..16c8e6ad 100644 --- a/examples/concepts/notebook_env_parameters_out.ipynb +++ b/examples/concepts/notebook_native_parameters_consume_out.ipynb @@ -3,59 +3,66 @@ { "cell_type": "code", "execution_count": 1, - "id": "91076f2e", + "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", "metadata": { "ploomber": { - "timestamp_end": 1707918790.591766, - "timestamp_start": 1707918790.591444 - }, - "tags": [ - "injected-parameters" - ] + "timestamp_end": 1712673802.476133, + "timestamp_start": 1712673802.474048 + } }, "outputs": [], "source": [ - "# Injected parameters\n", - "spam = \"World\"\n", - "eggs = {\"ham\": \"No, Thank you!!\"}\n" + "from pydantic import BaseModel\n", + "\n", + "\n", + "class EggsModel(BaseModel):\n", + " ham: str\n", + "\n", + "\n", + "class 
EverythingModel(BaseModel):\n", + " spam: str\n", + " eggs: EggsModel\n" ] }, { "cell_type": "code", "execution_count": 2, - "id": "4377a9c8", + "id": "e7f0aab2", "metadata": { "ploomber": { - "timestamp_end": 1707918790.591986, - "timestamp_start": 1707918790.591793 - } + "timestamp_end": 1712673802.476318, + "timestamp_start": 1712673802.476155 + }, + "tags": [ + "parameters" + ] }, "outputs": [], "source": [ - "from pydantic import BaseModel\n", - "import os\n", - "import json" + "# Note the tag parameters\n", + "spam = \"Change me\" \n", + "eggs =\"Change me\"\n", + "custom = \"change me\"" ] }, { "cell_type": "code", "execution_count": 3, - "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", + "id": "143149bb", "metadata": { "ploomber": { - "timestamp_end": 1707918790.593172, - "timestamp_start": 1707918790.592001 - } + "timestamp_end": 1712673802.476461, + "timestamp_start": 1712673802.476332 + }, + "tags": [ + "injected-parameters" + ] }, "outputs": [], "source": [ - "class EggsModel(BaseModel):\n", - " ham: str\n", - "\n", - "\n", - "class EverythingModel(BaseModel):\n", - " spam: str\n", - " eggs: EggsModel" + "# Injected parameters\n", + "spam = \"World\"\n", + "eggs = {\"ham\": \"No, Thank you!!\"}\n" ] }, { @@ -64,14 +71,13 @@ "id": "0e04f11a", "metadata": { "ploomber": { - "timestamp_end": 1707918790.593867, - "timestamp_start": 1707918790.593187 + "timestamp_end": 1712673802.476606, + "timestamp_start": 1712673802.476473 } }, "outputs": [], "source": [ - "spam = os.environ[\"MAGNUS_PRM_spam\"]\n", - "eggs = EggsModel.model_validate_json(os.environ[\"MAGNUS_PRM_eggs\"])" + "eggs = EggsModel.model_validate(eggs) # Cast the dict to EggsModel object" ] }, { @@ -80,34 +86,14 @@ "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", "metadata": { "ploomber": { - "timestamp_end": 1707918790.594819, - "timestamp_start": 1707918790.593886 + "timestamp_end": 1712673802.476742, + "timestamp_start": 1712673802.476618 } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\"World\"\n", - "ham='No, Thank you!!'\n" - ] - } - ], + "outputs": [], "source": [ - "def modify_from_native(spam: str, eggs: EggsModel):\n", - " print(spam) # as returned by native parameters notebook\n", - " \">>> World\"\n", - " print(eggs) # as returned by native parameters notebook\n", - " \">>> ham='No, Thank you!!'\"\n", - "\n", - " \n", - " os.environ[\"MAGNUS_PRM_spam\"] = \"Universe\"\n", - " eggs = EggsModel(ham=\"Maybe, one more!!\")\n", - " os.environ[\"MAGNUS_PRM_eggs\"] = json.dumps(eggs.model_dump(by_alias=True))\n", - "\n", - "\n", - "modified = modify_from_native(spam=spam, eggs=eggs)" + "spam =\"World\"\n", + "eggs = EggsModel(ham=\"No, Thank you!!\")\n" ] } ], diff --git a/examples/concepts/notebook_native_parameters_out.ipynb b/examples/concepts/notebook_native_parameters_out.ipynb index b6382d45..0550af9c 100644 --- a/examples/concepts/notebook_native_parameters_out.ipynb +++ b/examples/concepts/notebook_native_parameters_out.ipynb @@ -6,21 +6,28 @@ "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", "metadata": { "ploomber": { - "timestamp_end": 1707918790.857198, - "timestamp_start": 1707918790.85559 + "timestamp_end": 1712673802.301212, + "timestamp_start": 1712673802.299645 } }, "outputs": [], "source": [ "from pydantic import BaseModel\n", "\n", + "\n", "class EggsModel(BaseModel):\n", " ham: str\n", "\n", "\n", "class EverythingModel(BaseModel):\n", " spam: str\n", - " eggs: EggsModel" + " eggs: EggsModel\n", + "\n", + "\n", + "\n", + "class CustomObject:\n", + " def __init__(self, 
value):\n", + " self.value = 42" ] }, { @@ -29,8 +36,8 @@ "id": "e7f0aab2", "metadata": { "ploomber": { - "timestamp_end": 1707918790.857372, - "timestamp_start": 1707918790.857225 + "timestamp_end": 1712673802.30143, + "timestamp_start": 1712673802.301298 }, "tags": [ "parameters" @@ -46,11 +53,11 @@ { "cell_type": "code", "execution_count": 3, - "id": "214344ac", + "id": "2aa79db0", "metadata": { "ploomber": { - "timestamp_end": 1707918790.857516, - "timestamp_start": 1707918790.857386 + "timestamp_end": 1712673802.30158, + "timestamp_start": 1712673802.301444 }, "tags": [ "injected-parameters" @@ -69,8 +76,8 @@ "id": "0e04f11a", "metadata": { "ploomber": { - "timestamp_end": 1707918790.857676, - "timestamp_start": 1707918790.857529 + "timestamp_end": 1712673802.301748, + "timestamp_start": 1712673802.301593 } }, "outputs": [], @@ -84,66 +91,15 @@ "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", "metadata": { "ploomber": { - "timestamp_end": 1707918790.858087, - "timestamp_start": 1707918790.857688 - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hello\n", - "ham='Yes, please!!'\n" - ] + "timestamp_end": 1712673802.301934, + "timestamp_start": 1712673802.301761 } - ], - "source": [ - "def modify_initial(spam: str, eggs: EggsModel):\n", - " \"\"\"\n", - " Access initial parameters by the keys.\n", - " Type annotation helps in casting to the right model type.\n", - " \"\"\"\n", - " print(spam)\n", - " \">>> Hello\"\n", - " print(eggs)\n", - " \">>> ham='Yes, please!!'\"\n", - "\n", - " \n", - " spam =\"World\"\n", - " eggs = EggsModel(ham=\"No, Thank you!!\")\n", - " return EverythingModel(spam=spam, eggs=eggs)\n", - "\n", - "\n", - "modified = modify_initial(spam=spam, eggs=eggs)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "a58a4492", - "metadata": { - "ploomber": { - "timestamp_end": 1707918790.858388, - "timestamp_start": 1707918790.858251 - }, - "tags": [ - "magnus_output" - ] }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'spam': 'World', 'eggs': {'ham': 'No, Thank you!!'}}\n" - ] - } - ], + "outputs": [], "source": [ - "\n", - "# Note the tag magnus_output\n", - "print(modified.model_dump(by_alias=True))" + "spam =\"World\"\n", + "eggs = EggsModel(ham=\"No, Thank you!!\")\n", + "custom = CustomObject(value=42)" ] } ], diff --git a/examples/concepts/parallel.py b/examples/concepts/parallel.py index 0443bdc8..8b40596a 100644 --- a/examples/concepts/parallel.py +++ b/examples/concepts/parallel.py @@ -11,7 +11,7 @@ python examples/concepts/parallel.py """ -from magnus import Parallel, Pipeline, Stub +from runnable import Parallel, Pipeline, Stub def main(): @@ -19,13 +19,13 @@ def main(): prepare_xgboost = Stub(name="Prepare for XGBoost") train_xgboost = Stub(name="Train XGBoost", terminate_with_success=True) - prepare_xgboost >> train_xgboost + # prepare_xgboost >> train_xgboost # The pipeline for XGBoost training xgboost = Pipeline( name="XGBoost", steps=[prepare_xgboost, train_xgboost], - start_at=prepare_xgboost, + # start_at=prepare_xgboost, add_terminal_nodes=True, ) @@ -33,7 +33,7 @@ def main(): train_rf = Stub(name="Train RF", terminate_with_success=True) rfmodel = Pipeline( steps=[train_rf], - start_at=train_rf, + # start_at=train_rf, add_terminal_nodes=True, ) @@ -48,17 +48,19 @@ def main(): ensemble_model = Stub(name="Ensemble Modelling") run_inference = Stub(name="Run Inference", terminate_with_success=True) - get_features >> train_models >> ensemble_model >> run_inference + # 
get_features >> train_models >> ensemble_model >> run_inference # The parent pipeline pipeline = Pipeline( steps=[get_features, train_models, ensemble_model, run_inference], - start_at=get_features, + # start_at=get_features, add_terminal_nodes=True, ) _ = pipeline.execute(configuration_file="examples/configs/fs-catalog-run_log.yaml") + return pipeline + if __name__ == "__main__": main() diff --git a/examples/concepts/parallel.yaml b/examples/concepts/parallel.yaml index dd623966..9fd54690 100644 --- a/examples/concepts/parallel.yaml +++ b/examples/concepts/parallel.yaml @@ -9,7 +9,7 @@ dag: flesh out the steps later. You can run this pipeline by: - magnus execute -f examples/concepts/parallel.yaml + runnable execute -f examples/concepts/parallel.yaml start_at: Get Features steps: Get Features: diff --git a/examples/concepts/simple.py b/examples/concepts/simple.py index b3932846..069c86dd 100644 --- a/examples/concepts/simple.py +++ b/examples/concepts/simple.py @@ -5,7 +5,7 @@ python examples/concepts/simple.py """ -from magnus import Pipeline, Task +from runnable import Pipeline, PythonTask def simple_function(): @@ -16,20 +16,21 @@ def simple_function(): def main(): - simple_task = Task( + simple_task = PythonTask( name="simple", - command="examples.concepts.simple.simple_function", + function=simple_function, terminate_with_success=True, ) pipeline = Pipeline( steps=[simple_task], - start_at=simple_task, add_terminal_nodes=True, ) pipeline.execute() # (1) + return pipeline + if __name__ == "__main__": main() diff --git a/examples/concepts/simple.yaml b/examples/concepts/simple.yaml index 97b241ce..1854c11b 100644 --- a/examples/concepts/simple.yaml +++ b/examples/concepts/simple.yaml @@ -3,7 +3,7 @@ dag: A simple pipeline with a simple function that just prints "Hello World!". Run this pipeline by: - magnus execute -f examples/concepts/simple.yaml + runnable execute -f examples/concepts/simple.yaml start_at: simple steps: simple: diff --git a/examples/concepts/simple_notebook.ipynb b/examples/concepts/simple_notebook.ipynb index 0aa468a5..1f2547b7 100644 --- a/examples/concepts/simple_notebook.ipynb +++ b/examples/concepts/simple_notebook.ipynb @@ -19,7 +19,17 @@ "outputs": [], "source": [ "def multiply(x, y):\n", - " return x * y" + " return x * y\n", + "\n", + "from pydantic import BaseModel\n", + "\n", + "class EggsModel(BaseModel):\n", + " ham: str\n", + "\n", + "\n", + "class ObjectType:\n", + " def __init__(self):\n", + " self.salute = \"hello\"" ] }, { @@ -39,7 +49,9 @@ "metadata": {}, "outputs": [], "source": [ - "b = multiply(2, 100)" + "b = multiply(2, 100)\n", + "\n", + "c = EggsModel(ham=\"hello\")" ] } ], diff --git a/examples/concepts/simple_notebook.yaml b/examples/concepts/simple_notebook.yaml index 69acada8..27f8de77 100644 --- a/examples/concepts/simple_notebook.yaml +++ b/examples/concepts/simple_notebook.yaml @@ -3,20 +3,24 @@ dag: This is a sample pipeline with one step that executes a notebook. The step name "notebook" has the "command_type" to be notebook to - let magnus know to execute a notebook while the command is the + let runnable know to execute a notebook while the command is the path to the notebook relative to the project root. The notebook is executed in the same environment as the current project, you can import any module that was installed for the project. 
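The hunk just below adds a "returns" block (a, b, c) to the simple notebook step, so downstream steps can consume what the notebook computed, including the pydantic EggsModel instance c. A minimal sketch of such a consumer, assuming the same annotation-based casting that examples/parameters.py uses for PythonTask arguments; the consumer function itself is illustrative, not part of this diff:

from pydantic import BaseModel

from runnable import PythonTask


class EggsModel(BaseModel):
    ham: str


def consume(a: int, b: int, c: EggsModel) -> None:
    # a and b arrive as plain json-serialisable values set in the notebook;
    # c is re-cast to EggsModel via the type annotation.
    print(a, b, c.ham)


consume_task = PythonTask(
    name="consume",
    function=consume,
    terminate_with_success=True,
)
# consume_task would be appended to the pipeline's steps after the notebook step.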
You can run this pipeline as: - magnus execute -f examples/concepts/simple_notebook.yaml + runnable execute -f examples/concepts/simple_notebook.yaml start_at: notebook steps: notebook: type: task command_type: notebook + returns: + - name: a + - name: b + - name: c command: examples/concepts/simple_notebook.ipynb next: success success: diff --git a/examples/concepts/simple_notebook_out.ipynb b/examples/concepts/simple_notebook_out.ipynb index 6517aa9d..2abe5bb4 100644 --- a/examples/concepts/simple_notebook_out.ipynb +++ b/examples/concepts/simple_notebook_out.ipynb @@ -3,11 +3,11 @@ { "cell_type": "code", "execution_count": 1, - "id": "fde0ddbb", + "id": "eda8faba", "metadata": { "ploomber": { - "timestamp_end": 1707918791.855813, - "timestamp_start": 1707918791.855612 + "timestamp_end": 1712673803.931629, + "timestamp_start": 1712673803.931437 }, "tags": [ "injected-parameters" @@ -24,8 +24,8 @@ "id": "3e98e89e-765c-42d4-81ea-c371c2eab14d", "metadata": { "ploomber": { - "timestamp_end": 1707918791.856004, - "timestamp_start": 1707918791.855832 + "timestamp_end": 1712673803.931918, + "timestamp_start": 1712673803.931656 } }, "outputs": [], @@ -40,14 +40,24 @@ "id": "9f1cbac6-cada-42b0-8fb1-ddb25a88836c", "metadata": { "ploomber": { - "timestamp_end": 1707918791.856139, - "timestamp_start": 1707918791.856017 + "timestamp_end": 1712673803.932905, + "timestamp_start": 1712673803.931932 } }, "outputs": [], "source": [ "def multiply(x, y):\n", - " return x * y" + " return x * y\n", + "\n", + "from pydantic import BaseModel\n", + "\n", + "class EggsModel(BaseModel):\n", + " ham: str\n", + "\n", + "\n", + "class ObjectType:\n", + " def __init__(self):\n", + " self.salute = \"hello\"" ] }, { @@ -56,8 +66,8 @@ "id": "9dcadc93-aa77-4a0a-9465-2e33eef4da44", "metadata": { "ploomber": { - "timestamp_end": 1707918791.856272, - "timestamp_start": 1707918791.856151 + "timestamp_end": 1712673803.933058, + "timestamp_start": 1712673803.932921 } }, "outputs": [], @@ -71,13 +81,15 @@ "id": "7b872cdf-820b-47b5-8f22-15c4b69c8637", "metadata": { "ploomber": { - "timestamp_end": 1707918791.856388, - "timestamp_start": 1707918791.856284 + "timestamp_end": 1712673803.933233, + "timestamp_start": 1712673803.933071 } }, "outputs": [], "source": [ - "b = multiply(2, 100)" + "b = multiply(2, 100)\n", + "\n", + "c = EggsModel(ham=\"hello\")" ] } ], diff --git a/examples/concepts/task_api_parameters.py b/examples/concepts/task_api_parameters.py deleted file mode 100644 index 998dcc27..00000000 --- a/examples/concepts/task_api_parameters.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -An example pipeline of accessing initial parameters and passing parameters between tasks -using the python API. - -You can run this pipeline by: - python examples/concepts/task_api_parameters.py - -""" - -from pydantic import BaseModel - -from magnus import Pipeline, Task, get_parameter, set_parameter - - -class EggsModel(BaseModel): - ham: str - - -class EverythingModel(BaseModel): - spam: str - eggs: EggsModel - - -def modify_initial(): - """ - Access initial parameters by the keys. - """ - spam = get_parameter("spam") - eggs = get_parameter("eggs", cast_as=EggsModel) - print(spam) - ">>> Hello" - print(eggs) - ">>> ham='Yes, please!!'" - - # modify parameters - set_parameter(spam="World", eggs=EggsModel(ham="No, Thank you!!")) - - -def consume(): - """ - Access only a subset of the parameters. - """ - # the value is set by the modify_initial function. - # Use cast_as to type hint the return value. 
- eggs = get_parameter("eggs", cast_as=EggsModel) - print(eggs) - ">>> ham='No, Thank you!!'" - - set_parameter(eggs=EggsModel(ham="May be one more!!")) - - -def main(): - modify = Task( - name="Modify", - command="examples.concepts.task_api_parameters.modify_initial", - ) - - consume = Task( - name="Consume", - command="examples.concepts.task_api_parameters.consume", - terminate_with_success=True, - ) - - modify >> consume - - pipeline = Pipeline( - steps=[modify, consume], - start_at=modify, - add_terminal_nodes=True, - ) - pipeline.execute(parameters_file="examples/concepts/parameters.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/task_env_parameters.py b/examples/concepts/task_env_parameters.py deleted file mode 100644 index 0fc1d409..00000000 --- a/examples/concepts/task_env_parameters.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -An example pipeline of accessing initial parameters and passing parameters between tasks -using environment variables. - -You can run this pipeline by: - python examples/concepts/task_env_parameters.py - -""" - -import json -import os - -from pydantic import BaseModel - -from magnus import Pipeline, Task - - -class EggsModel(BaseModel): - ham: str - - -class EverythingModel(BaseModel): - spam: str - eggs: EggsModel - - -def modify_initial(): - """ - Access initial parameters by the keys. - """ - spam = os.environ["MAGNUS_PRM_spam"] - eggs = EggsModel.model_validate_json(os.environ["MAGNUS_PRM_eggs"]) - print(spam) - ">>> Hello" - print(eggs) - ">>> ham='Yes, please!!'" - - # modify parameters - os.environ["MAGNUS_PRM_spam"] = "World" - os.environ["MAGNUS_PRM_eggs"] = json.dumps(eggs.model_dump(by_alias=True)) - - -def consume(): - """ - Access only a subset of the parameters. - """ - # the value is set by the modify_initial function. - # Use cast_as to type hint the return value. - eggs = EggsModel.model_validate_json(os.environ["MAGNUS_PRM_eggs"]) - print(eggs) - ">>> ham='No, Thank you!!'" - - os.environ["MAGNUS_PRM_eggs"] = json.dumps(EggsModel(ham="May be one more!!").model_dump_json(by_alias=True)) - - -def main(): - modify = Task( - name="Modify", - command="examples.concepts.task_api_parameters.modify_initial", - ) - - consume = Task( - name="Consume", - command="examples.concepts.task_api_parameters.consume", - terminate_with_success=True, - ) - - modify >> consume - - pipeline = Pipeline( - steps=[modify, consume], - start_at=modify, - add_terminal_nodes=True, - ) - pipeline.execute(parameters_file="examples/concepts/parameters.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/task_native_parameters.py b/examples/concepts/task_native_parameters.py deleted file mode 100644 index e5623e3a..00000000 --- a/examples/concepts/task_native_parameters.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -An example pipeline of accessing initial parameters and passing parameters between tasks. - -You can run this pipeline by: - python examples/concepts/task_native_parameters.py - -""" - -from pydantic import BaseModel, create_model - - -class EggsModel(BaseModel): - ham: str - - -class EverythingModel(BaseModel): - spam: str - eggs: EggsModel - - -def modify_initial(spam: str, eggs: EggsModel): - """ - Access initial parameters by the keys. - Type annotation helps in casting to the right model type. - """ - print(spam) - ">>> Hello" - print(eggs) - ">>> ham='Yes, please!!'" - - # Return modified parameters - # Use this pattern to create or modify parameters at the root level. 
- return EverythingModel(spam="World", eggs=EggsModel(ham="No, Thank you!!")) - - -def consume(eggs: EggsModel): - """ - Access only a subset of the parameters. - """ - # the value is set by the modify_initial function. - print(eggs) - ">>> ham='No, Thank you!!'" - - # Magnus supports only pydantic models as return types. - # You can modify a subset of the parameters by creating a dynamic pydantic model. - # https://docs.pydantic.dev/latest/concepts/models/#dynamic-model-creation - - # CAUTION: Returning "eggs" would result in a new parameter "ham" at the root level - # as it looses the nested structure. - return create_model( - "DynamicModel", - eggs=(EggsModel, EggsModel(ham="May be one more!!")), - )() - - -def main(): - from magnus import Pipeline, Task - - modify = Task( - name="Modify", - command="examples.concepts.task_native_parameters.modify_initial", - ) - - consume = Task( - name="Consume", - command="examples.concepts.task_native_parameters.consume", - terminate_with_success=True, - ) - - modify >> consume - - pipeline = Pipeline( - steps=[modify, consume], - start_at=modify, - add_terminal_nodes=True, - ) - pipeline.execute(parameters_file="examples/concepts/parameters.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/task_shell_parameters.yaml b/examples/concepts/task_shell_parameters.yaml index a014a135..a4b2ff80 100644 --- a/examples/concepts/task_shell_parameters.yaml +++ b/examples/concepts/task_shell_parameters.yaml @@ -7,7 +7,7 @@ dag: The step display_again displays the updated parameters from modify_initial and updates them. You can run this pipeline as: - magnus execute -f examples/concepts/task_shell_parameters.yaml -p examples/concepts/parameters.yaml + runnable execute -f examples/concepts/task_shell_parameters.yaml -p examples/concepts/parameters.yaml start_at: access initial steps: @@ -15,28 +15,33 @@ dag: type: task command_type: shell command: | - env | grep 'MAGNUS_PRM_' - # MAGNUS_PRM_spam="Hello" - # MAGNUS_PRM_eggs={"ham": "Yes, please!!"} + env next: modify initial modify initial: type: task command_type: shell + returns: + - name: spam + kind: json + - name: eggs + kind: json command: | - export MAGNUS_PRM_spam='World' && \ - export MAGNUS_PRM_eggs='{"ham": "No, Thank you!!"}' + export spam='World' + export eggs='{"ham": "No, Thank you!!"}' next: display again display again: type: task command_type: shell + returns: + - name: spam + kind: json + - name: eggs + kind: json + command: | - env | grep 'MAGNUS_PRM_' && \ - export MAGNUS_PRM_spam='Universe' && \ - export MAGNUS_PRM_eggs='{"ham": "Maybe, one more.."}' - # prints - # MAGNUS_PRM_spam="World" - # MAGNUS_PRM_eggs={"ham": "No, Thank you!!"} - # and sets new values + env && \ + export spam='Universe' && \ + export eggs='{"ham": "Maybe, one more.."}' next: success success: type: success diff --git a/examples/concepts/task_shell_simple.yaml b/examples/concepts/task_shell_simple.yaml index 4109fc54..afe55bcc 100644 --- a/examples/concepts/task_shell_simple.yaml +++ b/examples/concepts/task_shell_simple.yaml @@ -3,11 +3,11 @@ dag: This is a sample pipeline with one step that executes a shell command. The step name "shell" has the "command_type" to be shell to - let magnus know to execute a shell while the command is directly + let runnable know to execute a shell while the command is directly executed in the current environment. 
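The deleted task_api_parameters.py, task_env_parameters.py and task_native_parameters.py all routed parameters through magnus-specific channels: get_parameter/set_parameter, MAGNUS_PRM_ environment variables, or dynamic pydantic models. task_shell_parameters.yaml above now simply exports plain names and declares them under "returns". A minimal SDK sketch of that shell flow; note that "returns" on ShellTask is an assumption here, mirrored one-to-one from the YAML:

from runnable import Pipeline, ShellTask

modify = ShellTask(
    name="modify initial",
    command="export spam='World' && export eggs='{\"ham\": \"No, Thank you!!\"}'",
    returns=["spam", "eggs"],  # collected from the step's environment
)

display = ShellTask(
    name="display again",
    command="env | grep -E '^(spam|eggs)='",
    terminate_with_success=True,
)

pipeline = Pipeline(steps=[modify, display], add_terminal_nodes=True)
pipeline.execute(parameters_file="examples/concepts/parameters.yaml")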
You can run this pipeline as: - magnus execute -f examples/concepts/task_shell_simple.yaml + runnable execute -f examples/concepts/task_shell_simple.yaml start_at: shell steps: diff --git a/examples/concepts/traversal.py b/examples/concepts/traversal.py deleted file mode 100644 index b648857b..00000000 --- a/examples/concepts/traversal.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -This is a stubbed pipeline that does 4 steps in sequence. -All the steps are mocked and they will just pass through. -Use this pattern to define the skeleton of your pipeline and -flesh out the steps later. - -You can run this pipeline by python run examples/pipelines/traversal.py -""" - -from magnus import Pipeline, Stub - - -def main(): - step_1 = Stub(name="Step 1") - - step_2 = Stub(name="Step 2") - - step_3 = Stub(name="Step 3", terminate_with_success=True) - - # link nodes - step_1 >> step_2 >> step_3 - - """ - or - step_1 << step_2 << step_3 - or - - step_2.depends_on(step_1) - step_3.depends_on(step_2) - """ - - pipeline = Pipeline( - steps=[step_1, step_2, step_3], - start_at=step_1, - add_terminal_nodes=True, - ) - - run_log = pipeline.execute() - print(run_log) - - -if __name__ == "__main__": - main() diff --git a/examples/concepts/traversal.yaml b/examples/concepts/traversal.yaml deleted file mode 100644 index 2b6af4d9..00000000 --- a/examples/concepts/traversal.yaml +++ /dev/null @@ -1,25 +0,0 @@ -dag: - description: | - This is a stubbed pipeline that does 3 steps in sequence. - All the steps are mocked and they will just pass through. - Use this pattern to define the skeleton of your pipeline - and flesh out the steps later. - - You can run this pipeline by: - magnus execute -f examples/pipelines/traversal.yaml - - start_at: step 1 - steps: - step 1: - type: stub - next: step 2 - step 2: - type: stub - next: step 3 - step 3: - type: stub - next: success - success: - type: success - fail: - type: fail diff --git a/examples/configs/argo-config-catalog.yaml b/examples/configs/argo-config-catalog.yaml index 6b58d7ae..800deb63 100644 --- a/examples/configs/argo-config-catalog.yaml +++ b/examples/configs/argo-config-catalog.yaml @@ -4,7 +4,7 @@ executor: image: $argo_docker_image # (2) service_account_name: default-editor persistent_volumes: # (3) - - name: magnus-volume + - name: runnable-volume mount_path: /mnt run_log_store: # (4) diff --git a/examples/configs/argo-config-full.yaml b/examples/configs/argo-config-full.yaml index 911dd88c..12499d57 100644 --- a/examples/configs/argo-config-full.yaml +++ b/examples/configs/argo-config-full.yaml @@ -22,7 +22,7 @@ executor: tolerations: # apply to spec image_pull_policy: "" # apply to template persistent_volumes: # (3) - - name: magnus-volume + - name: runnable-volume mount_path: /mnt # overrides: diff --git a/examples/configs/argo-config-sequential.yaml b/examples/configs/argo-config-sequential.yaml index 9fdf8c13..6bc0853a 100644 --- a/examples/configs/argo-config-sequential.yaml +++ b/examples/configs/argo-config-sequential.yaml @@ -4,7 +4,7 @@ executor: image: $argo_docker_image # (2) service_account_name: default-editor persistent_volumes: # (3) - - name: magnus-volume + - name: runnable-volume mount_path: /mnt overrides: sequential: diff --git a/examples/configs/argo-config.yaml b/examples/configs/argo-config.yaml index b964e0b2..4b508f61 100644 --- a/examples/configs/argo-config.yaml +++ b/examples/configs/argo-config.yaml @@ -1,10 +1,10 @@ executor: type: "argo" # (1) config: - image: magnus:demo # (2) + image: runnable:demo # (2) service_account_name: 
default-editor persistent_volumes: # (3) - - name: magnus-volume + - name: runnable-volume mount_path: /mnt run_log_store: # (4) @@ -17,6 +17,3 @@ catalog: secrets: type: do-nothing - -experiment_tracker: - type: do-nothing diff --git a/examples/configs/argo-pipeline.yaml b/examples/configs/argo-pipeline.yaml index ffaa437e..2b0f009b 100644 --- a/examples/configs/argo-pipeline.yaml +++ b/examples/configs/argo-pipeline.yaml @@ -1,12 +1,12 @@ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: magnus-dag- + generateName: runnable-dag- annotations: {} labels: {} spec: activeDeadlineSeconds: 172800 - entrypoint: magnus-dag + entrypoint: runnable-dag podGC: strategy: OnPodCompletion retryStrategy: @@ -17,7 +17,7 @@ spec: factor: 2 maxDuration: "3600" templates: - - name: magnus-dag + - name: runnable-dag failFast: true dag: tasks: @@ -29,9 +29,9 @@ spec: depends: simple-task-6mn2ll.Succeeded - name: simple-task-6mn2ll container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - simple @@ -54,9 +54,9 @@ spec: cpu: 250m - name: success-success-0uvo9r container: - image: harbor.csis.astrazeneca.net/mlops/magnus:latest + image: harbor.csis.astrazeneca.net/mlops/runnable:latest command: - - magnus + - runnable - execute_single_node - "{{workflow.parameters.run_id}}" - success @@ -87,4 +87,4 @@ spec: volumes: - name: executor-0 persistentVolumeClaim: - claimName: magnus-volume + claimName: runnable-volume diff --git a/examples/configs/default.yaml b/examples/configs/default.yaml index c29165bb..a1953a1c 100644 --- a/examples/configs/default.yaml +++ b/examples/configs/default.yaml @@ -9,6 +9,3 @@ catalog: secrets: type: do-nothing # (4) - -experiment_tracker: # (5) - type: do-nothing diff --git a/examples/configs/local-container.yaml b/examples/configs/local-container.yaml index 2592a30c..b16589a4 100644 --- a/examples/configs/local-container.yaml +++ b/examples/configs/local-container.yaml @@ -1,7 +1,7 @@ executor: type: "local-container" # (1) config: - docker_image: $default_docker_image # (2) + docker_image: runnable:latest # (2) environment: key: value # (3) @@ -13,6 +13,3 @@ catalog: secrets: type: do-nothing - -experiment_tracker: - type: do-nothing diff --git a/examples/configs/mlflow-config.yaml b/examples/configs/mlflow-config.yaml deleted file mode 100644 index 2b02e58c..00000000 --- a/examples/configs/mlflow-config.yaml +++ /dev/null @@ -1,4 +0,0 @@ -experiment_tracker: - type: mlflow - config: - server_url: http://127.0.0.1:8080 diff --git a/examples/configs/retry-config.yaml b/examples/configs/retry-config.yaml new file mode 100644 index 00000000..7e42e56f --- /dev/null +++ b/examples/configs/retry-config.yaml @@ -0,0 +1,10 @@ +executor: + type: retry + config: + run_id: parallel-fail + +catalog: + type: file-system # (1) + +run_log_store: + type: file-system # (1) diff --git a/examples/configs/secrets-env-ps.yaml b/examples/configs/secrets-env-ps.yaml index 1534ece1..3aaad3bd 100644 --- a/examples/configs/secrets-env-ps.yaml +++ b/examples/configs/secrets-env-ps.yaml @@ -1,4 +1,4 @@ secrets: type: env-secrets-manager config: - prefix: "magnus_" + prefix: "runnable_" diff --git a/examples/contrived.py b/examples/contrived.py deleted file mode 100644 index c1d86fb2..00000000 --- a/examples/contrived.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -This is a stubbed pipeline that does 4 steps in sequence. 
-All the steps are mocked and they will just pass through. -Use this pattern to define the skeleton of your pipeline and flesh out the steps later. - -You can run this pipeline by python run examples/contrived.py -""" - -from magnus import Pipeline, Stub - - -def main(): - acquire_data = Stub(name="Acquire Data", next="Prepare Data") # (1) - - prepare_data = Stub(name="Prepare Data") - - extract_features = Stub(name="Extract Features").depends_on(prepare_data) - - modelling = Stub(name="Model", terminate_with_success=True) # (2) - - extract_features >> modelling # (3) - - pipeline = Pipeline( - steps=[acquire_data, prepare_data, extract_features, modelling], - start_at=acquire_data, - add_terminal_nodes=True, - ) # (4) - - run_log = pipeline.execute() # (5) - print(run_log) - - -if __name__ == "__main__": - main() diff --git a/examples/contrived.yaml b/examples/contrived.yaml deleted file mode 100644 index 209ddfc3..00000000 --- a/examples/contrived.yaml +++ /dev/null @@ -1,26 +0,0 @@ -dag: - description: | - This is a stubbed pipeline that does 4 steps in sequence. - All the steps are mocked and they will just pass through. - Use this pattern to define the skeleton of your pipeline and flesh out the steps later. - - You can run this pipeline by magnus execute -f examples/contrived.yaml - - start_at: Acquire data - steps: - Acquire data: - type: stub # (1) - next: Prepare data # (2) - Prepare data: - type: stub - next: Extract features - Extract features: - type: stub - next: Model - Model: - type: stub - next: success - success: # (3) - type: success - fail: # (4) - type: fail diff --git a/examples/default-fail.yaml b/examples/default-fail.yaml index 283b480b..7dde5464 100644 --- a/examples/default-fail.yaml +++ b/examples/default-fail.yaml @@ -8,7 +8,7 @@ dag: You can control the flow by using on_failure, please check example/on-failure.yaml - You can run this pipeline by magnus execute -f examples/default-fail.yaml + You can run this pipeline by runnable execute -f examples/default-fail.yaml start_at: step 1 steps: step 1: diff --git a/examples/executors/step_overrides_container.py b/examples/executors/step_overrides_container.py index e29b5b11..b3b45b8c 100644 --- a/examples/executors/step_overrides_container.py +++ b/examples/executors/step_overrides_container.py @@ -10,20 +10,20 @@ You can run this example using two steps: Generates yaml file: - MAGNUS_CONFIGURATION_FILE=examples/executors/local-container-override.yaml \ + runnable_CONFIGURATION_FILE=examples/executors/local-container-override.yaml \ python examples/executors/step_overrides_container.py - # Create the docker image with the pipeline magnus-pipeline.yaml as part of it. + # Create the docker image with the pipeline runnable-pipeline.yaml as part of it. 
Execute the pipeline using the CLI: - MAGNUS_VAR_default_docker_image=magnus:3.8 \ - MAGNUS_VAR_custom_docker_image=magnus:3.9 \ - magnus execute -f magnus-pipeline.yaml -c examples/executors/local-container-override.yaml + runnable_VAR_default_docker_image=runnable:3.8 \ + runnable_VAR_custom_docker_image=runnable:3.9 \ + runnable execute -f runnable-pipeline.yaml -c examples/executors/local-container-override.yaml """ -from magnus import Pipeline, Task +from runnable import Pipeline, Task def main(): @@ -41,10 +41,7 @@ def main(): overrides={"local-container": "custom_docker_image"}, ) - step1 >> step2 - pipeline = Pipeline( - start_at=step1, steps=[step1, step2], add_terminal_nodes=True, ) diff --git a/examples/executors/step_overrides_container.yaml b/examples/executors/step_overrides_container.yaml index 6ee00502..1ea32c9d 100644 --- a/examples/executors/step_overrides_container.yaml +++ b/examples/executors/step_overrides_container.yaml @@ -9,7 +9,7 @@ dag: in overrides section of executor config. You can execute the pipeline by: - magnus execute -f examples/executors/step_overrides_container.yaml \ + runnable execute -f examples/executors/step_overrides_container.yaml \ -c examples/executors/local-container-override.yaml start_at: step 1 steps: diff --git a/examples/experiment_tracking_api.py b/examples/experiment_tracking_api.py deleted file mode 100644 index 1d88faaf..00000000 --- a/examples/experiment_tracking_api.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -An example pipeline to demonstrate setting experiment tracking metrics - using environment variables. Any environment variable with prefix - 'MAGNUS_TRACK_' will be recorded as a metric captured during the step. - - You can run this pipeline as: - python examples/experiment_tracking_api.py - - The mlflow server is expected to be running at http://127.0.0.1:8080 -""" - - -from pydantic import BaseModel - -from magnus import Pipeline, Task, track_this - - -class EggsModel(BaseModel): - ham: str - - -def emit_metrics(): - """ - A function that populates experiment tracker with metrics. - - track_this can take many keyword arguments. - Nested structures are supported by pydantic models. - """ - track_this(spam="hello", eggs=EggsModel(ham="world")) # (1) - track_this(answer=42.0) - track_this(is_it_true=False) - - -def main(): - metrics = Task( - name="Emit Metrics", - command="examples.experiment_tracking_api.emit_metrics", - terminate_with_success=True, - ) - - pipeline = Pipeline( - steps=[metrics], - start_at=metrics, - add_terminal_nodes=True, - ) - - pipeline.execute(configuration_file="examples/configs/mlflow-config.yaml") # (2) - - -if __name__ == "__main__": - main() diff --git a/examples/experiment_tracking_env.yaml b/examples/experiment_tracking_env.yaml deleted file mode 100644 index 92663515..00000000 --- a/examples/experiment_tracking_env.yaml +++ /dev/null @@ -1,26 +0,0 @@ -dag: - description: | - An example pipeline to demonstrate setting experiment tracking metrics - using environment variables. Any environment variable with prefix - 'MAGNUS_TRACK_' will be recorded as a metric captured during the step. 
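Both experiment-tracking examples deleted here, the track_this API and the MAGNUS_TRACK_ environment-variable flavour, go away along with the mlflow config; metrics become ordinary task returns. A minimal sketch of the replacement pattern, using the metric helper that examples/iris_demo.py later in this diff imports from runnable; the value below is illustrative:

from runnable import Pipeline, PythonTask, metric


def emit_metrics() -> float:
    score = 0.9  # illustrative value; a real step would compute this
    return score


metrics_task = PythonTask(
    name="emit metrics",
    function=emit_metrics,
    returns=[metric("score")],  # recorded as a metric, not a parameter
    terminate_with_success=True,
)

pipeline = Pipeline(steps=[metrics_task], add_terminal_nodes=True)
pipeline.execute()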
- - You can run this pipeline as: - magnus execute -f examples/concepts/experiment_tracking_env.yaml \ - -c examples/configs/mlflow-config.yaml - - The mlflow server is expected to be running at http://127.0.0.1:8080 - - start_at: shell - steps: - shell: - type: task - command_type: shell - command: | - export MAGNUS_TRACK_spam="hello" - export MAGNUS_TRACK_eggs='{"ham": "world"}' - export MAGNUS_TRACK_answer="42.0" - next: success - success: - type: success - fail: - type: fail diff --git a/examples/functions.py b/examples/functions.py deleted file mode 100644 index 8b8cff97..00000000 --- a/examples/functions.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Utility functions used in examples. -""" - -import logging - -from pydantic import BaseModel - -# Magnus logging levels are different to your logging levels. -logger = logging.getLogger("application") -logger.setLevel(logging.DEBUG) - - -class InnerModel(BaseModel): - """ - A pydantic model representing a group of related parameters. - """ - - foo: int - bar: str - - -class Parameter(BaseModel): - """ - A pydantic model representing the parameters of the whole pipeline. - """ - - x: int - y: InnerModel - - -def return_parameter() -> Parameter: - """ - A example python task that does something interesting and returns - a parameter to be used in downstream steps. - - The annotation of the return type of the function is not mandatory - but it is a good practice. - - Returns: - Parameter: The parameters that should be used in downstream steps. - """ - # Return type of a function should be a pydantic model - return Parameter(x=1, y=InnerModel(foo=10, bar="hello world")) - - -def display_parameter(x: int, y: InnerModel): - """ - An example python task that does something interesting with input parameters. - - Annotating the arguments of the function is important for - magnus to understand the type of parameters you want. - - Without annotations, magnus would return a python dictionary. - - Input args can be a pydantic model or the individual attributes of the non-nested model - """ - print(x) - # >>> prints 1 - print(y) - # >>> prints InnerModel(foo=10, bar="hello world") - logger.info(f"I got a parameter: {x}") - logger.info(f"I got another parameter: {y}") - - -""" -Without any framework, the "driver" code would be the main function. -""" - - -def main(): - """ - This is not required for magnus to run! 
- """ - my_param = return_parameter() - display_parameter(my_param.x, my_param.y) - - -if __name__ == "__main__": - main() diff --git a/examples/generated-argo-pipeline.yaml b/examples/generated-argo-pipeline.yaml index 290dd5a2..ac8faf20 100644 --- a/examples/generated-argo-pipeline.yaml +++ b/examples/generated-argo-pipeline.yaml @@ -1,30 +1,30 @@ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: magnus-dag- + generateName: runnable-dag- annotations: {} labels: {} spec: activeDeadlineSeconds: 172800 - entrypoint: magnus-dag + entrypoint: runnable-dag podGC: strategy: OnPodCompletion retryStrategy: - limit: '0' + limit: "0" retryPolicy: Always backoff: - duration: '120' + duration: "120" factor: 2 - maxDuration: '3600' + maxDuration: "3600" serviceAccountName: default-editor templates: - - name: magnus-dag + - name: runnable-dag failFast: true dag: tasks: - name: Acquire-data-stub-zl7utt template: Acquire-data-stub-zl7utt - depends: '' + depends: "" - name: Prepare-data-stub-jkn77g template: Prepare-data-stub-jkn77g depends: Acquire-data-stub-zl7utt.Succeeded @@ -39,11 +39,11 @@ spec: depends: Model-stub-42qnma.Succeeded - name: Acquire-data-stub-zl7utt container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - - '{{workflow.parameters.run_id}}' + - "{{workflow.parameters.run_id}}" - Acquire%data - --log-level - WARNING @@ -54,7 +54,7 @@ spec: volumeMounts: - name: executor-0 mountPath: /mnt - imagePullPolicy: '' + imagePullPolicy: "" resources: limits: memory: 1Gi @@ -64,11 +64,11 @@ spec: cpu: 250m - name: Prepare-data-stub-jkn77g container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - - '{{workflow.parameters.run_id}}' + - "{{workflow.parameters.run_id}}" - Prepare%data - --log-level - WARNING @@ -79,7 +79,7 @@ spec: volumeMounts: - name: executor-0 mountPath: /mnt - imagePullPolicy: '' + imagePullPolicy: "" resources: limits: memory: 1Gi @@ -89,11 +89,11 @@ spec: cpu: 250m - name: Extract-features-stub-jdonf3 container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - - '{{workflow.parameters.run_id}}' + - "{{workflow.parameters.run_id}}" - Extract%features - --log-level - WARNING @@ -104,7 +104,7 @@ spec: volumeMounts: - name: executor-0 mountPath: /mnt - imagePullPolicy: '' + imagePullPolicy: "" resources: limits: memory: 1Gi @@ -114,11 +114,11 @@ spec: cpu: 250m - name: Model-stub-42qnma container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - - '{{workflow.parameters.run_id}}' + - "{{workflow.parameters.run_id}}" - Model - --log-level - WARNING @@ -129,7 +129,7 @@ spec: volumeMounts: - name: executor-0 mountPath: /mnt - imagePullPolicy: '' + imagePullPolicy: "" resources: limits: memory: 1Gi @@ -139,11 +139,11 @@ spec: cpu: 250m - name: success-success-mk4nqv container: - image: magnus:demo + image: runnable:demo command: - - magnus + - runnable - execute_single_node - - '{{workflow.parameters.run_id}}' + - "{{workflow.parameters.run_id}}" - success - --log-level - WARNING @@ -154,7 +154,7 @@ spec: volumeMounts: - name: executor-0 mountPath: /mnt - imagePullPolicy: '' + imagePullPolicy: "" resources: limits: memory: 1Gi @@ -168,8 +168,8 @@ spec: arguments: parameters: - name: run_id - value: '{{workflow.uid}}' + value: "{{workflow.uid}}" volumes: - name: executor-0 persistentVolumeClaim: - claimName: magnus-volume + claimName: runnable-volume 
diff --git a/examples/iris_demo.py b/examples/iris_demo.py new file mode 100644 index 00000000..0283b310 --- /dev/null +++ b/examples/iris_demo.py @@ -0,0 +1,101 @@ +""" +Example of Logistic regression using scikit-learn +https://scikit-learn.org/stable/auto_examples/linear_model/plot_iris_logistic.html +""" + +import matplotlib.pyplot as plt +import numpy as np +from sklearn import datasets +from sklearn.inspection import DecisionBoundaryDisplay +from sklearn.linear_model import LogisticRegression + + +def load_data(): + # import some data to play with + iris = datasets.load_iris() + X = iris.data[:, :2] # we only take the first two features. + Y = iris.target + + return X, Y + + +def model_fit(X: np.ndarray, Y: np.ndarray, C: float = 1e5): + logreg = LogisticRegression(C=C) + logreg.fit(X, Y) + + return logreg + + +def generate_plots(X: np.ndarray, Y: np.ndarray, logreg: LogisticRegression): + _, ax = plt.subplots(figsize=(4, 3)) + DecisionBoundaryDisplay.from_estimator( + logreg, + X, + cmap=plt.cm.Paired, + ax=ax, + response_method="predict", + plot_method="pcolormesh", + shading="auto", + xlabel="Sepal length", + ylabel="Sepal width", + eps=0.5, + ) + + # Plot also the training points + plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors="k", cmap=plt.cm.Paired) + + plt.xticks(()) + plt.yticks(()) + + plt.savefig("iris_logistic.png") + + return 0.6 + + +## Without any orchestration +def main(): + X, Y = load_data() + logreg = model_fit(X, Y, C=1.0) + generate_plots(X, Y, logreg) + + +## With runnable orchestration +def runnable_pipeline(): + # The below code can be anywhere + from runnable import Catalog, Pipeline, PythonTask, metric, pickled + + # X, Y = load_data() + load_data_task = PythonTask( + function=load_data, + name="load_data", + returns=[pickled("X"), pickled("Y")], # (1) + ) + + # logreg = model_fit(X, Y, C=1.0) + model_fit_task = PythonTask( + function=model_fit, + name="model_fit", + returns=[pickled("logreg")], + ) + + # generate_plots(X, Y, logreg) + generate_plots_task = PythonTask( + function=generate_plots, + name="generate_plots", + terminate_with_success=True, + catalog=Catalog(put=["iris_logistic.png"]), # (2) + returns=[metric("score")], + ) + + pipeline = Pipeline( + steps=[load_data_task, model_fit_task, generate_plots_task], + ) # (4) + + pipeline.execute() + + return pipeline + + +if __name__ == "__main__": + # main() + runnable_pipeline() diff --git a/examples/logging.yaml b/examples/logging.yaml deleted file mode 100644 index fdc9f6f0..00000000 --- a/examples/logging.yaml +++ /dev/null @@ -1,29 +0,0 @@ -dag: - description: | - This is a simple pipeline that does 3 steps in sequence. - - step 1 >> step 2 >> success - - Log levels: - magnus log level is different from your application log level. By default, it is set to WARNING but you can - control it by using --log-level while calling application. 
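The deleted logging.yaml made a point that still holds: runnable's --log-level flag controls the framework's own logs, while the application configures its logging independently. A minimal stdlib-only sketch of the application side, following the pattern from the deleted examples/functions.py:

import logging

# Application logging is configured as usual and is unaffected by
# runnable's --log-level switch.
logging.basicConfig()
logger = logging.getLogger("application")
logger.setLevel(logging.DEBUG)
logger.debug("visible regardless of runnable's log level")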
- - To view only your application logs, you can execute this pipeline with: - magnus execute logging.yaml --log-level ERROR - - start_at: step 1 - steps: - step 1: - type: task - command_type: python - command: examples.functions.return_parameter - next: step 2 - step 2: - type: task - command_type: python - command: examples.functions.display_parameter - next: success - success: - type: success - fail: - type: fail diff --git a/examples/mocking.py b/examples/mocking.py index ffa1fc7a..a7ae7d11 100644 --- a/examples/mocking.py +++ b/examples/mocking.py @@ -13,29 +13,21 @@ python examples/mocking.py """ - -from magnus import Pipeline, Stub +from runnable import Pipeline, Stub def main(): step1 = Stub(name="step1") # (1) - step2 = Stub(name="step2", what="is this thing").depends_on(step1) # (2) + step2 = Stub(name="step2", what="is this thing") step3 = Stub(name="step3", terminate_with_success=True) # (3) - step2 >> step3 - """ - Equivalents: - step3.depends_on(step2) - step3 << step2 - - Choose the definition that you prefer - """ - - pipeline = Pipeline(start_at=step1, steps=[step1, step2, step3], add_terminal_nodes=True) # (4) + pipeline = Pipeline(steps=[step1, step2, step3], add_terminal_nodes=True) # (4) pipeline.execute() + return pipeline + if __name__ == "__main__": main() diff --git a/examples/mocking.yaml b/examples/mocking.yaml index fe27f2b8..d4921f2d 100644 --- a/examples/mocking.yaml +++ b/examples/mocking.yaml @@ -11,7 +11,7 @@ dag: to mock steps within mature pipelines. You can run this pipeline by: - magnus execute -f examples/mocking.yaml + runnable execute -f examples/mocking.yaml start_at: step 1 steps: step 1: diff --git a/examples/on-failure.yaml b/examples/on-failure.yaml index 7249c695..02ae42d8 100644 --- a/examples/on-failure.yaml +++ b/examples/on-failure.yaml @@ -10,7 +10,7 @@ dag: step 1 (FAIL) >> step 3 >> success - You can run this pipeline by magnus execute -f examples/on-failure.yaml + You can run this pipeline by runnable execute -f examples/on-failure.yaml start_at: step 1 steps: step 1: diff --git a/examples/on_failure.py b/examples/on_failure.py index 40c96f4b..b8464e04 100644 --- a/examples/on_failure.py +++ b/examples/on_failure.py @@ -13,26 +13,26 @@ python examples/on_failure.py """ -from magnus import Pipeline, Stub, Task +from runnable import Pipeline, ShellTask, Stub def main(): - step_1 = Task(name="step 1", command="exit 1", command_type="shell") + step_1 = ShellTask(name="step 1", command="exit 1") # This will fail + step_2 = Stub(name="step 2") step_3 = Stub(name="step 3", terminate_with_success=True) step_1.on_failure = step_3.name - step_1 >> step_2 >> step_3 - pipeline = Pipeline( steps=[step_1, step_2, step_3], - start_at=step_1, add_terminal_nodes=True, ) pipeline.execute() + return pipeline + if __name__ == "__main__": main() diff --git a/examples/parallel-fail.yaml b/examples/parallel-fail.yaml index e622d9e2..589a9710 100644 --- a/examples/parallel-fail.yaml +++ b/examples/parallel-fail.yaml @@ -6,7 +6,7 @@ dag: Note that the branches schema is same as dag schema. 
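The branches in parallel-fail.yaml follow the same schema as the top-level dag, and the SDK mirrors this with the Parallel node imported in examples/concepts/parallel.py above. A minimal sketch; the "branches" keyword is an assumption based on that example's structure, since the Parallel instantiation itself sits outside the changed hunks:

from runnable import Parallel, Pipeline, Stub

xgboost = Pipeline(
    steps=[Stub(name="Train XGBoost", terminate_with_success=True)],
    add_terminal_nodes=True,
)
rf = Pipeline(
    steps=[Stub(name="Train RF", terminate_with_success=True)],
    add_terminal_nodes=True,
)

train_models = Parallel(
    name="Train Models",
    branches={"xgboost": xgboost, "rf": rf},  # assumed keyword
    terminate_with_success=True,
)

pipeline = Pipeline(steps=[train_models], add_terminal_nodes=True)
pipeline.execute()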
You can run this pipeline by: - magnus execute -f examples/parallel-fail.yaml + runnable execute -f examples/parallel-fail.yaml start_at: step 1 steps: step 1: diff --git a/examples/parameters.py b/examples/parameters.py index 1c7aeb20..d039fa83 100644 --- a/examples/parameters.py +++ b/examples/parameters.py @@ -9,6 +9,8 @@ """ +from typing import Tuple + from pydantic import BaseModel @@ -34,57 +36,48 @@ class NestedModel(BaseModel): # (1) def display(simple: int, inner: InnerModel): # (2) """ The parameter "simple" and "inner" can be accessed by name. - Magnus understands the parameter "inner" as a pydantic model from + runnable understands the parameter "inner" as a pydantic model from annotation and casts it as a pydantic model. """ print(simple) print(inner) -def return_parameters(simple: int, inner: InnerModel) -> NestedModel: # (3) +def return_parameters(simple: int, inner: InnerModel) -> Tuple[int, InnerModel]: # (3) """ The parameter "simple" and "inner" can be accessed by name. You can redefine the parameters by returning a pydantic model. """ simple = 2 inner.x = 30 - inner.y = "world!!" - - return NestedModel(simple=simple, inner=inner) - - -""" -The below code is only to provide a full working example. + inner.y = "Hello Universe!!" -In the real world, you can "box magnus" in pipeline definition either in -python or yaml without cluttering your application code. -""" + return simple, inner def main(): - from magnus import Pipeline, Task + from runnable import Pipeline, PythonTask - display = Task(name="display", command="examples.parameters.display") - return_parameters = Task( + display_task = PythonTask(name="display", function=display) + + return_parameters_task = PythonTask( name="return_parameters", - command="examples.parameters.return_parameters", + function=return_parameters, + returns=[ + "simple", + "inner", + ], terminate_with_success=True, ) - display >> return_parameters - pipeline = Pipeline( - start_at=display, - steps=[display, return_parameters], + steps=[display_task, return_parameters_task], add_terminal_nodes=True, ) - run_log = pipeline.execute(parameters_file="examples/parameters_initial.yaml") - params = run_log.parameters + _ = pipeline.execute(parameters_file="examples/parameters_initial.yaml") - ## Reflects the changes done by "return_parameters" function call. - assert params["simple"] == 2 - assert params["inner"] == {"x": 30, "y": "world!!"} + return pipeline if __name__ == "__main__": diff --git a/examples/parameters_api.py b/examples/parameters_api.py deleted file mode 100644 index 26c27ea2..00000000 --- a/examples/parameters_api.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -The initial parameters defined in the parameters file are: -simple: 1 -inner: - x: 10 - y: "hello" - - -You can run this pipeline by: python examples/parameters_api.py - -""" - -from pydantic import BaseModel - - -class InnerModel(BaseModel): - """ - Captures the "inner" part of the parameters. - The model definition can be as nested as you want. - """ - - x: int - y: str - - -class NestedModel(BaseModel): - """ - Captures the whole parameter space of the application. - """ - - simple: int - inner: InnerModel - - -def display(simple: int, inner: InnerModel): - """ - The parameter "simple" and "inner" can be accessed by name. 
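In the reworked examples/parameters.py above, return_parameters now hands back a plain tuple while the task declares returns=["simple", "inner"]; the natural reading is that the mapping is positional (first element to "simple", second to "inner"), though the diff does not state this outright. A stripped-down sketch of that wiring under this assumption:

from typing import Tuple

from pydantic import BaseModel

from runnable import Pipeline, PythonTask


class InnerModel(BaseModel):
    x: int
    y: str


def return_parameters() -> Tuple[int, InnerModel]:
    # first element -> "simple", second -> "inner" (assumed positional match)
    return 2, InnerModel(x=30, y="Hello Universe!!")


task = PythonTask(
    name="return_parameters",
    function=return_parameters,
    returns=["simple", "inner"],
    terminate_with_success=True,
)

Pipeline(steps=[task], add_terminal_nodes=True).execute()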
- Magnus understands the parameter "inner" as a pydantic model - from annotation and returns a pydantic model - """ - print(simple) - print(inner) - - -def set_and_get(): - """ - You can also use the python API for fine grained control if functional - specification does not fit your needs. - - get_parameter can be used to either - - return a specific parameter/model if a key is provided. - - return the whole parameter space casted as a - pydantic model or as a dictionary. - - set_parameter can be used to set a parameter/model. - - """ - from magnus import get_parameter, set_parameter - - # You can also get all the parameters as a pydantic model. - all_parameters = get_parameter(cast_as=NestedModel) # (1) - print(all_parameters) - ">>> # simple=1 inner=InnerModel(x=10, y='hello')" - - # get the parameter "inner" and cast it as InnerModel - b = get_parameter(key="inner", cast_as=InnerModel) - - b.x = 100 - b.y = "world" - - # set the parameter "inner" to the new value - set_parameter(inner=b) # (2) - - -""" -The below code is only to provide a full working example. - -In the real world, you can "box magnus" in pipeline definition -either in python or yaml without cluttering your application code. -""" - - -def main(): - from magnus import Pipeline, Task - - display = Task(name="display", command="examples.parameters.display") - - set_and_get = Task( - name="set_and_get", - command="examples.parameters.set_and_get", - terminate_with_success=True, - ) - - display >> set_and_get - - pipeline = Pipeline( - start_at=display, - steps=[display, set_and_get], - add_terminal_nodes=True, - ) - - pipeline.execute(parameters_file="examples/parameters_initial.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/parameters_env.yaml b/examples/parameters_env.yaml index e8c1567b..46d7e420 100644 --- a/examples/parameters_env.yaml +++ b/examples/parameters_env.yaml @@ -3,33 +3,43 @@ dag: This is a simple pipeline that demonstrates how to use environment variables to access parameters. - All parameters are prefixed by MAGNUS_PRM_ in json serialized form. + All parameters are prefixed by runnable_PRM_ in json serialized form. 
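The parameters_api.py example deleted above leaned on get_parameter/set_parameter for fine-grained control. In the new functional style, the same step reads parameters as typed arguments and writes them back through "returns". A minimal sketch, assuming a single returned value maps to a single returns entry the same way the tuple case does:

from pydantic import BaseModel

from runnable import Pipeline, PythonTask


class InnerModel(BaseModel):
    x: int
    y: str


def set_and_get(inner: InnerModel) -> InnerModel:
    # was: b = get_parameter(key="inner", cast_as=InnerModel)
    inner.x = 100
    inner.y = "world"
    # was: set_parameter(inner=b)
    return inner


task = PythonTask(
    name="set_and_get",
    function=set_and_get,
    returns=["inner"],
    terminate_with_success=True,
)

pipeline = Pipeline(steps=[task], add_terminal_nodes=True)
pipeline.execute(parameters_file="examples/parameters_initial.yaml")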
To set a parameter, you need to set the environment variable with the prefix You can run this example: - magnus execute -f examples/parameters_env.yaml -p examples/parameters_initial.yaml + runnable execute -f examples/parameters_env.yaml -p examples/parameters_initial.yaml start_at: display steps: display: type: task command_type: shell - command: env | grep "MAGNUS_PRM" # (1) - # prints MAGNUS_PRM_simple=1 - # prints MAGNUS_PRM_inner={"x": 10, "y": "hello"} + command: | + env | grep simple + env | grep inner + # prints simple=1 + # prints inner={"x": 10, "y": "hello world!!"} next: update params update params: type: task command_type: shell next: display again - command: | # (2) - export MAGNUS_PRM_simple=10 && - export MAGNUS_PRM_inner='{"x": 100, "y": "world"}' + command: | + export simple=10 && + export inner='{"x": 100, "y": "hello universe!!"}' + returns: # collect simple and inner from environment + - name: simple + kind: json + - name: inner + kind: json display again: type: task - command: examples.parameters.display # (3) - # prints MAGNUS_PRM_simple=10 - # prints MAGNUS_PRM_inner={"x": 100, "y": "world"} + command_type: shell + command: | + env | grep simple + env | grep inner + # prints simple=1 + # prints inner={"x": 100, "y": "hello universe!!"} next: success success: type: success diff --git a/examples/parameters_flow.yaml b/examples/parameters_flow.yaml deleted file mode 100644 index 61e3412d..00000000 --- a/examples/parameters_flow.yaml +++ /dev/null @@ -1,21 +0,0 @@ -dag: - description: | - Setting and accessing parameters - start_at: access initial parameters - steps: - access initial parameters: - type: task - command: examples.parameters.display - next: return parameters - return parameters: - type: task - command: examples.parameters.return_parameters - next: show final parameters - show final parameters: - type: task - command: examples.parameters.display - next: success - success: - type: success - fail: - type: fail diff --git a/examples/parameters_initial.yaml b/examples/parameters_initial.yaml index fb0a5d9e..e88e14c0 100644 --- a/examples/parameters_initial.yaml +++ b/examples/parameters_initial.yaml @@ -1,4 +1,4 @@ simple: 1 inner: x: 10 - y: "hello" + y: "hello world!!" diff --git a/examples/parameters_simple.py b/examples/parameters_simple.py new file mode 100644 index 00000000..28c20f6d --- /dev/null +++ b/examples/parameters_simple.py @@ -0,0 +1,93 @@ +""" +The initial parameters defined in the parameters file are: +simple: 1 +inner: + x: 10 + y: "hello" + +You can execute this pipeline by: python examples/parameters.py + +""" + +from pydantic import BaseModel + + +class InnerModel(BaseModel): + """ + Captures the "inner" part of the parameters. + The model definition can be as nested as you want. + """ + + x: int + y: str + + +class NestedModel(BaseModel): # (1) + """ + Captures the whole parameter space of the application. + """ + + simple: int + inner: InnerModel + + +def display(simple: int, inner: InnerModel): # (2) + """ + The parameter "simple" and "inner" can be accessed by name. + runnable understands the parameter "inner" as a pydantic model from + annotation and casts it as a pydantic model. + """ + assert simple == 1 + assert inner.x == 10 + assert inner.y == "hello world!!" + + +class ObjectType: + def __init__(self): + self.salute = "hello" + + +def return_parameters(): + """ + The parameter "simple" and "inner" can be accessed by name. + You can redefine the parameters by returning a pydantic model. + """ + x = 2 + y = "hello Universe!!" 
+ + return x, y, ObjectType() + + +def display_object(obj: ObjectType): + print(obj.salute) + + +def main(): + from runnable import Pipeline, PythonTask, pickled + + display_task = PythonTask(name="display", function=display) + + return_parameters_task = PythonTask( + name="return_parameters", + function=return_parameters, + returns=["x", "y", pickled("obj")], + ) + + display_object_task = PythonTask( + name="display_object", + function=display_object, + terminate_with_success=True, + ) + + pipeline = Pipeline( + steps=[display_task, return_parameters_task, display_object_task], + add_terminal_nodes=True, + ) + + _ = pipeline.execute(parameters_file="examples/parameters_initial.yaml") + + return pipeline + + +if __name__ == "__main__": + main() diff --git a/examples/python-tasks-argo.py b/examples/python-tasks-argo.py deleted file mode 100644 index dcb48122..00000000 --- a/examples/python-tasks-argo.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -This is a simple pipeline that does 2 steps in sequence. - In this example: - 1. First step: returns a "parameter" x as a Pydantic model - 2. Second step: Consumes that parameter and prints it - - This pipeline demonstrates one way to pass small data from one step to another. - - You can run this pipeline by: python examples/python-tasks.py -""" - -from magnus import Pipeline, Task - - -def main(): - step1 = Task( - name="step1", - command="examples.functions.return_parameter", - ) # (1) - step2 = Task( - name="step2", - command="examples.functions.display_parameter", - terminate_with_success=True, - ).depends_on( - step1 - ) # (2), (3) - - pipeline = Pipeline( - start_at=step1, - steps=[step1, step2], - add_terminal_nodes=True, - ) # (4) - - pipeline.execute(configuration_file="examples/configs/argo-config.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/python-tasks.py b/examples/python-tasks.py deleted file mode 100644 index d5a67311..00000000 --- a/examples/python-tasks.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -This is a simple pipeline that does 2 steps in sequence. - In this example: - 1. First step: returns a "parameter" x as a Pydantic model - 2. Second step: Consumes that parameter and prints it - - This pipeline demonstrates one way to pass small data from one step to another. - - You can run this pipeline by: python examples/python-tasks.py -""" -from magnus import Pipeline, Task - - -def main(): - step1 = Task( - name="step1", - command="examples.functions.return_parameter", - ) # (1) - step2 = Task( - name="step2", - command="examples.functions.display_parameter", - terminate_with_success=True, - ).depends_on( - step1 - ) # (2), (3) - - pipeline = Pipeline( - start_at=step1, - steps=[step1, step2], - add_terminal_nodes=True, - ) # (4) - - pipeline.execute() - - -if __name__ == "__main__": - main() diff --git a/examples/python-tasks.yaml b/examples/python-tasks.yaml index de805b3e..fb8ad48d 100644 --- a/examples/python-tasks.yaml +++ b/examples/python-tasks.yaml @@ -1,26 +1,33 @@ dag: description: | - This is a simple pipeline that does 3 steps in sequence. - In this example: - 1. First step: returns a "parameter" x as a Pydantic model - 2. Second step: Consumes that parameter and prints it + This is a simple pipeline that does 3 steps + in sequence. - This pipeline demonstrates one way to pass small data from one step to another. + In this example: + 1. First step: returns a "parameter" x + as a Pydantic model + 2. 
Second step: Consumes that parameter + and prints it - You can run this pipeline by: magnus execute -f examples/python-tasks.yaml - start_at: step 1 + You can run this pipeline by: + runnable execute -f examples/python-tasks.yaml + start_at: step 1 # (1) steps: - step 1: + step 1: # (2) type: task - command_type: python # (2) - command: examples.functions.return_parameter # (1) - next: step 2 + command: examples.functions.return_parameter # (3) + returns: + - name: x + kind: json + - name: y + kind: json + next: step 2 # (4) step 2: type: task command_type: python command: examples.functions.display_parameter - next: success + next: success # (5) success: - type: success + type: success # (6) fail: type: fail diff --git a/examples/retry-fail.yaml b/examples/retry-fail.yaml index 777895ef..66eca916 100644 --- a/examples/retry-fail.yaml +++ b/examples/retry-fail.yaml @@ -9,7 +9,7 @@ dag: You can run this pipeline by: - magnus execute -f examples/retry-fail.yaml -c examples/configs/fs-catalog-run_log.yaml \ + runnable execute -f examples/retry-fail.yaml -c examples/configs/fs-catalog-run_log.yaml \ --run-id wrong-file-name start_at: Setup steps: @@ -22,7 +22,7 @@ dag: type: task command_type: shell command: | - echo "Hello from magnus" >> data/hello.txt + echo "Hello from runnable" >> data/hello.txt next: Retrieve Content catalog: # (2) put: diff --git a/examples/retry-fixed.yaml b/examples/retry-fixed.yaml index 5c09574d..654d5709 100644 --- a/examples/retry-fixed.yaml +++ b/examples/retry-fixed.yaml @@ -11,7 +11,7 @@ dag: You can run this pipeline by: - magnus execute -f examples/retry-fixed.yaml -c examples/configs/fs-catalog-run_log.yaml \ + runnable execute -f examples/retry-fixed.yaml -c examples/configs/fs-catalog-run_log.yaml \ --use-cached wrong-file-name start_at: Setup @@ -25,7 +25,7 @@ dag: type: stub # (2) command_type: shell command: | - echo "Hello from magnus" >> data/hello.txt + echo "Hello from runnable" >> data/hello.txt next: Retrieve Content catalog: put: diff --git a/examples/secrets.env b/examples/secrets.env deleted file mode 100644 index 62cbd7c5..00000000 --- a/examples/secrets.env +++ /dev/null @@ -1,2 +0,0 @@ -export shell_type="shell type secret" # (1) -kv_style=value # A key value secret type. # (2) diff --git a/examples/secrets.py b/examples/secrets.py deleted file mode 100644 index 9fa7fa9d..00000000 --- a/examples/secrets.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -An example pipeline to demonstrate how to use the secrets manager. - -You can run this pipeline by: - python run examples/secrets.py -""" - -from magnus import get_secret - - -def show_secret(): - shell_variable = get_secret("shell_type") # (1) - key_value_type = get_secret("kv_style") - - assert shell_variable == "shell type secret" - assert key_value_type == "value" - - -def main(): - from magnus import Pipeline, Task - - show = Task( - name="show secret", - command="examples.secrets.show_secret", - terminate_with_success=True, - ) - - pipeline = Pipeline(steps=[show], start_at=show, add_terminal_nodes=True) - - pipeline.execute(configuration_file="examples/configs/dotenv.yaml") - - -if __name__ == "__main__": - main() diff --git a/examples/secrets_env.py b/examples/secrets_env.py deleted file mode 100644 index 9852aaae..00000000 --- a/examples/secrets_env.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -An example pipeline to demonstrate how to use the secrets manager. 
- -Run this pipeline by: - secret="secret_value" MAGNUS_CONFIGURATION_FILE=examples/configs/secrets-env-default.yaml \ - python examples/secrets_env.py - -""" - - -from magnus import get_secret - - -def show_secret(): - secret = get_secret("secret") - - assert secret == "secret_value" - - -def main(): - from magnus import Pipeline, Task - - show = Task( - name="show secret", - command="examples.secrets_env.show_secret", - terminate_with_success=True, - ) - - pipeline = Pipeline(steps=[show], start_at=show, add_terminal_nodes=True) - - pipeline.execute() - - -if __name__ == "__main__": - main() diff --git a/examples/tutorials/mnist/baseline_comparison.py b/examples/tutorials/mnist/baseline_comparison.py new file mode 100644 index 00000000..02ade587 --- /dev/null +++ b/examples/tutorials/mnist/baseline_comparison.py @@ -0,0 +1,277 @@ +from typing import List + +import numpy as np +from pydantic import BaseModel + + +class TrainParams(BaseModel): + input_shape: tuple = (28, 28, 1) + + kernel_size: tuple = (3, 3) + pool_size: tuple = (2, 2) + + conv_activation: str = "relu" + dense_activation: str = "softmax" + + loss: str = "categorical_crossentropy" + optimizer: str = "adam" + metrics: List[str] = ["accuracy"] + + batch_size: int = 128 + epochs: int = 15 + validation_split: float = 0.1 + + +class BaseLineParams(BaseModel): + num_pixels: int = 784 + kernel_initializer: str = "normal" + pixels_activation: str = "relu" + classes_activation: str = "softmax" + loss: str = "categorical_crossentropy" + optimizer: str = "adam" + + batch_size: int = 128 + epochs: int = 15 + validation_split: float = 0.1 + + metrics: List[str] = ["accuracy"] + + +def load_data(): + import keras + + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + + return x_train, y_train, x_test, y_test + + +def scale_data(x_train: np.ndarray, x_test: np.ndarray): + x_train = x_train.astype("float32") / 255 + x_test = x_test.astype("float32") / 255 + # Make sure images have shape (28, 28, 1) + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + + return x_train, x_test + + +def convert_to_categorically(y_train: np.ndarray, y_test: np.ndarray, num_classes: int): + import keras + + y_train = keras.utils.to_categorical(y_train, num_classes) + y_test = keras.utils.to_categorical(y_test, num_classes) + + return y_train, y_test + + +def build_model(train_params: TrainParams, num_classes: int): + import keras + from keras import layers + + model = keras.Sequential( + [ + keras.Input(shape=train_params.input_shape), + layers.Conv2D(32, train_params.kernel_size, activation=train_params.conv_activation), + layers.MaxPooling2D(pool_size=train_params.pool_size), + layers.Conv2D(64, kernel_size=train_params.kernel_size, activation=train_params.conv_activation), + layers.MaxPooling2D(pool_size=train_params.pool_size), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation=train_params.dense_activation), + ] + ) + + print(model.summary()) + + model.save("model.keras") + + +def build_baseline_model(baseline_params: BaseLineParams, num_classes: int): + import keras + + model = keras.Sequential() + model.add( + keras.layers.Dense( + baseline_params.num_pixels, + input_dim=baseline_params.num_pixels, + kernel_initializer=baseline_params.kernel_initializer, + activation=baseline_params.pixels_activation, + ) + ) + model.add( + keras.layers.Dense( + num_classes, + kernel_initializer=baseline_params.kernel_initializer, + activation=baseline_params.classes_activation, + ) + ) + + 
model.compile(loss=baseline_params.loss, optimizer=baseline_params.optimizer, metrics=baseline_params.metrics) + print(model.summary()) + + model.save("baseline_model.keras") + + +def train_model(x_train: np.ndarray, y_train: np.ndarray, train_params: TrainParams): + import keras + + model = keras.models.load_model("model.keras") + model.compile(loss=train_params.loss, optimizer=train_params.optimizer, metrics=train_params.metrics) + + model.fit( + x_train, + y_train, + batch_size=train_params.batch_size, + epochs=train_params.epochs, + validation_split=train_params.validation_split, + ) + + model.save("trained_model.keras") + + +def train_baseline_model(x_train: np.ndarray, y_train: np.ndarray, train_params: BaseLineParams): + import keras + + model = keras.models.load_model("baseline_model.keras") + model.compile(loss=train_params.loss, optimizer=train_params.optimizer, metrics=train_params.metrics) + + _x_train = x_train.reshape(x_train.shape[0], train_params.num_pixels).astype("float32") + # _y_train = y_train.reshape(y_train.shape[0], train_params.num_pixels).astype("float32") + + model.fit( + _x_train, + y_train, + batch_size=train_params.batch_size, + epochs=train_params.epochs, + validation_split=train_params.validation_split, + ) + + model.save("trained_baseline_model.keras") + + +def evaluate_model(x_test: np.ndarray, y_test: np.ndarray): + import keras + + trained_model = keras.models.load_model("trained_model.keras") + + score = trained_model.evaluate(x_test, y_test, verbose=0) + print("Test loss:", score[0]) + print("Test accuracy:", score[1]) + + return score + + +def evaluate_baseline_model(x_test: np.ndarray, y_test: np.ndarray, train_params: BaseLineParams): + import keras + + trained_model = keras.models.load_model("trained_baseline_model.keras") + + _x_test = x_test.reshape(x_test.shape[0], train_params.num_pixels).astype("float32") + + score = trained_model.evaluate(_x_test, y_test, verbose=0) + print("Test loss:", score[0]) + print("Test accuracy:", score[1]) + + return score + + +def main(): + from runnable import Catalog, Parallel, Pipeline, PythonTask, metric, pickled + + # x_train, y_train, x_test, y_test + load_data_task = PythonTask( + function=load_data, + name="load_data", + returns=[pickled("x_train"), pickled("y_train"), pickled("x_test"), pickled("y_test")], + ) + + # def scale_data(x_train: np.ndarray, x_test: np.ndarray) + scale_data_task = PythonTask( + function=scale_data, + name="scale_data", + returns=[pickled("x_train"), pickled("x_test")], + ) + + convert_to_categorically_task = PythonTask( + function=convert_to_categorically, + name="convert_to_categorically", + returns=[pickled("y_train"), pickled("y_test")], + ) + + build_model_task = PythonTask( + function=build_model, + name="build_model", + catalog=Catalog(put=["model.keras"]), + ) + + build_baseline_model_task = PythonTask( + function=build_baseline_model, + name="build_baseline_model", + catalog=Catalog(put=["baseline_model.keras"]), + ) + + train_model_task = PythonTask( + function=train_model, + name="train_model", + catalog=Catalog( + get=["model.keras"], + put=["trained_model.keras"], + ), + ) + + train_baseline_model_task = PythonTask( + function=train_baseline_model, + name="train_baseline_model", + catalog=Catalog( + get=["baseline_model.keras"], + put=["trained_baseline_model.keras"], + ), + ) + + evaluate_model_task = PythonTask( + function=evaluate_model, + name="evaluate_model", + catalog=Catalog( + get=["trained_model.keras"], + ), + returns=[metric("keras_score")], + 
terminate_with_success=True, + ) + + evaluate_baseline_model_task = PythonTask( + function=evaluate_baseline_model, + name="evaluate_baseline_model", + catalog=Catalog( + get=["trained_baseline_model.keras"], + ), + returns=[metric("baseline_score")], + terminate_with_success=True, + ) + + train_pipeline = Pipeline(steps=[build_model_task, train_model_task, evaluate_model_task]) + baseline_train = Pipeline( + steps=[build_baseline_model_task, train_baseline_model_task, evaluate_baseline_model_task] + ) + + parallel_step = Parallel( + name="train models", + branches={"train": train_pipeline, "baseline": baseline_train}, + terminate_with_success=True, + ) + + pipeline = Pipeline( + steps=[ + load_data_task, + scale_data_task, + convert_to_categorically_task, + parallel_step, + ], + ) + + pipeline.execute(parameters_file="examples/tutorials/mnist/parameters.yaml") + + return pipeline + + +if __name__ == "__main__": + main() diff --git a/examples/tutorials/mnist/hyper_parameter_tuning.py b/examples/tutorials/mnist/hyper_parameter_tuning.py new file mode 100644 index 00000000..6a150dbf --- /dev/null +++ b/examples/tutorials/mnist/hyper_parameter_tuning.py @@ -0,0 +1,176 @@ +from typing import List + +import keras +import numpy as np +from keras import layers +from pydantic import BaseModel + + +class TrainParams(BaseModel): + input_shape: tuple = (28, 28, 1) + + kernel_size: tuple = (3, 3) + pool_size: tuple = (2, 2) + + conv_activation: str = "relu" + dense_activation: str = "softmax" + + loss: str = "categorical_crossentropy" + optimizer: str = "adam" + metrics: List[str] = ["accuracy"] + + batch_size: int = 128 + epochs: int = 15 + validation_split: float = 0.1 + + +def load_data(): + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + + return x_train, y_train, x_test, y_test + + +def scale_data(x_train: np.ndarray, x_test: np.ndarray): + x_train = x_train.astype("float32") / 255 + x_test = x_test.astype("float32") / 255 + # Make sure images have shape (28, 28, 1) + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + + return x_train, x_test + + +def convert_to_categorically(y_train: np.ndarray, y_test: np.ndarray, num_classes: int): + y_train = keras.utils.to_categorical(y_train, num_classes) + y_test = keras.utils.to_categorical(y_test, num_classes) + + return y_train, y_test + + +def build_model(train_params: TrainParams, hp: List[int], num_classes: int): + hp_id = "_".join(map(str, hp)) + print(hp_id) + + _layers = [ + keras.Input(shape=train_params.input_shape), + ] + + for conv_layer_size in hp: + _layers.append( + keras.layers.Conv2D( + conv_layer_size, + train_params.kernel_size, + activation=train_params.conv_activation, + ) + ) + _layers.append(keras.layers.MaxPooling2D(pool_size=train_params.pool_size)) + + _layers.extend( + [ + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation=train_params.dense_activation), + ] + ) + + model = keras.Sequential(_layers) + model.compile(loss=train_params.loss, optimizer=train_params.optimizer, metrics=train_params.metrics) + + print(model.summary()) + + model.save(f"model{hp_id}.keras") + + +def train_model(x_train: np.ndarray, y_train: np.ndarray, train_params: TrainParams, hp: List[int]): + hp_id = "_".join(map(str, hp)) + model = keras.models.load_model(f"model{hp_id}.keras") + model.compile(loss=train_params.loss, optimizer=train_params.optimizer, metrics=train_params.metrics) + + model.fit( + x_train, + y_train, + batch_size=train_params.batch_size, + 
epochs=train_params.epochs, + validation_split=train_params.validation_split, + ) + + model.save(f"trained_model{hp_id}.keras") + + +def evaluate_model(x_test: np.ndarray, y_test: np.ndarray, hp: List[int]): + hp_id = "_".join(map(str, hp)) + trained_model = keras.models.load_model(f"trained_model{hp_id}.keras") + + score = trained_model.evaluate(x_test, y_test, verbose=0) + print("Test loss:", score[0]) + print("Test accuracy:", score[1]) + + return score + + +def main(): + from runnable import Catalog, Map, Pipeline, PythonTask, metric, pickled + + # x_train, y_train, x_test, y_test + load_data_task = PythonTask( + function=load_data, + name="load_data", + returns=[pickled("x_train"), pickled("y_train"), pickled("x_test"), pickled("y_test")], + ) + + # def scale_data(x_train: np.ndarray, x_test: np.ndarray) + scale_data_task = PythonTask( + function=scale_data, + name="scale_data", + returns=[pickled("x_train"), pickled("x_test")], + ) + + convert_to_categorically_task = PythonTask( + function=convert_to_categorically, + name="convert_to_categorically", + returns=[pickled("y_train"), pickled("y_test")], + ) + + build_model_task = PythonTask( + function=build_model, + name="build_model", + catalog=Catalog(put=["model*.keras"]), + ) + + train_model_task = PythonTask( + function=train_model, + name="train_model", + catalog=Catalog( + get=["*.keras"], + put=["*.keras"], + ), + ) + + evaluate_model_task = PythonTask( + function=evaluate_model, + name="evaluate_model", + returns=[metric("score")], + catalog=Catalog( + get=["*.keras"], + ), + terminate_with_success=True, + ) + + train_pipeline = Pipeline(steps=[build_model_task, train_model_task, evaluate_model_task]) + + hpt_step = Map( + name="hpt", + branch=train_pipeline, + iterate_on="hpt", + iterate_as="hp", + reducer="lambda *x: max(x, key=lambda x: x[1])", + terminate_with_success=True, + ) + + pipeline = Pipeline(steps=[load_data_task, scale_data_task, convert_to_categorically_task, hpt_step]) + + pipeline.execute(parameters_file="examples/tutorials/mnist/parameters.yaml") + + +if __name__ == "__main__": + main() diff --git a/examples/tutorials/mnist/modular_source.py b/examples/tutorials/mnist/modular_source.py new file mode 100644 index 00000000..67ad6ea5 --- /dev/null +++ b/examples/tutorials/mnist/modular_source.py @@ -0,0 +1,166 @@ +from typing import List + +import numpy as np + +num_classes: int = 10 +input_shape: tuple = (28, 28, 1) + +kernel_size: tuple = (3, 3) +pool_size: tuple = (2, 2) + +conv_activation: str = "relu" +dense_activation: str = "softmax" + +loss: str = "categorical_crossentropy" +optimizer: str = "adam" +metrics: List[str] = ["accuracy"] + +batch_size: int = 128 +epochs: int = 15 +validation_split: float = 0.1 + + +def load_data(): + import keras + + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + + return x_train, y_train, x_test, y_test + + +def scale_data(x_train: np.ndarray, x_test: np.ndarray): + x_train = x_train.astype("float32") / 255 + x_test = x_test.astype("float32") / 255 + # Make sure images have shape (28, 28, 1) + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + + return x_train, x_test + + +def convert_to_categorically(y_train: np.ndarray, y_test: np.ndarray): + import keras + + y_train = keras.utils.to_categorical(y_train, num_classes) + y_test = keras.utils.to_categorical(y_test, num_classes) + + return y_train, y_test + + +def build_model(): + import keras + from keras import layers + + model = keras.Sequential( + [ + 
keras.Input(shape=input_shape), + layers.Conv2D(32, kernel_size, activation=conv_activation), + layers.MaxPooling2D(pool_size=pool_size), + layers.Conv2D(64, kernel_size=kernel_size, activation=conv_activation), + layers.MaxPooling2D(pool_size=pool_size), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation=dense_activation), + ] + ) + + print(model.summary()) + + model.save("model.keras") + + +def train_model(x_train: np.ndarray, y_train: np.ndarray): + import keras + + model = keras.models.load_model("model.keras") + model.compile(loss=loss, optimizer=optimizer, metrics=metrics) + + model.fit( + x_train, + y_train, + batch_size=batch_size, + epochs=epochs, + validation_split=validation_split, + ) + + model.save("trained_model.keras") + + +def evaluate_model(x_test: np.ndarray, y_test: np.ndarray): + import keras + + trained_model = keras.models.load_model("trained_model.keras") + + score = trained_model.evaluate(x_test, y_test, verbose=0) + print("Test loss:", score[0]) + print("Test accuracy:", score[1]) + + return score + + +def main(): + from runnable import Catalog, Pipeline, PythonTask, metric, pickled + + # x_train, y_train, x_test, y_test + load_data_task = PythonTask( + function=load_data, + name="load_data", + returns=[pickled("x_train"), pickled("y_train"), pickled("x_test"), pickled("y_test")], + ) + + # def scale_data(x_train: np.ndarray, x_test: np.ndarray) + scale_data_task = PythonTask( + function=scale_data, + name="scale_data", + returns=[pickled("x_train"), pickled("x_test")], + ) + + convert_to_categorically_task = PythonTask( + function=convert_to_categorically, + name="convert_to_categorically", + returns=[pickled("y_train"), pickled("y_test")], + ) + + build_model_task = PythonTask( + function=build_model, + name="build_model", + catalog=Catalog(put=["model.keras"]), + ) + + train_model_task = PythonTask( + function=train_model, + name="train_model", + catalog=Catalog( + get=["model.keras"], + put=["trained_model.keras"], + ), + ) + + evaluate_model_task = PythonTask( + function=evaluate_model, + name="evaluate_model", + catalog=Catalog( + get=["trained_model.keras"], + ), + returns=[metric("score")], + terminate_with_success=True, + ) + + pipeline = Pipeline( + steps=[ + load_data_task, + scale_data_task, + convert_to_categorically_task, + build_model_task, + train_model_task, + evaluate_model_task, + ], + ) + + pipeline.execute() + + return pipeline + + +if __name__ == "__main__": + main() diff --git a/examples/tutorials/mnist/parameters.yaml b/examples/tutorials/mnist/parameters.yaml new file mode 100644 index 00000000..41974f68 --- /dev/null +++ b/examples/tutorials/mnist/parameters.yaml @@ -0,0 +1,31 @@ +num_classes: 10 + +train_params: + hidden_conv_layer_sizes: [32, 64] + loss: 'categorical_crossentropy' + optimizer: 'adam' + kernel_initializer: "normal" + metrics: ['accuracy'] + num_pixels: 784 + input_shape: [28, 28, 1] + kernel_size: [3, 3] + pool_size: [2, 2] + p_dropout: 0.5 + epochs: 6 + batch_size: 64 + +baseline_params: + num_pixels: 784 + kernel_initializer: "normal" + pixels_activation: relu + classes_activation: softmax + optimizer: "adam" + + metrics: ['accuracy'] + +hpt: + - [16, 32] + - [16, 64] + # - [32, 64] + # - [32, 128] + # - [64, 128] diff --git a/examples/tutorials/mnist/parameters_source.py b/examples/tutorials/mnist/parameters_source.py new file mode 100644 index 00000000..10a71b12 --- /dev/null +++ b/examples/tutorials/mnist/parameters_source.py @@ -0,0 +1,158 @@ +from typing import List + +import keras 
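+# Variant of modular_source.py: the hyperparameters arrive through the
+# TrainParams pydantic model, populated from parameters.yaml at run time.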
+import numpy as np +from keras import layers +from pydantic import BaseModel + + +class TrainParams(BaseModel): + input_shape: tuple = (28, 28, 1) + + kernel_size: tuple = (3, 3) + pool_size: tuple = (2, 2) + + conv_activation: str = "relu" + dense_activation: str = "softmax" + + loss: str = "categorical_crossentropy" + optimizer: str = "adam" + metrics: List[str] = ["accuracy"] + + batch_size: int = 128 + epochs: int = 15 + validation_split: float = 0.1 + + +def load_data(): + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + + return x_train, y_train, x_test, y_test + + +def scale_data(x_train: np.ndarray, x_test: np.ndarray): + x_train = x_train.astype("float32") / 255 + x_test = x_test.astype("float32") / 255 + # Make sure images have shape (28, 28, 1) + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + + return x_train, x_test + + +def convert_to_categorically(y_train: np.ndarray, y_test: np.ndarray, num_classes: int): + y_train = keras.utils.to_categorical(y_train, num_classes) + y_test = keras.utils.to_categorical(y_test, num_classes) + + return y_train, y_test + + +def build_model(train_params: TrainParams, num_classes: int): + model = keras.Sequential( + [ + keras.Input(shape=train_params.input_shape), + layers.Conv2D(32, train_params.kernel_size, activation=train_params.conv_activation), + layers.MaxPooling2D(pool_size=train_params.pool_size), + layers.Conv2D(64, kernel_size=train_params.kernel_size, activation=train_params.conv_activation), + layers.MaxPooling2D(pool_size=train_params.pool_size), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation=train_params.dense_activation), + ] + ) + + print(model.summary()) + + model.save("model.keras") + + +def train_model(x_train: np.ndarray, y_train: np.ndarray, train_params: TrainParams): + model = keras.models.load_model("model.keras") + model.compile(loss=train_params.loss, optimizer=train_params.optimizer, metrics=train_params.metrics) + + model.fit( + x_train, + y_train, + batch_size=train_params.batch_size, + epochs=train_params.epochs, + validation_split=train_params.validation_split, + ) + + model.save("trained_model.keras") + + +def evaluate_model(x_test: np.ndarray, y_test: np.ndarray): + trained_model = keras.models.load_model("trained_model.keras") + + score = trained_model.evaluate(x_test, y_test, verbose=0) + print("Test loss:", score[0]) + print("Test accuracy:", score[1]) + + return score + + +def main(): + from runnable import Catalog, Pipeline, PythonTask, pickled + + # x_train, y_train, x_test, y_test + load_data_task = PythonTask( + function=load_data, + name="load_data", + returns=[pickled("x_train"), pickled("y_train"), pickled("x_test"), pickled("y_test")], + ) + + # def scale_data(x_train: np.ndarray, x_test: np.ndarray) + scale_data_task = PythonTask( + function=scale_data, + name="scale_data", + returns=[pickled("x_train"), pickled("x_test")], + ) + + convert_to_categorically_task = PythonTask( + function=convert_to_categorically, + name="convert_to_categorically", + returns=[pickled("y_train"), pickled("y_test")], + ) + + build_model_task = PythonTask( + function=build_model, + name="build_model", + catalog=Catalog(put=["model.keras"]), + ) + + train_model_task = PythonTask( + function=train_model, + name="train_model", + catalog=Catalog( + get=["model.keras"], + put=["trained_model.keras"], + ), + ) + + evaluate_model_task = PythonTask( + function=evaluate_model, + name="evaluate_model", + catalog=Catalog( + 
get=["trained_model.keras"], + ), + terminate_with_success=True, + ) + + pipeline = Pipeline( + steps=[ + load_data_task, + scale_data_task, + convert_to_categorically_task, + build_model_task, + train_model_task, + evaluate_model_task, + ], + ) + + pipeline.execute(parameters_file="examples/tutorials/mnist/parameters.yaml") + + return pipeline + + +if __name__ == "__main__": + main() diff --git a/examples/tutorials/mnist/source.py b/examples/tutorials/mnist/source.py new file mode 100644 index 00000000..9aee27eb --- /dev/null +++ b/examples/tutorials/mnist/source.py @@ -0,0 +1,80 @@ +""" +Title: Simple MNIST convnet +Author: [fchollet](https://twitter.com/fchollet) +Date created: 2015/06/19 +Last modified: 2020/04/21 +Description: A simple convnet that achieves ~99% test accuracy on MNIST. +Accelerator: GPU +""" + +""" +## Setup +""" + +import keras +import numpy as np +from keras import layers + +""" +## Prepare the data +""" + +# Model / data parameters +num_classes = 10 +input_shape = (28, 28, 1) + +# Load the data and split it between train and test sets +(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + +# Scale images to the [0, 1] range +x_train = x_train.astype("float32") / 255 +x_test = x_test.astype("float32") / 255 +# Make sure images have shape (28, 28, 1) +x_train = np.expand_dims(x_train, -1) +x_test = np.expand_dims(x_test, -1) +print("x_train shape:", x_train.shape) +print(x_train.shape[0], "train samples") +print(x_test.shape[0], "test samples") + + +# convert class vectors to binary class matrices +y_train = keras.utils.to_categorical(y_train, num_classes) +y_test = keras.utils.to_categorical(y_test, num_classes) + +""" +## Build the model +""" + +model = keras.Sequential( + [ + keras.Input(shape=input_shape), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation="softmax"), + ] +) + +model.summary() + +""" +## Train the model +""" + +batch_size = 128 +epochs = 15 + +model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) + +model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1) + +""" +## Evaluate the trained model +""" + +score = model.evaluate(x_test, y_test, verbose=0) +print("Test loss:", score[0]) +print("Test accuracy:", score[1]) diff --git a/examples/tutorials/reddit_text_classification/parameters.yaml b/examples/tutorials/reddit_text_classification/parameters.yaml new file mode 100644 index 00000000..e662e19c --- /dev/null +++ b/examples/tutorials/reddit_text_classification/parameters.yaml @@ -0,0 +1,9 @@ +url: https://raw.githubusercontent.com/axsauze/reddit-classification-exploration/master/data/reddit_train.csv +encoding: ISO-8859-1 +features_column: BODY +labels_column: REMOVED + +max_features: 1000 +ngram_range: 3 + +c_param: 0.1 diff --git a/examples/tutorials/reddit_text_classification/pipeline.py b/examples/tutorials/reddit_text_classification/pipeline.py new file mode 100644 index 00000000..50b57786 --- /dev/null +++ b/examples/tutorials/reddit_text_classification/pipeline.py @@ -0,0 +1,52 @@ +from examples.tutorials.reddit_text_classification.steps import ( + clean, + extract_text, + model_fit, + tfidf, + tokenize, +) +from runnable import Pipeline, PythonTask, Stub, pickled + + +def driver(): + x, labels = extract_text( + 
url="https://raw.githubusercontent.com/axsauze/reddit-classification-exploration/master/data/reddit_train.csv", + encoding="ISO-8859-1", + features_column="BODY", + labels_column="REMOVED", + ) + + cleaned_x = clean(x) + tokenised_x = tokenize(cleaned_x) + vectorised_x = tfidf(tokenised_x, max_features=1000, ngram_range=3) + y_probabilities = model_fit(vectorised_x, labels, c_param=0.1) + + print(y_probabilities) + + +def runnable_pipeline(): + extract_task = Stub(name="extract", function=extract_text, returns=[pickled("x"), pickled("labels")]) + clean_task = Stub(name="clean", function=clean, returns=[pickled("cleaned_x")]) + tokenize_task = Stub(name="tokenize", function=tokenize, returns=[pickled("tokenised_x")]) + vectorise_task = Stub(name="tfidf", function=tfidf, returns=[pickled("vectorised_x")]) + + model_fit_task = Stub( + name="model_fit", + function=model_fit, + returns=[pickled("y_probabilities"), pickled("lr_model")], + terminate_with_success=True, + ) + + pipeline = Pipeline( + steps=[extract_task, clean_task, tokenize_task, vectorise_task, model_fit_task], + add_terminal_nodes=True, + ) + + pipeline.execute(parameters_file="examples/tutorials/reddit_text_classification/parameters.yaml") + + return pipeline + + +if __name__ == "__main__": + # driver() + runnable_pipeline() diff --git a/examples/tutorials/reddit_text_classification/steps.py b/examples/tutorials/reddit_text_classification/steps.py new file mode 100644 index 00000000..86806896 --- /dev/null +++ b/examples/tutorials/reddit_text_classification/steps.py @@ -0,0 +1,127 @@ +import logging +import re +from html import unescape + +import numpy as np +import pandas as pd +import spacy +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.linear_model import LogisticRegression + +nlp = spacy.load("en_core_web_sm") + +logger = logging.getLogger(__name__) +logger.setLevel(logging.ERROR) + + +class CleanTransformer: + __uplus_pattern = re.compile("\<[uU]\+(?P[a-zA-Z0-9]+)\>") + __markup_link_pattern = re.compile("\[(.*)\]\((.*)\)") + + def predict(self, X, feature_names=[]): + logger.warning(X) + f = np.vectorize(CleanTransformer.transform_clean_text) + X_clean = f(X) + logger.warning(X_clean) + return X_clean + + def fit(self, X, y=None, **fit_params): + return self + + @staticmethod + def transform_clean_text(raw_text): + try: + decoded = raw_text.encode("ISO-8859-1").decode("utf-8") + except: + decoded = raw_text.encode("ISO-8859-1").decode("cp1252") + html_unescaped = unescape(decoded) + html_unescaped = re.sub(r"\r\n", " ", html_unescaped) + html_unescaped = re.sub(r"\r\r\n", " ", html_unescaped) + html_unescaped = re.sub(r"\r", " ", html_unescaped) + html_unescaped = html_unescaped.replace(">", " > ") + html_unescaped = html_unescaped.replace("<", " < ") + html_unescaped = html_unescaped.replace("--", " - ") + html_unescaped = CleanTransformer.__uplus_pattern.sub(" U\g ", html_unescaped) + html_unescaped = CleanTransformer.__markup_link_pattern.sub(" \1 \2 ", html_unescaped) + html_unescaped = html_unescaped.replace("\\", "") + return html_unescaped + + +class TokenizeTransformer: + __symbols = set("!$%^&*()_+|~-=`{}[]:\";'<>?,./-") + + def predict(self, X, feature_names=[]): + logger.warning(X) + f = np.vectorize(TokenizeTransformer.transform_to_token, otypes=[object]) + X_tokenized = f(X) + logger.warning(X_tokenized) + return X_tokenized + + def fit(self, X, y=None, **fit_params): + return self + + @staticmethod + def transform_to_token(text): + str_text = str(text) + doc = nlp(str_text, 
disable=["parser", "tagger", "ner"]) + tokens = [] + for token in doc: + if token.like_url: + clean_token = "URL" + else: + clean_token = token.lemma_.lower().strip() + if len(clean_token) < 1 or clean_token in TokenizeTransformer.__symbols: + continue + tokens.append(clean_token) + return tokens + + +def extract_text(url: str, encoding: str, features_column: str, labels_column: str): + df = pd.read_csv(url, encoding=encoding) + + df.to_csv("reddit_text", index=False, header=False) + + x = df[features_column].values + y = df[labels_column].values + + return x, y + + +def clean(x: pd.DataFrame): + clean_text_transformer = CleanTransformer() + + cleaned_x = clean_text_transformer.predict(x) + + return cleaned_x + + +def tokenize(cleaned_x: pd.DataFrame): + tokeniser = TokenizeTransformer() + + tokenised_x = tokeniser.predict(cleaned_x) + return tokenised_x + + +def tfidf(tokenised_x: pd.DataFrame, max_features: int, ngram_range: int): + tfidf_vectorizer = TfidfVectorizer( + max_features=max_features, + preprocessor=lambda x: x, # We're using cleantext + tokenizer=lambda x: x, # We're using spacy + token_pattern=None, + ngram_range=(1, ngram_range), + ) + + tfidf_vectorizer.fit(tokenised_x) + + vectorised_x = tfidf_vectorizer.transform(tokenised_x) + return vectorised_x + + +def model_fit(vectorised_x: pd.DataFrame, labels: pd.Series, c_param: float): + lr_model = LogisticRegression(C=c_param, solver="sag") + + lr_model.fit(vectorised_x, labels) + + y_probabilities = lr_model.predict_proba(vectorised_x) + + return y_probabilities, lr_model diff --git a/magnus/__init__.py b/magnus/__init__.py deleted file mode 100644 index 98c1aee8..00000000 --- a/magnus/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# ruff: noqa - -# TODO: Might need to add Rich to pyinstaller part -import logging -from logging.config import dictConfig - -from magnus import defaults - -dictConfig(defaults.LOGGING_CONFIG) -logger = logging.getLogger(defaults.LOGGER_NAME) - -from magnus.interaction import ( - end_interactive_session, - get_experiment_tracker_context, - get_from_catalog, - get_object, - get_parameter, - get_run_id, - get_run_log, - get_secret, - put_in_catalog, - put_object, - start_interactive_session, - set_parameter, - track_this, -) # noqa -from magnus.sdk import Stub, Pipeline, Task, Parallel, Map, Catalog, Success, Fail # noqa - - -# TODO: Think of model registry as a central place to store models. -# TODO: Implement Sagemaker pipelines as a executor. 
- - -# TODO: Think of way of generating dag hash without executor configuration diff --git a/magnus/experiment_tracker.py b/magnus/experiment_tracker.py deleted file mode 100644 index c4d39436..00000000 --- a/magnus/experiment_tracker.py +++ /dev/null @@ -1,139 +0,0 @@ -import contextlib -import json -import logging -import os -from abc import ABC, abstractmethod -from collections import defaultdict -from typing import Any, ContextManager, Dict, Tuple, Union - -from pydantic import BaseModel, ConfigDict - -import magnus.context as context -from magnus import defaults -from magnus.utils import remove_prefix - -logger = logging.getLogger(defaults.LOGGER_NAME) - - -def retrieve_step_details(key: str) -> Tuple[str, int]: - key = remove_prefix(key, defaults.TRACK_PREFIX) - data = key.split(defaults.STEP_INDICATOR) - - key = data[0].lower() - step = 0 - - if len(data) > 1: - step = int(data[1]) - - return key, step - - -def get_tracked_data() -> Dict[str, Any]: - tracked_data: Dict[str, Any] = defaultdict(dict) - for env_var, value in os.environ.items(): - if env_var.startswith(defaults.TRACK_PREFIX): - key, step = retrieve_step_details(env_var) - - # print(value, type(value)) - try: - value = json.loads(value) - except json.decoder.JSONDecodeError: - logger.warning(f"Tracker {key} could not be JSON decoded, adding the literal value") - - tracked_data[key][step] = value - del os.environ[env_var] - - for key, value in tracked_data.items(): - if len(value) == 1: - tracked_data[key] = value[0] - - return tracked_data - - -# --8<-- [start:docs] - - -class BaseExperimentTracker(ABC, BaseModel): - """ - Base Experiment tracker class definition. - """ - - service_name: str = "" - service_type: str = "experiment_tracker" - - @property - def _context(self): - return context.run_context - - model_config = ConfigDict(extra="forbid") - - @property - def client_context(self) -> ContextManager: - """ - Returns the client context. - """ - return contextlib.nullcontext() - - def publish_data(self, tracked_data: Dict[str, Any]): - for key, value in tracked_data.items(): - if isinstance(value, dict): - for key2, value2 in value.items(): - self.log_metric(key, value2, step=key2) - continue - self.log_metric(key, value) - - @abstractmethod - def log_metric(self, key: str, value: Union[int, float], step: int = 0): - """ - Sets the metric in the experiment tracking. - - Args: - key (str): The key against you want to store the value - value (float): The value of the metric - step (int): Optional step at which it was recorded - - Raises: - NotImplementedError: Base class, hence not implemented - """ - raise NotImplementedError - - @abstractmethod - def log_parameter(self, key: str, value: Any): - """ - Logs a parameter in the experiment tracking. - - Args: - key (str): The key against you want to store the value - value (any): The value of the metric - - Raises: - NotImplementedError: Base class, hence not implemented - """ - pass - - -# --8<-- [end:docs] - - -class DoNothingTracker(BaseExperimentTracker): - """ - A Do nothing tracker - """ - - service_name: str = "do-nothing" - - def log_metric(self, key: str, value: Union[int, float], step: int = 0): - """ - Sets the metric in the experiment tracking. - - Args: - key (str): The key against you want to store the value - value (float): The value of the metric - """ - ... - - def log_parameter(self, key: str, value: Any): - """ - Since this is a Do nothing tracker, we don't need to log anything. - """ - ... 
diff --git a/magnus/extensions/experiment_tracker/mlflow/implementation.py b/magnus/extensions/experiment_tracker/mlflow/implementation.py deleted file mode 100644 index 4cce45c0..00000000 --- a/magnus/extensions/experiment_tracker/mlflow/implementation.py +++ /dev/null @@ -1,94 +0,0 @@ -import functools -import logging -from typing import Any, Union - -from pydantic import ConfigDict, PrivateAttr - -from magnus import defaults -from magnus.experiment_tracker import BaseExperimentTracker - -logger = logging.getLogger(defaults.NAME) - - -class MLFlowExperimentTracker(BaseExperimentTracker): - """ - A MLFlow experiment tracker. - - TODO: Need to set up credentials from secrets - """ - - service_name: str = "mlflow" - - server_url: str - autolog: bool = False - - _default_experiment_name: str = PrivateAttr(default="Default") - _active_run_id: str = PrivateAttr(default="") - _client: Any = PrivateAttr(default=None) - - model_config = ConfigDict(extra="forbid") - - def model_post_init(self, __context: Any) -> None: - try: - import mlflow - except ImportError: - raise Exception("You need to install mlflow to use MLFlowExperimentTracker.") - - self._client = mlflow - - self._client.set_tracking_uri(self.server_url) - - if self.autolog: - self._client.autolog(log_models=False) - - @functools.cached_property - def experiment_id(self): - experiment_name = self._default_experiment_name - - # If a tag is provided, we should create that as our experiment - if self._context.tag: - experiment_name = self._context.tag - - experiment = self._client.get_experiment_by_name(experiment_name) - if not experiment: - # Create the experiment and get it. - experiment = self._client.create_experiment(experiment_name) - experiment = self._client.get_experiment(experiment) - - return experiment.experiment_id - - @functools.cached_property - def run_name(self): - return self._context.run_id - - @property - def client_context(self): - if self._active_run_id: - return self._client.start_run( - run_id=self._active_run_id, experiment_id=self.experiment_id, run_name=self.run_name - ) - - active_run = self._client.start_run(run_name=self.run_name, experiment_id=self.experiment_id) - self._active_run_id = active_run.info.run_id - return active_run - - def log_metric(self, key: str, value: Union[int, float], step: int = 0): - """ - Sets the metric in the experiment tracking. - - Args: - key (str): The key against you want to store the value - value (Any): The value of the metric - """ - if not isinstance(value, float) or isinstance(value, int): - msg = f"Only float/int values are accepted as metrics. 
Setting the metric {key} as parameter {key}_{step}" - logger.warning(msg) - self.log_parameter(key=key, value=value, step=step) - return - - with self.client_context as _: - self._client.log_metric(key, float(value), step=step or None) - - def log_parameter(self, key: str, value: Any, step: int = 0): - with self.client_context as _: - self._client.log_param(key + f"_{str(step)}", value) diff --git a/magnus/interaction.py b/magnus/interaction.py deleted file mode 100644 index 01534c73..00000000 --- a/magnus/interaction.py +++ /dev/null @@ -1,399 +0,0 @@ -from __future__ import annotations - -import json -import logging -import os -from functools import wraps -from typing import Any, ContextManager, Dict, Optional, TypeVar, Union, cast, overload - -from pydantic import BaseModel - -import magnus.context as context -from magnus import defaults, exceptions, parameters, pickler, utils -from magnus.datastore import RunLog, StepLog - -logger = logging.getLogger(defaults.LOGGER_NAME) - -CastT = TypeVar("CastT") - - -def check_context(func): - @wraps(func) - def wrapper(*args, **kwargs): - if not context.run_context.executor: - msg = ( - "There are no active executor and services. This should not have happened and is a bug." - " Please raise a bug report." - ) - raise Exception(msg) - result = func(*args, **kwargs) - return result - - return wrapper - - -@check_context -def track_this(step: int = 0, **kwargs): - """ - Tracks key-value pairs to the experiment tracker. - - The value is dumped as a dict, by alias, if it is a pydantic model. - - Args: - step (int, optional): The step to track the data at. Defaults to 0. - **kwargs (dict): The key-value pairs to track. - - Examples: - >>> track_this(step=0, my_int_param=123, my_float_param=123.45, my_str_param='hello world') - >>> track_this(step=1, my_int_param=456, my_float_param=456.78, my_str_param='goodbye world') - """ - prefix = defaults.TRACK_PREFIX - - for key, value in kwargs.items(): - logger.info(f"Tracking {key} with value: {value}") - - if isinstance(value, BaseModel): - value = value.model_dump(by_alias=True) - - os.environ[prefix + key + f"{defaults.STEP_INDICATOR}{step}"] = json.dumps(value) - - -@check_context -def set_parameter(**kwargs) -> None: - """ - Store a set of parameters. - - !!! note - The parameters are not stored in run log at this point in time. - They are collected now and stored in the run log after completion of the task. - - Parameters: - **kwargs (dict): A dictionary of key-value pairs to store as parameters. - - Returns: - None - - Examples: - >>> set_parameter(my_int_param=123, my_float_param=123.45, my_bool_param=True, my_str_param='hello world') - >>> get_parameter('my_int_param', int) - 123 - >>> get_parameter('my_float_param', float) - 123.45 - >>> get_parameter('my_bool_param', bool) - True - >>> get_parameter('my_str_param', str) - 'hello world' - - >>> # Example of using Pydantic models - >>> class MyModel(BaseModel): - ... field1: str - ... field2: int - >>> set_parameter(my_model_param=MyModel(field1='value1', field2=2)) - >>> get_parameter('my_model_param', MyModel) - MyModel(field1='value1', field2=2) - - """ - parameters.set_user_defined_params_as_environment_variables(kwargs) - - -@overload -def get_parameter(key: str, cast_as: Optional[CastT]) -> CastT: - ... - - -@overload -def get_parameter(cast_as: Optional[CastT]) -> CastT: - ... - - -@check_context -def get_parameter(key: Optional[str] = None, cast_as: Optional[CastT] = None) -> Union[Dict[str, Any], CastT]: - """ - Get a parameter by its key. 
- If the key is not provided, all parameters will be returned. - - cast_as is not required for JSON supported type (int, float, bool, str). - For complex nested parameters, cast_as could package them into a pydantic model. - If cast_as is not provided, the type will remain as dict for nested structures. - - Note that the cast_as pydantic model is the class, not an instance. - - Args: - key (str, optional): The key of the parameter to retrieve. If not provided, all parameters will be returned. - cast_as (Type, optional): The type to cast the parameter to. If not provided, the type will remain as it is - for simple data types (int, float, bool, str). For nested parameters, it would be a dict. - - Raises: - Exception: If the parameter does not exist and key is not provided. - ValidationError: If the parameter cannot be cast as pydantic model, when cast_as is provided. - - Examples: - >>> get_parameter('my_int_param', int) - 123 - >>> get_parameter('my_float_param', float) - 123.45 - >>> get_parameter('my_bool_param', bool) - True - >>> get_parameter('my_str_param', str) - 'hello world' - >>> get_parameter('my_model_param', MyModel) - MyModel(field1='value1', field2=2) - >>> get_parameter(cast_as=MyModel) - MyModel(field1='value1', field2=2) - - """ - params = parameters.get_user_set_parameters(remove=False) - - if not key: - # Return all parameters - return cast(CastT, parameters.cast_parameters_as_type(params, cast_as)) # type: ignore - - if key not in params: - raise Exception(f"Parameter {key} is not set before") - - # Return the parameter value, casted as asked. - return cast(CastT, parameters.cast_parameters_as_type(params[key], cast_as)) # type: ignore - - -@check_context -def get_secret(secret_name: str) -> str: - """ - Retrieve a secret from the secret store. - - Args: - secret_name (str): The name of the secret to retrieve. - - Raises: - SecretNotFoundError: If the secret does not exist in the store. - - Returns: - str: The secret value. - """ - secrets_handler = context.run_context.secrets_handler - try: - return secrets_handler.get(name=secret_name) - except exceptions.SecretNotFoundError: - logger.exception(f"No secret by the name {secret_name} found in the store") - raise - - -@check_context -def get_from_catalog(name: str, destination_folder: str = ""): - """ - Get data from the catalog. - - The name can be a wildcard pattern following globing rules. - - Args: - name (str): The name of the data catalog entry. - destination_folder (str, optional): The destination folder to download the data to. - If not provided, the default destination folder set in the catalog will be used. - """ - if not destination_folder: - destination_folder = context.run_context.catalog_handler.compute_data_folder - - data_catalog = context.run_context.catalog_handler.get( - name, - run_id=context.run_context.run_id, - ) - - if context.run_context.executor._context_step_log: - context.run_context.executor._context_step_log.add_data_catalogs(data_catalog) - else: - logger.warning("Step log context was not found during interaction! The step log will miss the record") - - -@check_context -def put_in_catalog(filepath: str): - """ - Add a file or folder to the data catalog. - You can use wild cards following globing rules. 
- - Args: - filepath (str): The path to the file or folder added to the catalog - """ - - data_catalog = context.run_context.catalog_handler.put( - filepath, - run_id=context.run_context.run_id, - ) - if not data_catalog: - logger.warning(f"No catalog was done by the {filepath}") - - if context.run_context.executor._context_step_log: - context.run_context.executor._context_step_log.add_data_catalogs(data_catalog) - else: - logger.warning("Step log context was not found during interaction! The step log will miss the record") - - -@check_context -def put_object(data: Any, name: str): - """ - Serialize and store a python object in the data catalog. - - This function behaves the same as `put_in_catalog` - but with python objects. - - Args: - data (Any): The python data object to store. - name (str): The name to store it against. - """ - native_pickler = pickler.NativePickler() - - native_pickler.dump(data=data, path=name) - put_in_catalog(f"{name}{native_pickler.extension}") - - # Remove the file - os.remove(f"{name}{native_pickler.extension}") - - -@check_context -def get_object(name: str) -> Any: - """ - Retrieve and deserialize a python object from the data catalog. - - This function behaves the same as `get_from_catalog` but with - python objects. - - Returns: - Any : The object - """ - native_pickler = pickler.NativePickler() - - get_from_catalog(name=f"{name}{native_pickler.extension}", destination_folder=".") - - try: - data = native_pickler.load(name) - - # Remove the file - os.remove(f"{name}{native_pickler.extension}") - return data - except FileNotFoundError as e: - msg = f"No object by the name {name} has been put in the catalog before." - logger.exception(msg) - raise e - - -@check_context -def get_run_id() -> str: - """ - Returns the run_id of the current run. - - You can also access this from the environment variable `MAGNUS_RUN_ID`. - """ - return context.run_context.run_id - - -@check_context -def get_run_log() -> RunLog: - """ - Returns the run_log of the current run. - - The return is a deep copy of the run log to prevent any modification. - """ - return context.run_context.run_log_store.get_run_log_by_id( - context.run_context.run_id, - full=True, - ).copy(deep=True) - - -@check_context -def get_tag() -> str: - """ - Returns the tag from the environment. - - Returns: - str: The tag if provided for the run, otherwise None - """ - return context.run_context.tag - - -@check_context -def get_experiment_tracker_context() -> ContextManager: - """ - Return a context session of the experiment tracker. - - You can start to use the context with the python ```with``` statement. - """ - experiment_tracker = context.run_context.experiment_tracker - return experiment_tracker.client_context - - -def start_interactive_session(run_id: str = "", config_file: str = "", tag: str = "", parameters_file: str = ""): - """ - During interactive python coding, either via notebooks or ipython, you can start a magnus session by calling - this function. The executor would always be local executor as its interactive. - - If this was called during a pipeline/function/notebook execution, it will be ignored. - - Args: - run_id (str, optional): The run id to use. Defaults to "" and would be created if not provided. - config_file (str, optional): The configuration file to use. Defaults to "" and magnus defaults. - tag (str, optional): The tag to attach to the run. Defaults to "". - parameters_file (str, optional): The parameters file to use. Defaults to "". 
- """ - - from magnus import entrypoints, graph # pylint: disable=import-outside-toplevel - - if context.run_context.executor: - logger.warn("This is not an interactive session or a session has already been activated.") - return - - run_id = utils.generate_run_id(run_id=run_id) - context.run_context = entrypoints.prepare_configurations( - configuration_file=config_file, - run_id=run_id, - tag=tag, - parameters_file=parameters_file, - force_local_executor=True, - ) - - executor = context.run_context.executor - - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=config_file, tag=tag) - - context.run_context.execution_plan = defaults.EXECUTION_PLAN.INTERACTIVE.value - executor.prepare_for_graph_execution() - step_config = { - "command": "interactive", - "command_type": "python", - "type": "task", - "next": "success", - } - - node = graph.create_node(name="interactive", step_config=step_config) - step_log = context.run_context.run_log_store.create_step_log("interactive", node._get_step_log_name()) - executor.add_code_identities(node=node, step_log=step_log) - - step_log.step_type = node.node_type - step_log.status = defaults.PROCESSING - executor._context_step_log = step_log - - -def end_interactive_session(): - """ - Ends an interactive session. - - Does nothing if the executor is not interactive. - """ - - if not context.run_context.executor: - logger.warn("There is no active session in play, doing nothing!") - return - - if context.run_context.execution_plan != defaults.EXECUTION_PLAN.INTERACTIVE.value: - logger.warn("There is not an interactive session, doing nothing!") - return - - tracked_data = utils.get_tracked_data() - set_parameters = parameters.get_user_set_parameters(remove=True) - - step_log = cast(StepLog, context.run_context.executor._context_step_log) - step_log.user_defined_metrics = tracked_data - context.run_context.run_log_store.add_step_log(step_log, context.run_context.run_id) - - context.run_context.run_log_store.set_parameters(context.run_context.run_id, set_parameters) - - context.run_context.executor._context_step_log = None - context.run_context.execution_plan = "" - context.run_context.executor = None # type: ignore diff --git a/magnus/names.py b/magnus/names.py deleted file mode 100644 index 9e03b1e1..00000000 --- a/magnus/names.py +++ /dev/null @@ -1,708 +0,0 @@ -import random - -left = [ - "descent", - "citric", - "grating", - "glossy", - "undecidable", - "senile", - "dark", - "rude", - "tart", - "rubbery", - "internal", - "primordial", - "timid", - "trite", - "inverted", - "rancid", - "finite", - "hidden", - "grouchy", - "local", - "affable", - "vintage", - "online", - "grilled", - "absolute", - "chocolaty", - "fermented", - "obnoxious", - "sensitive", - "exponential", - "cheerful", - "persistent", - "spicy", - "humble", - "mean", - "ternary", - "acidic", - "novel", - "inverse", - "shortest", - "grizzled", - "honest", - "symmetric", - "jumbo", - "naive", - "recursive", - "serious", - "rowdy", - "offline", - "quick", - "parallel", - "stern", - "decidable", - "pureed", - "nutty", - "brute", - "damaged", - "gentle", - "deterministic", - "ripe", - "piquant", - "poached", - "unsolvable", - "abstract", - "sticky", - "gritty", - "dynamic", - "candied", - "loyal", - "flavorful", - "visible", - "zesty", - "approximate", - "icy", - "immature", - "bitter", - "stable", - "threaded", - "juicy", - "baked", - "hushed", - "frozen", - "selfish", - "planar", - "clever", - "shabby", - "extended", - "concave", - "pleasant", - "perfect", - "sugary", - 
"patient", - "unsorted", - "odious", - "piercing", - "careful", - "tattered", - "ferocious", - "crunchy", - "toasted", - "forgiving", - "heartless", - "sleek", - "undirected", - "optimal", - "tractable", - "sharp", - "eager", - "plain", - "shrewd", - "maximum", - "pounded", - "dull", - "zingy", - "counting", - "sophisticated", - "contemporary", - "proud", - "yummy", - "radiant", - "religious", - "glowing", - "messy", - "external", - "balanced", - "new", - "prepared", - "refined", - "grim", - "syrupy", - "graceful", - "annoying", - "tender", - "blazing", - "noisy", - "delicious", - "matte", - "witty", - "polite", - "nearest", - "helpful", - "thundering", - "adventurous", - "milky", - "mute", - "ordered", - "blended", - "pallid", - "ascent", - "roaring", - "brave", - "curious", - "devout", - "energetic", - "burning", - "merciless", - "orthogonal", - "juvenile", - "accepting", - "topped", - "lean", - "greasy", - "deafening", - "reduced", - "obliging", - "null", - "rank", - "shiny", - "forward", - "boolean", - "partial", - "current", - "brilliant", - "bland", - "violent", - "amiable", - "loud", - "savage", - "bright", - "threadbare", - "minimum", - "creamy", - "doughy", - "mild", - "wise", - "urbane", - "greedy", - "genteel", - "sweet", - "fresh", - "sunny", - "linear", - "uniform", - "cheesy", - "cold", - "cyclic", - "obsolete", - "calm", - "augmenting", - "asymptotic", - "tough", - "proper", - "quiet", - "bounded", - "rich", - "complete", - "archaic", - "seasoned", - "intractable", - "light", - "funny", - "muffled", - "silly", - "clean", - "edible", - "vicious", - "dyadic", - "simple", - "smoky", - "caramelized", - "fixed", - "excited", - "recent", - "cautious", - "nervous", - "muted", - "trusting", - "mode", - "oriented", - "savory", - "active", - "young", - "amortized", - "ambitious", - "meek", - "ragged", - "terminal", - "blaring", - "factorial", - "relaxed", - "mashed", - "weighted", - "rectilinear", - "warm", - "cruel", - "organic", - "faded", - "generous", - "lazy", - "worn", - "fried", - "isomorphic", - "booming", - "nutritious", - "median", - "colorful", - "tossed", - "briny", - "lower", - "exact", - "oscillating", - "friendly", - "nondeterministic", - "humane", - "sour", - "happy", - "creative", - "marinated", - "dry", - "simmered", - "strong", - "connected", - "free", - "amicable", - "exhaustive", - "vibrant", - "indulgent", - "sparse", - "swarm", - "quadratic", - "jellied", - "courtly", - "independent", - "salty", - "faint", - "adaptive", - "antique", - "polynomial", - "saucy", - "randomized", - "binary", - "average", - "cloying", - "frayed", - "objective", - "blocking", - "steamed", - "feasible", - "random", - "stale", - "braised", - "gourmet", - "chalky", - "moist", - "formal", - "brute force", - "ancient", - "wan", - "square", - "matching", - "smoked", - "unary", - "covering", - "kind", - "modern", - "tense", - "old", - "associative", - "daring", - "spatial", - "constant", - "stringy", - "concurrent", - "inventive", - "brutal", - "bipartite", - "charitable", - "mature", - "universal", - "stubborn", - "best", - "callous", - "commutative", - "advanced", - "vain", - "neat", - "soft", - "flat", - "golden", - "oily", - "merry", - "intense", - "succulent", - "pale", - "drab", - "impulsive", - "crispy", - "lenient", - "raw", - "tangy", - "inclusive", - "minty", - "acyclic", - "smart", - "chewy", - "bold", - "aged", - "vivid", - "silent", - "weathered", - "respectful", - "buttery", - "hoary", - "elegant", -] - -right = [ - # Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding 
father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB - "albattani", - # Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen - "allen", - # June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida - "almeida", - # Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi - "agnesi", - # Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes - "archimedes", - # Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli - "ardinghelli", - # Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata - "aryabhata", - # Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin - "austin", - # Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. - "babbage", - # Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach - "banach", - # John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen - "bardeen", - # Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik - "bartik", - # Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi - "bassi", - # Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver - "beaver", - # Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell - "bell", - # Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz - "benz", - # Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as 'father of Indian nuclear programme'- https://en.wikipedia.org/wiki/Homi_J._Bhabha - "bhabha", - # Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus - "bhaskara", - # Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell - "blackwell", - # Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. - "bohr", - # Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth - "booth", - # Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). 
https://en.wikipedia.org/wiki/Anita_Borg - "borg", - # Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose - "bose", - # Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville - "boyd", - # Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero - "brahmagupta", - # Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain - "brattain", - # Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) - "brown", - # Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson - "carson", - # Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar - "chandrasekhar", - # Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) - "shannon", - # Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke - "clarke", - # Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden - "colden", - # Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori - "cori", - # Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray - "cray", - # This entry reflects a husband and wife team who worked together: - # Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran - # Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran - "curran", - # Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. - "curie", - # Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. - "darwin", - # Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. - "davinci", - # Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. - "dijkstra", - # Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. 
https://en.wikipedia.org/wiki/Donna_Dubinsky - "dubinsky", - # Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley - "easley", - # Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison - "edison", - # Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein - "einstein", - # Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion - "elion", - # Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart - "engelbart", - # Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid - "euclid", - # Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler - "euler", - # Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat - "fermat", - # Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. - "fermi", - # Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman - "feynman", - # Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. - "franklin", - # Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei - "galileo", - # William Henry 'Bill' Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates - "gates", - # Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) - "goldberg", - # Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine - "goldstine", - # Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser - "goldwasser", - # James Golick, all around gangster. - "golick", - # Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall - "goodall", - # Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt - "haibt", - # Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) - "hamilton", - # Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking - "hawking", - # Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg - "heisenberg", - # Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. 
https://en.wikipedia.org/wiki/Grete_Hermann - "hermann", - # Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD - "heyrovsky", - # Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin - "hodgkin", - # Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover - "hoover", - # Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term 'debugging' for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper - "hopper", - # Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle - "hugle", - # Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia - "hypatia", - # Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) - "jackson", - # Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil - "jang", - # Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik - "jennings", - # Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen - "jepsen", - # Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en.wikipedia.org/wiki/Katherine_Johnson - "johnson", - # Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie - "joliot", - # Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones - "jones", - # A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam - "kalam", - # Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare - "kare", - # Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. 
https://en.wikipedia.org/wiki/Mary_Kenneth_Keller - "keller", - # Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler - "kepler", - # Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana - "khorana", - # Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby - "kilby", - # Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch - "kirch", - # Donald Knuth - American computer scientist, author of 'The Art of Computer Programming' and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth - "knuth", - # Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya - "kowalevski", - # Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande - "lalande", - # Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr - "lamarr", - # Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport - "lamport", - # Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey - "leakey", - # Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt - "leavitt", - # Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin - "lewin", - # Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum - "lichterman", - # Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Award in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov - "liskov", - # Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) - "lovelace", - # Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re - "lumiere", - # Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) - "mahavira", - # Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer - "mayer", - # John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) - "mccarthy", - # Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons.
https://en.wikipedia.org/wiki/Barbara_McClintock - "mcclintock", - # Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean - "mclean", - # Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli - "mcnulty", - # Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner - "meitner", - # Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky - "meninsky", - # Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf - "mestorf", - # Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky - "minsky", - # Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani - "mirzakhani", - # Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse - "morse", - # Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock - "murdock", - # John von Neumann - today's computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture - "neumann", - # Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton - "newton", - # Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform - "nightingale", - # Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel - "nobel", - # Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether - "noether", - # Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA's Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 - "northcutt", - # Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce - "noyce", - # Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems - "panini", - # Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 - "pare", - # Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. - "pasteur", - # Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium.
https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin - "payne", - # Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman - "perlman", - # Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike - "pike", - # Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 - "poincare", - # Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras - "poitras", - # Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy - "ptolemy", - # C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman - "raman", - # Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan - "ramanujan", - # Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride - "ride", - # Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) - "montalcini", - # Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie - "ritchie", - # Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen - "roentgen", - # Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin - "rosalind", - # Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha - "saha", - # Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet - "sammet", - # Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) - "shaw", - # Dame Stephanie 'Steve' Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley - "shirley", - # William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley - "shockley", - # Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. 
https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi - "sinoussi", - # Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton - "snyder", - # Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence - "spence", - # Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman - "stallman", - # Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker - "stonebraker", - # Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson - "swanson", - # Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz - "swartz", - # Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles - "swirles", - # Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla - "tesla", - # Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson - "thompson", - # Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds - "torvalds", - # Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. - "turing", - # Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions - "varahamihira", - # Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya - "visvesvaraya", - # Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard - "volhard", - # Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer - "wescoff", - # Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles - "wiles", - # Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams - "williams", - # Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson - "wilson", - # Jeannette Wing - co-developed the Liskov substitution principle. 
- https://en.wikipedia.org/wiki/Jeannette_Wing - "wing", - # Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak - "wozniak", - # The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers - "wright", - # Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow - "yalow", - # Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath - "yonath", -] - - -def get_random_name(sep="-") -> str: - """ - Returns a random name in the format of docker container names. - - Args: - sep (str, optional): The separator to use between the names. Defaults to '-'. - - Returns: - str: The random name. - """ - r = random.SystemRandom() - name = "%s%s%s" % (r.choice(left), sep, r.choice(right)) - return name diff --git a/magnus/tasks.py b/magnus/tasks.py deleted file mode 100644 index 6fee16e9..00000000 --- a/magnus/tasks.py +++ /dev/null @@ -1,392 +0,0 @@ -import ast -import contextlib -import importlib -import io -import json -import logging -import os -import subprocess -import sys -from typing import Any, Dict, Tuple - -from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator -from pydantic._internal._model_construction import ModelMetaclass -from stevedore import driver - -import magnus.context as context -from magnus import defaults, parameters, utils -from magnus.defaults import TypeMapVariable - -logger = logging.getLogger(defaults.LOGGER_NAME) -logging.getLogger("stevedore").setLevel(logging.CRITICAL) - - -# TODO: Can we add memory peak, cpu usage, etc. to the metrics? - - -class BaseTaskType(BaseModel): - """A base task class which executes the command defined by the user.""" - - task_type: str = Field(serialization_alias="command_type") - node_name: str = Field(exclude=True) - - model_config = ConfigDict(extra="forbid") - - @property - def _context(self): - return context.run_context - - def get_cli_options(self) -> Tuple[str, dict]: - """ - Key is the name of the cli option and value is the value of the cli option. - This should always be in sync with the cli options defined in execute_*. - - Returns: - str: The name of the cli option. - dict: The dict of cli options for the task. - - Raises: - NotImplementedError: Base class, not implemented - """ - raise NotImplementedError() - - def _get_parameters(self, map_variable: TypeMapVariable = None, **kwargs) -> Dict[str, Any]: - """ - By this step, all the parameters are present as environment variables as json strings. - Return the parameters in scope for the execution. - - Args: - map_variable (dict, optional): If the command is part of map node, the value of map. Defaults to None. - - Returns: - dict: The parameters dictionary in-scope for the task execution - """ - return parameters.get_user_set_parameters(remove=False) - - def execute_command(self, map_variable: TypeMapVariable = None, **kwargs): - """The function to execute the command. - - The map_variable is sent in as an argument to the function. - - Args: - map_variable (dict, optional): If the command is part of map node, the value of map.
Defaults to None. - - Raises: - NotImplementedError: Base class, not implemented - """ - raise NotImplementedError() - - def _set_parameters(self, params: BaseModel, **kwargs): - """Set the parameters back to the environment variables. - - Args: - params (BaseModel, optional): The parameters to set back as environment variables. Defaults to None. - """ - # Nothing to do - if not params: - return - - if not isinstance(params, BaseModel) or isinstance(params, ModelMetaclass): - raise ValueError("Output variable of a function can only be a pydantic model or dynamic model.") - - parameters.set_user_defined_params_as_environment_variables(params.model_dump(by_alias=True)) - - @contextlib.contextmanager - def output_to_file(self, map_variable: TypeMapVariable = None): - """Context manager to put the output of a function execution to catalog. - - Args: - map_variable (dict, optional): If the command is part of map node, the value of map. Defaults to None. - - """ - from magnus import put_in_catalog # Causing cyclic imports - - log_file_name = self.node_name.replace(" ", "_") + ".execution.log" - if map_variable: - for _, value in map_variable.items(): - log_file_name += "_" + str(value) - - log_file = open(log_file_name, "w") - - f = io.StringIO() - try: - with contextlib.redirect_stdout(f): - yield - finally: - print(f.getvalue()) # print to console - log_file.write(f.getvalue()) # Print to file - - f.close() - log_file.close() - put_in_catalog(log_file.name) - os.remove(log_file.name) - - -class EasyModel(BaseModel): - model_config = ConfigDict(extra="allow") - - -def make_pydantic_model( - variables: Dict[str, Any], - prefix: str = "", -) -> BaseModel: - prefix_removed = {utils.remove_prefix(k, prefix): v for k, v in variables.items()} - return EasyModel(**prefix_removed) - - -class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods - """The task class for python command.""" - - task_type: str = Field(default="python", serialization_alias="command_type") - command: str - - @field_validator("command") - @classmethod - def validate_command(cls, command: str): - if not command: - raise Exception("Command cannot be empty for python task") - - return command - - def get_cli_options(self) -> Tuple[str, dict]: - """Return the cli options for the task.
- - Returns: - dict: The cli options for the task - """ - return "function", {"command": self.command} - - def execute_command(self, map_variable: TypeMapVariable = None, **kwargs): - """Execute the function as defined by the command.""" - module, func = utils.get_module_and_attr_names(self.command) - sys.path.insert(0, os.getcwd()) # Need to add the current directory to path - imported_module = importlib.import_module(module) - f = getattr(imported_module, func) - - params = self._get_parameters() - filtered_parameters = parameters.filter_arguments_for_func(f, params, map_variable) - - if map_variable: - os.environ[defaults.MAP_VARIABLE] = json.dumps(map_variable) - - logger.info(f"Calling {func} from {module} with {filtered_parameters}") - - with self.output_to_file(map_variable=map_variable) as _: - try: - user_set_parameters = f(**filtered_parameters) - except Exception as _e: - msg = f"Call to the function {self.command} with {filtered_parameters} did not succeed.\n" - logger.exception(msg) - logger.exception(_e) - raise - - if map_variable: - del os.environ[defaults.MAP_VARIABLE] - - self._set_parameters(user_set_parameters) - - -class NotebookTaskType(BaseTaskType): - """The task class for Notebook based execution.""" - - task_type: str = Field(default="notebook", serialization_alias="command_type") - command: str - notebook_output_path: str = Field(default="", validate_default=True) - output_cell_tag: str = Field(default="magnus_output", validate_default=True) - optional_ploomber_args: dict = {} - - _output_tag: str = "magnus_output" - - @field_validator("command") - @classmethod - def notebook_should_end_with_ipynb(cls, command: str): - if not command: - raise Exception("Command should point to the ipynb file") - - if not command.endswith(".ipynb"): - raise Exception("Notebook task should point to an ipynb file") - - return command - - @field_validator("notebook_output_path") - @classmethod - def correct_notebook_output_path(cls, notebook_output_path: str, info: ValidationInfo): - if notebook_output_path: - return notebook_output_path - - command = info.data["command"] - return "".join(command.split(".")[:-1]) + "_out.ipynb" - - def get_cli_options(self) -> Tuple[str, dict]: - return "notebook", {"command": self.command, "notebook-output-path": self.notebook_output_path} - - def _parse_notebook_for_output(self, notebook: Any): - collected_params = {} - - for cell in notebook.cells: - d = cell.dict() - # identify the tags attached to the cell. - tags = d.get("metadata", {}).get("tags", {}) - if self.output_cell_tag in tags: - # There is a tag that has output - outputs = d["outputs"] - - for out in outputs: - params = out.get("text", "{}") - collected_params.update(ast.literal_eval(params)) - - return collected_params - - def execute_command(self, map_variable: TypeMapVariable = None, **kwargs): - """Execute the python notebook as defined by the command. - - Args: - map_variable (dict, optional): If the node is part of an internal branch. Defaults to None.
- - Raises: - ImportError: If necessary dependencies are not installed - Exception: If anything else fails - """ - try: - import ploomber_engine as pm - - from magnus import put_in_catalog # Causes issues with cyclic import - - parameters = self._get_parameters() - filtered_parameters = parameters - - notebook_output_path = self.notebook_output_path - - if map_variable: - os.environ[defaults.MAP_VARIABLE] = json.dumps(map_variable) - - for _, value in map_variable.items(): - notebook_output_path += "_" + str(value) - - ploomber_optional_args = self.optional_ploomber_args - - kwds = { - "input_path": self.command, - "output_path": notebook_output_path, - "parameters": filtered_parameters, - "log_output": True, - "progress_bar": False, - } - kwds.update(ploomber_optional_args) - - collected_params: Dict[str, Any] = {} - with self.output_to_file(map_variable=map_variable) as _: - out = pm.execute_notebook(**kwds) - collected_params = self._parse_notebook_for_output(out) - - collected_params_model = make_pydantic_model(collected_params) - self._set_parameters(collected_params_model) - - put_in_catalog(notebook_output_path) - if map_variable: - del os.environ[defaults.MAP_VARIABLE] - - except ImportError as e: - msg = ( - "Task type of notebook requires ploomber engine to be installed. Please install via optional: notebook" - ) - raise Exception(msg) from e - - -class ShellTaskType(BaseTaskType): - """ - The task class for shell based commands. - """ - - task_type: str = Field(default="shell", serialization_alias="command_type") - command: str - - @field_validator("command") - @classmethod - def validate_command(cls, command: str): - if not command: - raise Exception("Command cannot be empty for shell task") - - return command - - def execute_command(self, map_variable: TypeMapVariable = None, **kwargs): - # Using shell=True as we want to have chained commands to be executed in the same shell. - """Execute the shell command as defined by the command. - - Args: - map_variable (dict, optional): If the node is part of an internal branch. Defaults to None. - """ - subprocess_env = os.environ.copy() - - if map_variable: - subprocess_env[defaults.MAP_VARIABLE] = json.dumps(map_variable) - - command = self.command.strip() + " && env | grep MAGNUS" - logger.info(f"Executing shell command: {command}") - - output_parameters = {} - - with subprocess.Popen( - command, - shell=True, - env=subprocess_env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - ) as proc, self.output_to_file(map_variable=map_variable) as _: - for line in proc.stdout: # type: ignore - logger.info(line) - print(line) - - if line.startswith(defaults.PARAMETER_PREFIX): - key, value = line.strip().split("=", 1) - try: - output_parameters[key] = json.loads(value) - except json.JSONDecodeError: - output_parameters[key] = value # simple data types - - if line.startswith(defaults.TRACK_PREFIX): - key, value = line.split("=", 1) - os.environ[key] = value.strip() - - proc.wait() - if proc.returncode != 0: - raise Exception("Command failed") - - self._set_parameters( - params=make_pydantic_model( - output_parameters, - defaults.PARAMETER_PREFIX, - ) - ) - - -def create_task(kwargs_for_init) -> BaseTaskType: - """ - Creates a task object from the command configuration. - - Args: - A dictionary of keyword arguments that are sent by the user to the task. - Check against the model class for the validity of it. 
- - Returns: - tasks.BaseTaskType: The command object - """ - # The dictionary cannot be modified - kwargs = kwargs_for_init.copy() - command_type = kwargs.pop("command_type", defaults.COMMAND_TYPE) - - try: - task_mgr = driver.DriverManager( - namespace="tasks", - name=command_type, - invoke_on_load=True, - invoke_kwds=kwargs, - ) - return task_mgr.driver - except Exception as _e: - msg = ( - f"Could not find the task type {command_type}. Please ensure you have installed " - "the extension that provides the node type." - ) - raise Exception(msg) from _e diff --git a/mkdocs.yml b/mkdocs.yml index c587c87e..2c857868 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,14 +1,14 @@ -site_name: Magnus +site_name: runnable site_description: "Pipelines made easy" -strict: true -repo_url: https://github.com/AstraZeneca/magnus-core +strict: false +repo_url: https://github.com/vijayvammi/runnable # TODO: Set up versioning docs_dir: "docs" theme: - logo: assets/logo.png - favicon: assets/favicon.png + logo: assets/sport.png + favicon: assets/sport.png name: material font: code: Roboto Mono @@ -37,8 +37,8 @@ theme: - content.tabs.link - header.autohide - navigation.top - - toc.integrate - toc.follow + - toc.integrate - search.suggest - navigation.tabs - navigation.tabs.sticky @@ -47,6 +47,8 @@ theme: - navigation.instant - navigation.instant.progress - navigation.tracking + - navigation.footer + # - navigation.indexes # Extensions markdown_extensions: @@ -108,26 +110,25 @@ extra_css: - css/extra.css nav: - - "Magnus": + - "runnable": - "Introduction": "index.md" - "Usage": "usage.md" - - "Example": - - "Pipeline Definition": "example/example.md" - - "Steps": "example/steps.md" - - "Flow of data": "example/dataflow.md" - - "Reproducibility": "example/reproducibility.md" - - "Experiment tracking": "example/experiment-tracking.md" - - "Secrets": "example/secrets.md" - - "Retry failures": "example/retry-after-failure.md" - - "Why magnus?": "why-magnus.md" - - "Concepts": + # - "Example": + # - "Pipeline Definition": "example/example.md" + # - "Steps": "example/steps.md" + # - "Flow of data": "example/dataflow.md" + # - "Reproducibility": "example/reproducibility.md" + # - "Experiment tracking": "example/experiment-tracking.md" + # - "Secrets": "example/secrets.md" + # - "Retry failures": "example/retry-after-failure.md" + - "Why runnable?": "why-runnable.md" + - "Define pipeline": - "tl;dr": "concepts/the-big-picture.md" - "Pipeline": "concepts/pipeline.md" - - "Executor": "concepts/executor.md" - - "Parameters": "concepts/parameters.md" - - "Run log": "concepts/run-log.md" + # - "Executor": "concepts/executor.md" + #- "Parameters": "concepts/parameters.md" - "Catalog": "concepts/catalog.md" - - "Experiment tracking": "concepts/experiment-tracking.md" + # - "Experiment tracking": "concepts/experiment-tracking.md" - "Secrets": "concepts/secrets.md" - "Nodes": - "Stub": "concepts/stub.md" @@ -135,19 +136,20 @@ nav: - "Parallel": "concepts/parallel.md" - "Map": "concepts/map.md" - "Nesting": "concepts/nesting.md" - - "Configurations": + - "Reproducibility": "concepts/run-log.md" + - "Run pipeline": - "Overview": "configurations/overview.md" - "Executor": - "local": "configurations/executors/local.md" - "mocked": "configurations/executors/mocked.md" + - "retry": "configurations/executors/mocked.md" - "local-container": "configurations/executors/local-container.md" - "argo workflows": "configurations/executors/argo.md" - "container environments": "configurations/executors/container-environments.md" - "Run log": 
"configurations/run-log.md" - "Catalog": "configurations/catalog.md" - "Secrets": "configurations/secrets.md" - - "Experiment tracking": "configurations/experiment-tracking.md" - - "Python API": "interactions.md" + # - "Experiment tracking": "configurations/experiment-tracking.md" - "Python SDK": "sdk.md" - "Extensions": "extensions.md" - - "Roadmap": "roadmap.md" + # - "Roadmap": "roadmap.md" diff --git a/poetry.lock b/poetry.lock index a7c028eb..b0b7bab5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,16 @@ # This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +[[package]] +name = "absl-py" +version = "2.1.0" +description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." +optional = false +python-versions = ">=3.7" +files = [ + {file = "absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff"}, + {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"}, +] + [[package]] name = "altgraph" version = "0.17.4" @@ -22,20 +33,6 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - -[[package]] -name = "appnope" -version = "0.1.3" -description = "Disable App Nap on macOS >= 10.9" -optional = false -python-versions = "*" -files = [ - {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, - {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, -] - [[package]] name = "arrow" version = "1.2.3" @@ -68,6 +65,21 @@ six = ">=1.12.0" astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] +[[package]] +name = "astunparse" +version = "1.6.3" +description = "An AST unparser for Python" +optional = false +python-versions = "*" +files = [ + {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, + {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, +] + +[package.dependencies] +six = ">=1.6.1,<2.0" +wheel = ">=0.23.0,<1.0" + [[package]] name = "attrs" version = "23.2.0" @@ -98,23 +110,9 @@ files = [ {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"}, ] -[package.dependencies] -pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} - [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] -[[package]] -name = "backcall" -version = "0.2.0" -description = "Specifications for callback functions passed in to an API" -optional = false -python-versions = "*" -files = [ - {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, - {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, -] - [[package]] name = "backoff" version = "2.2.1" @@ -211,15 +209,72 @@ webencodings = "*" [package.extras] css = ["tinycss2 (>=1.1.0,<1.3)"] +[[package]] +name = "blis" +version = "0.7.11" +description = "The Blis BLAS-like linear algebra library, as a self-contained C-extension." 
+optional = false +python-versions = "*" +files = [ + {file = "blis-0.7.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd5fba34c5775e4c440d80e4dea8acb40e2d3855b546e07c4e21fad8f972404c"}, + {file = "blis-0.7.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:31273d9086cab9c56986d478e3ed6da6752fa4cdd0f7b5e8e5db30827912d90d"}, + {file = "blis-0.7.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d06883f83d4c8de8264154f7c4a420b4af323050ed07398c1ff201c34c25c0d2"}, + {file = "blis-0.7.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee493683e3043650d4413d531e79e580d28a3c7bdd184f1b9cfa565497bda1e7"}, + {file = "blis-0.7.11-cp310-cp310-win_amd64.whl", hash = "sha256:a73945a9d635eea528bccfdfcaa59dd35bd5f82a4a40d5ca31f08f507f3a6f81"}, + {file = "blis-0.7.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1b68df4d01d62f9adaef3dad6f96418787265a6878891fc4e0fabafd6d02afba"}, + {file = "blis-0.7.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:162e60d941a8151418d558a94ee5547cb1bbeed9f26b3b6f89ec9243f111a201"}, + {file = "blis-0.7.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:686a7d0111d5ba727cd62f374748952fd6eb74701b18177f525b16209a253c01"}, + {file = "blis-0.7.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0421d6e44cda202b113a34761f9a062b53f8c2ae8e4ec8325a76e709fca93b6e"}, + {file = "blis-0.7.11-cp311-cp311-win_amd64.whl", hash = "sha256:0dc9dcb3843045b6b8b00432409fd5ee96b8344a324e031bfec7303838c41a1a"}, + {file = "blis-0.7.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dadf8713ea51d91444d14ad4104a5493fa7ecc401bbb5f4a203ff6448fadb113"}, + {file = "blis-0.7.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5bcdaf370f03adaf4171d6405a89fa66cb3c09399d75fc02e1230a78cd2759e4"}, + {file = "blis-0.7.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7de19264b1d49a178bf8035406d0ae77831f3bfaa3ce02942964a81a202abb03"}, + {file = "blis-0.7.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea55c6a4a60fcbf6a0fdce40df6e254451ce636988323a34b9c94b583fc11e5"}, + {file = "blis-0.7.11-cp312-cp312-win_amd64.whl", hash = "sha256:5a305dbfc96d202a20d0edd6edf74a406b7e1404f4fa4397d24c68454e60b1b4"}, + {file = "blis-0.7.11-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:68544a1cbc3564db7ba54d2bf8988356b8c7acd025966e8e9313561b19f0fe2e"}, + {file = "blis-0.7.11-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:075431b13b9dd7b411894d4afbd4212acf4d0f56c5a20628f4b34902e90225f1"}, + {file = "blis-0.7.11-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:324fdf62af9075831aa62b51481960e8465674b7723f977684e32af708bb7448"}, + {file = "blis-0.7.11-cp36-cp36m-win_amd64.whl", hash = "sha256:afebdb02d2dcf9059f23ce1244585d3ce7e95c02a77fd45a500e4a55b7b23583"}, + {file = "blis-0.7.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2e62cd14b20e960f21547fee01f3a0b2ac201034d819842865a667c969c355d1"}, + {file = "blis-0.7.11-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b01c05a5754edc0b9a3b69be52cbee03f645b2ec69651d12216ea83b8122f0"}, + {file = "blis-0.7.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfee5ec52ba1e9002311d9191f7129d7b0ecdff211e88536fb24c865d102b50d"}, + {file = "blis-0.7.11-cp37-cp37m-win_amd64.whl", hash = "sha256:844b6377e3e7f3a2e92e7333cc644095386548ad5a027fdc150122703c009956"}, + {file = 
"blis-0.7.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6df00c24128e323174cde5d80ebe3657df39615322098ce06613845433057614"}, + {file = "blis-0.7.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:809d1da1331108935bf06e22f3cf07ef73a41a572ecd81575bdedb67defe3465"}, + {file = "blis-0.7.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bfabd5272bbbe504702b8dfe30093653d278057656126716ff500d9c184b35a6"}, + {file = "blis-0.7.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca684f5c2f05269f17aefe7812360286e9a1cee3afb96d416485efd825dbcf19"}, + {file = "blis-0.7.11-cp38-cp38-win_amd64.whl", hash = "sha256:688a8b21d2521c2124ee8dfcbaf2c385981ccc27e313e052113d5db113e27d3b"}, + {file = "blis-0.7.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2ff7abd784033836b284ff9f4d0d7cb0737b7684daebb01a4c9fe145ffa5a31e"}, + {file = "blis-0.7.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f9caffcd14795bfe52add95a0dd8426d44e737b55fcb69e2b797816f4da0b1d2"}, + {file = "blis-0.7.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fb36989ed61233cfd48915896802ee6d3d87882190000f8cfe0cf4a3819f9a8"}, + {file = "blis-0.7.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ea09f961871f880d5dc622dce6c370e4859559f0ead897ae9b20ddafd6b07a2"}, + {file = "blis-0.7.11-cp39-cp39-win_amd64.whl", hash = "sha256:5bb38adabbb22f69f22c74bad025a010ae3b14de711bf5c715353980869d491d"}, + {file = "blis-0.7.11.tar.gz", hash = "sha256:cec6d48f75f7ac328ae1b6fbb372dde8c8a57c89559172277f66e01ff08d4d42"}, +] + +[package.dependencies] +numpy = {version = ">=1.19.0", markers = "python_version >= \"3.9\""} + +[[package]] +name = "catalogue" +version = "2.0.10" +description = "Super lightweight function registries for your library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "catalogue-2.0.10-py3-none-any.whl", hash = "sha256:58c2de0020aa90f4a2da7dfad161bf7b3b054c86a5f09fcedc0b2b740c109a9f"}, + {file = "catalogue-2.0.10.tar.gz", hash = "sha256:4f56daa940913d3f09d589c191c74e5a6d51762b3a9e37dd53b7437afd6cda15"}, +] + [[package]] name = "certifi" -version = "2023.11.17" +version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] [[package]] @@ -427,6 +482,26 @@ click = ">=4.0" [package.extras] dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] +[[package]] +name = "cloudpathlib" +version = "0.16.0" +description = "pathlib-style classes for cloud storage services." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "cloudpathlib-0.16.0-py3-none-any.whl", hash = "sha256:f46267556bf91f03db52b5df7a152548596a15aabca1c8731ef32b0b25a1a6a3"}, + {file = "cloudpathlib-0.16.0.tar.gz", hash = "sha256:cdfcd35d46d529587d744154a0bdf962aca953b725c8784cd2ec478354ea63a3"}, +] + +[package.dependencies] +typing_extensions = {version = ">4", markers = "python_version < \"3.11\""} + +[package.extras] +all = ["cloudpathlib[azure]", "cloudpathlib[gs]", "cloudpathlib[s3]"] +azure = ["azure-storage-blob (>=12)"] +gs = ["google-cloud-storage"] +s3 = ["boto3"] + [[package]] name = "cloudpickle" version = "3.0.0" @@ -460,65 +535,143 @@ files = [ {file = "commit_linter-1.0.3-py3-none-any.whl", hash = "sha256:404fc2adb7ad49fddc27b1fa4424c17bf41556dbaed8dc4c274ca1e104825fbb"}, ] +[[package]] +name = "confection" +version = "0.1.4" +description = "The sweetest config system for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "confection-0.1.4-py3-none-any.whl", hash = "sha256:a658818d004939069c3e2b3db74a2cb9d956a5e61a1c9ad61788e0ee09a7090f"}, + {file = "confection-0.1.4.tar.gz", hash = "sha256:e80f22fd008b5231a2e8852fac6de9e28f2276a04031d0536cff74fe4a990c8f"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<3.0.0" +srsly = ">=2.4.0,<3.0.0" + +[[package]] +name = "contourpy" +version = "1.2.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.9" +files = [ + {file = "contourpy-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0274c1cb63625972c0c007ab14dd9ba9e199c36ae1a231ce45d725cbcbfd10a8"}, + {file = "contourpy-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab459a1cbbf18e8698399c595a01f6dcc5c138220ca3ea9e7e6126232d102bb4"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fdd887f17c2f4572ce548461e4f96396681212d858cae7bd52ba3310bc6f00f"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d16edfc3fc09968e09ddffada434b3bf989bf4911535e04eada58469873e28e"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c203f617abc0dde5792beb586f827021069fb6d403d7f4d5c2b543d87edceb9"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b69303ceb2e4d4f146bf82fda78891ef7bcd80c41bf16bfca3d0d7eb545448aa"}, + {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:884c3f9d42d7218304bc74a8a7693d172685c84bd7ab2bab1ee567b769696df9"}, + {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4a1b1208102be6e851f20066bf0e7a96b7d48a07c9b0cfe6d0d4545c2f6cadab"}, + {file = "contourpy-1.2.0-cp310-cp310-win32.whl", hash = "sha256:34b9071c040d6fe45d9826cbbe3727d20d83f1b6110d219b83eb0e2a01d79488"}, + {file = "contourpy-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd2f1ae63998da104f16a8b788f685e55d65760cd1929518fd94cd682bf03e41"}, + {file = "contourpy-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd10c26b4eadae44783c45ad6655220426f971c61d9b239e6f7b16d5cdaaa727"}, + {file = "contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5c6b28956b7b232ae801406e529ad7b350d3f09a4fde958dfdf3c0520cdde0dd"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebeac59e9e1eb4b84940d076d9f9a6cec0064e241818bcb6e32124cc5c3e377a"}, + 
{file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:139d8d2e1c1dd52d78682f505e980f592ba53c9f73bd6be102233e358b401063"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e9dc350fb4c58adc64df3e0703ab076f60aac06e67d48b3848c23647ae4310e"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18fc2b4ed8e4a8fe849d18dce4bd3c7ea637758c6343a1f2bae1e9bd4c9f4686"}, + {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:16a7380e943a6d52472096cb7ad5264ecee36ed60888e2a3d3814991a0107286"}, + {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8d8faf05be5ec8e02a4d86f616fc2a0322ff4a4ce26c0f09d9f7fb5330a35c95"}, + {file = "contourpy-1.2.0-cp311-cp311-win32.whl", hash = "sha256:67b7f17679fa62ec82b7e3e611c43a016b887bd64fb933b3ae8638583006c6d6"}, + {file = "contourpy-1.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:99ad97258985328b4f207a5e777c1b44a83bfe7cf1f87b99f9c11d4ee477c4de"}, + {file = "contourpy-1.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:575bcaf957a25d1194903a10bc9f316c136c19f24e0985a2b9b5608bdf5dbfe0"}, + {file = "contourpy-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9e6c93b5b2dbcedad20a2f18ec22cae47da0d705d454308063421a3b290d9ea4"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464b423bc2a009088f19bdf1f232299e8b6917963e2b7e1d277da5041f33a779"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68ce4788b7d93e47f84edd3f1f95acdcd142ae60bc0e5493bfd120683d2d4316"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7d1f8871998cdff5d2ff6a087e5e1780139abe2838e85b0b46b7ae6cc25399"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e739530c662a8d6d42c37c2ed52a6f0932c2d4a3e8c1f90692ad0ce1274abe0"}, + {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:247b9d16535acaa766d03037d8e8fb20866d054d3c7fbf6fd1f993f11fc60ca0"}, + {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:461e3ae84cd90b30f8d533f07d87c00379644205b1d33a5ea03381edc4b69431"}, + {file = "contourpy-1.2.0-cp312-cp312-win32.whl", hash = "sha256:1c2559d6cffc94890b0529ea7eeecc20d6fadc1539273aa27faf503eb4656d8f"}, + {file = "contourpy-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:491b1917afdd8638a05b611a56d46587d5a632cabead889a5440f7c638bc6ed9"}, + {file = "contourpy-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5fd1810973a375ca0e097dee059c407913ba35723b111df75671a1976efa04bc"}, + {file = "contourpy-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:999c71939aad2780f003979b25ac5b8f2df651dac7b38fb8ce6c46ba5abe6ae9"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7caf9b241464c404613512d5594a6e2ff0cc9cb5615c9475cc1d9b514218ae8"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:266270c6f6608340f6c9836a0fb9b367be61dde0c9a9a18d5ece97774105ff3e"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbd50d0a0539ae2e96e537553aff6d02c10ed165ef40c65b0e27e744a0f10af8"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:11f8d2554e52f459918f7b8e6aa20ec2a3bce35ce95c1f0ef4ba36fbda306df5"}, + {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ce96dd400486e80ac7d195b2d800b03e3e6a787e2a522bfb83755938465a819e"}, + {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d3364b999c62f539cd403f8123ae426da946e142312a514162adb2addd8d808"}, + {file = "contourpy-1.2.0-cp39-cp39-win32.whl", hash = "sha256:1c88dfb9e0c77612febebb6ac69d44a8d81e3dc60f993215425b62c1161353f4"}, + {file = "contourpy-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:78e6ad33cf2e2e80c5dfaaa0beec3d61face0fb650557100ee36db808bfa6843"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:be16975d94c320432657ad2402f6760990cb640c161ae6da1363051805fa8108"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b95a225d4948b26a28c08307a60ac00fb8671b14f2047fc5476613252a129776"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d7e03c0f9a4f90dc18d4e77e9ef4ec7b7bbb437f7f675be8e530d65ae6ef956"}, + {file = "contourpy-1.2.0.tar.gz", hash = "sha256:171f311cb758de7da13fc53af221ae47a5877be5a0843a9fe150818c51ed276a"}, +] + +[package.dependencies] +numpy = ">=1.20,<2.0" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.6.1)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] + [[package]] name = "coverage" -version = "7.4.0" +version = "7.4.4" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, - {file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"}, - {file = "coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"}, - {file = "coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"}, - {file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"}, - {file = 
"coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"}, - {file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"}, - {file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"}, - {file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"}, - {file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"}, - {file = 
"coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"}, - {file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"}, - {file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"}, - {file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"}, - {file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"}, - {file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"}, - {file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"}, + {file = "coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2"}, + {file = "coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c"}, + {file = "coverage-7.4.4-cp310-cp310-win32.whl", hash = "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d"}, + {file = "coverage-7.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f"}, + {file = "coverage-7.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf"}, + {file = "coverage-7.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b"}, + {file = "coverage-7.4.4-cp311-cp311-win32.whl", hash = "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286"}, + {file = "coverage-7.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec"}, + {file = "coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76"}, + {file = "coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818"}, + {file = "coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978"}, + {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70"}, + {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9"}, + {file = "coverage-7.4.4-cp312-cp312-win32.whl", hash = "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0"}, + {file = "coverage-7.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e"}, + {file = "coverage-7.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384"}, + {file = "coverage-7.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c"}, + {file = "coverage-7.4.4-cp38-cp38-win32.whl", hash = "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e"}, + {file = "coverage-7.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8"}, + {file = "coverage-7.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d"}, + {file = "coverage-7.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade"}, + {file = "coverage-7.4.4-cp39-cp39-win32.whl", 
hash = "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57"}, + {file = "coverage-7.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c"}, + {file = "coverage-7.4.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677"}, + {file = "coverage-7.4.4.tar.gz", hash = "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49"}, ] [package.dependencies] @@ -528,24 +681,61 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1 toml = ["tomli"] [[package]] -name = "databricks-cli" -version = "0.18.0" -description = "A command line interface for Databricks" -optional = true -python-versions = ">=3.7" +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" files = [ - {file = "databricks-cli-0.18.0.tar.gz", hash = "sha256:87569709eda9af3e9db8047b691e420b5e980c62ef01675575c0d2b9b4211eb7"}, - {file = "databricks_cli-0.18.0-py2.py3-none-any.whl", hash = "sha256:1176a5f42d3e8af4abfc915446fb23abc44513e325c436725f5898cbb9e3384b"}, + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, ] -[package.dependencies] -click = ">=7.0" -oauthlib = ">=3.1.0" -pyjwt = ">=1.7.0" -requests = ">=2.17.3" -six = ">=1.10.0" -tabulate = ">=0.7.7" -urllib3 = ">=1.26.7,<3" +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "cymem" +version = "2.0.8" +description = "Manage calls to calloc/free through Cython" +optional = false +python-versions = "*" +files = [ + {file = "cymem-2.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77b5d3a73c41a394efd5913ab7e48512054cd2dabb9582d489535456641c7666"}, + {file = "cymem-2.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bd33da892fb560ba85ea14b1528c381ff474048e861accc3366c8b491035a378"}, + {file = "cymem-2.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a551eda23eebd6d076b855f77a5ed14a1d1cae5946f7b3cb5de502e21b39b0"}, + {file = "cymem-2.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8260445652ae5ab19fff6851f32969a7b774f309162e83367dd0f69aac5dbf7"}, + {file = "cymem-2.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:a63a2bef4c7e0aec7c9908bca0a503bf91ac7ec18d41dd50dc7dff5d994e4387"}, + {file = "cymem-2.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6b84b780d52cb2db53d4494fe0083c4c5ee1f7b5380ceaea5b824569009ee5bd"}, + {file = "cymem-2.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d5f83dc3cb5a39f0e32653cceb7c8ce0183d82f1162ca418356f4a8ed9e203e"}, + {file = "cymem-2.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ac218cf8a43a761dc6b2f14ae8d183aca2bbb85b60fe316fd6613693b2a7914"}, + {file = "cymem-2.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42c993589d1811ec665d37437d5677b8757f53afadd927bf8516ac8ce2d3a50c"}, + {file = "cymem-2.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:ab3cf20e0eabee9b6025ceb0245dadd534a96710d43fb7a91a35e0b9e672ee44"}, + {file = "cymem-2.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cb51fddf1b920abb1f2742d1d385469bc7b4b8083e1cfa60255e19bc0900ccb5"}, + {file = 
"cymem-2.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9235957f8c6bc2574a6a506a1687164ad629d0b4451ded89d49ebfc61b52660c"}, + {file = "cymem-2.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2cc38930ff5409f8d61f69a01e39ecb185c175785a1c9bec13bcd3ac8a614ba"}, + {file = "cymem-2.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf49e3ea2c441f7b7848d5c61b50803e8cbd49541a70bb41ad22fce76d87603"}, + {file = "cymem-2.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:ecd12e3bacf3eed5486e4cd8ede3c12da66ee0e0a9d0ae046962bc2bb503acef"}, + {file = "cymem-2.0.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:167d8019db3b40308aabf8183fd3fbbc256323b645e0cbf2035301058c439cd0"}, + {file = "cymem-2.0.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17cd2c2791c8f6b52f269a756ba7463f75bf7265785388a2592623b84bb02bf8"}, + {file = "cymem-2.0.8-cp36-cp36m-win_amd64.whl", hash = "sha256:6204f0a3307bf45d109bf698ba37997ce765f21e359284328e4306c7500fcde8"}, + {file = "cymem-2.0.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b9c05db55ea338648f8e5f51dd596568c7f62c5ae32bf3fa5b1460117910ebae"}, + {file = "cymem-2.0.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ce641f7ba0489bd1b42a4335a36f38c8507daffc29a512681afaba94a0257d2"}, + {file = "cymem-2.0.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6b83a5972a64f62796118da79dfeed71f4e1e770b2b7455e889c909504c2358"}, + {file = "cymem-2.0.8-cp37-cp37m-win_amd64.whl", hash = "sha256:ada6eb022e4a0f4f11e6356a5d804ceaa917174e6cf33c0b3e371dbea4dd2601"}, + {file = "cymem-2.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1e593cd57e2e19eb50c7ddaf7e230b73c890227834425b9dadcd4a86834ef2ab"}, + {file = "cymem-2.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d513f0d5c6d76facdc605e42aa42c8d50bb7dedca3144ec2b47526381764deb0"}, + {file = "cymem-2.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e370dd54359101b125bfb191aca0542718077b4edb90ccccba1a28116640fed"}, + {file = "cymem-2.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84f8c58cde71b8fc7024883031a4eec66c0a9a4d36b7850c3065493652695156"}, + {file = "cymem-2.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a6edddb30dd000a27987fcbc6f3c23b7fe1d74f539656952cb086288c0e4e29"}, + {file = "cymem-2.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b896c83c08dadafe8102a521f83b7369a9c5cc3e7768eca35875764f56703f4c"}, + {file = "cymem-2.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4f8f2bfee34f6f38b206997727d29976666c89843c071a968add7d61a1e8024"}, + {file = "cymem-2.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7372e2820fa66fd47d3b135f3eb574ab015f90780c3a21cfd4809b54f23a4723"}, + {file = "cymem-2.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4e57bee56d35b90fc2cba93e75b2ce76feaca05251936e28a96cf812a1f5dda"}, + {file = "cymem-2.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ceeab3ce2a92c7f3b2d90854efb32cb203e78cb24c836a5a9a2cac221930303b"}, + {file = "cymem-2.0.8.tar.gz", hash = "sha256:8fb09d222e21dcf1c7e907dc85cf74501d4cea6c4ed4ac6c9e016f98fb59cbbf"}, +] [[package]] name = "debuglater" @@ -587,6 +777,21 @@ files = [ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, ] +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of 
Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + [[package]] name = "distlib" version = "0.3.8" @@ -619,6 +824,23 @@ urllib3 = ">=1.26.0" ssh = ["paramiko (>=2.4.3)"] websockets = ["websocket-client (>=1.3.0)"] +[[package]] +name = "en_core_web_sm" +version = "3.7.1" +description = "English pipeline optimized for CPU. Components: tok2vec, tagger, parser, senter, ner, attribute_ruler, lemmatizer." +optional = false +python-versions = "*" +files = [ + {file = "en_core_web_sm-3.7.1.tar.gz", hash = "sha256:1075c2aa2bc2fee105ab6e90a01a5d1a428c9f5b20a1fa003dc2cb6a438d295e"}, +] + +[package.dependencies] +spacy = ">=3.7.2,<3.8.0" + +[package.source] +type = "url" +url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1.tar.gz" + [[package]] name = "entrypoints" version = "0.4" @@ -688,6 +910,93 @@ docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1 testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] +[[package]] +name = "flatbuffers" +version = "24.3.25" +description = "The FlatBuffers serialization format for Python" +optional = false +python-versions = "*" +files = [ + {file = "flatbuffers-24.3.25-py2.py3-none-any.whl", hash = "sha256:8dbdec58f935f3765e4f7f3cf635ac3a77f83568138d6a2311f524ec96364812"}, + {file = "flatbuffers-24.3.25.tar.gz", hash = "sha256:de2ec5b203f21441716617f38443e0a8ebf3d25bf0d9c0bb0ce68fa00ad546a4"}, +] + +[[package]] +name = "fonttools" +version = "4.50.0" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.50.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effd303fb422f8ce06543a36ca69148471144c534cc25f30e5be752bc4f46736"}, + {file = "fonttools-4.50.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7913992ab836f621d06aabac118fc258b9947a775a607e1a737eb3a91c360335"}, + {file = "fonttools-4.50.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e0a1c5bd2f63da4043b63888534b52c5a1fd7ae187c8ffc64cbb7ae475b9dab"}, + {file = "fonttools-4.50.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d40fc98540fa5360e7ecf2c56ddf3c6e7dd04929543618fd7b5cc76e66390562"}, + {file = "fonttools-4.50.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fff65fbb7afe137bac3113827855e0204482727bddd00a806034ab0d3951d0d"}, + {file = "fonttools-4.50.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b1aeae3dd2ee719074a9372c89ad94f7c581903306d76befdaca2a559f802472"}, + {file = "fonttools-4.50.0-cp310-cp310-win32.whl", hash = "sha256:e9623afa319405da33b43c85cceb0585a6f5d3a1d7c604daf4f7e1dd55c03d1f"}, + {file = "fonttools-4.50.0-cp310-cp310-win_amd64.whl", hash = "sha256:778c5f43e7e654ef7fe0605e80894930bc3a7772e2f496238e57218610140f54"}, + {file = "fonttools-4.50.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3dfb102e7f63b78c832e4539969167ffcc0375b013080e6472350965a5fe8048"}, + {file = "fonttools-4.50.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:9e58fe34cb379ba3d01d5d319d67dd3ce7ca9a47ad044ea2b22635cd2d1247fc"}, + {file = "fonttools-4.50.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c673ab40d15a442a4e6eb09bf007c1dda47c84ac1e2eecbdf359adacb799c24"}, + {file = "fonttools-4.50.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b3ac35cdcd1a4c90c23a5200212c1bb74fa05833cc7c14291d7043a52ca2aaa"}, + {file = "fonttools-4.50.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8844e7a2c5f7ecf977e82eb6b3014f025c8b454e046d941ece05b768be5847ae"}, + {file = "fonttools-4.50.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f849bd3c5c2249b49c98eca5aaebb920d2bfd92b3c69e84ca9bddf133e9f83f0"}, + {file = "fonttools-4.50.0-cp311-cp311-win32.whl", hash = "sha256:39293ff231b36b035575e81c14626dfc14407a20de5262f9596c2cbb199c3625"}, + {file = "fonttools-4.50.0-cp311-cp311-win_amd64.whl", hash = "sha256:c33d5023523b44d3481624f840c8646656a1def7630ca562f222eb3ead16c438"}, + {file = "fonttools-4.50.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b4a886a6dbe60100ba1cd24de962f8cd18139bd32808da80de1fa9f9f27bf1dc"}, + {file = "fonttools-4.50.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b2ca1837bfbe5eafa11313dbc7edada79052709a1fffa10cea691210af4aa1fa"}, + {file = "fonttools-4.50.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0493dd97ac8977e48ffc1476b932b37c847cbb87fd68673dee5182004906828"}, + {file = "fonttools-4.50.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77844e2f1b0889120b6c222fc49b2b75c3d88b930615e98893b899b9352a27ea"}, + {file = "fonttools-4.50.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3566bfb8c55ed9100afe1ba6f0f12265cd63a1387b9661eb6031a1578a28bad1"}, + {file = "fonttools-4.50.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:35e10ddbc129cf61775d58a14f2d44121178d89874d32cae1eac722e687d9019"}, + {file = "fonttools-4.50.0-cp312-cp312-win32.whl", hash = "sha256:cc8140baf9fa8f9b903f2b393a6c413a220fa990264b215bf48484f3d0bf8710"}, + {file = "fonttools-4.50.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ccc85fd96373ab73c59833b824d7a73846670a0cb1f3afbaee2b2c426a8f931"}, + {file = "fonttools-4.50.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e270a406219af37581d96c810172001ec536e29e5593aa40d4c01cca3e145aa6"}, + {file = "fonttools-4.50.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac2463de667233372e9e1c7e9de3d914b708437ef52a3199fdbf5a60184f190c"}, + {file = "fonttools-4.50.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47abd6669195abe87c22750dbcd366dc3a0648f1b7c93c2baa97429c4dc1506e"}, + {file = "fonttools-4.50.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:074841375e2e3d559aecc86e1224caf78e8b8417bb391e7d2506412538f21adc"}, + {file = "fonttools-4.50.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0743fd2191ad7ab43d78cd747215b12033ddee24fa1e088605a3efe80d6984de"}, + {file = "fonttools-4.50.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3d7080cce7be5ed65bee3496f09f79a82865a514863197ff4d4d177389e981b0"}, + {file = "fonttools-4.50.0-cp38-cp38-win32.whl", hash = "sha256:a467ba4e2eadc1d5cc1a11d355abb945f680473fbe30d15617e104c81f483045"}, + {file = "fonttools-4.50.0-cp38-cp38-win_amd64.whl", hash = "sha256:f77e048f805e00870659d6318fd89ef28ca4ee16a22b4c5e1905b735495fc422"}, + {file = "fonttools-4.50.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:b6245eafd553c4e9a0708e93be51392bd2288c773523892fbd616d33fd2fda59"}, + {file = "fonttools-4.50.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a4062cc7e8de26f1603323ef3ae2171c9d29c8a9f5e067d555a2813cd5c7a7e0"}, + {file = "fonttools-4.50.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34692850dfd64ba06af61e5791a441f664cb7d21e7b544e8f385718430e8f8e4"}, + {file = "fonttools-4.50.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678dd95f26a67e02c50dcb5bf250f95231d455642afbc65a3b0bcdacd4e4dd38"}, + {file = "fonttools-4.50.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4f2ce7b0b295fe64ac0a85aef46a0f2614995774bd7bc643b85679c0283287f9"}, + {file = "fonttools-4.50.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d346f4dc2221bfb7ab652d1e37d327578434ce559baf7113b0f55768437fe6a0"}, + {file = "fonttools-4.50.0-cp39-cp39-win32.whl", hash = "sha256:a51eeaf52ba3afd70bf489be20e52fdfafe6c03d652b02477c6ce23c995222f4"}, + {file = "fonttools-4.50.0-cp39-cp39-win_amd64.whl", hash = "sha256:8639be40d583e5d9da67795aa3eeeda0488fb577a1d42ae11a5036f18fb16d93"}, + {file = "fonttools-4.50.0-py3-none-any.whl", hash = "sha256:48fa36da06247aa8282766cfd63efff1bb24e55f020f29a335939ed3844d20d3"}, + {file = "fonttools-4.50.0.tar.gz", hash = "sha256:fa5cf61058c7dbb104c2ac4e782bf1b2016a8cf2f69de6e4dd6a865d2c969bb5"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "pycairo", "scipy"] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "gast" +version = "0.5.4" +description = "Python AST that abstracts the underlying Python version" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "gast-0.5.4-py3-none-any.whl", hash = "sha256:6fc4fa5fa10b72fb8aab4ae58bcb023058386e67b6fa2e3e34cec5c769360316"}, + {file = "gast-0.5.4.tar.gz", hash = "sha256:9c270fe5f4b130969b54174de7db4e764b09b4f7f67ccfc32480e29f78348d97"}, +] + [[package]] name = "ghp-import" version = "2.1.0" @@ -763,20 +1072,35 @@ trusted-deps = ["arrow (==1.2.3)", "click (==8.1.3)", "sh (==1.14.3)"] [[package]] name = "gitpython" -version = "3.1.41" +version = "3.1.42" description = "GitPython is a Python library used to interact with Git repositories" optional = true python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.41-py3-none-any.whl", hash = "sha256:c36b6634d069b3f719610175020a9aed919421c87552185b085e04fbbdb10b7c"}, - {file = "GitPython-3.1.41.tar.gz", hash = "sha256:ed66e624884f76df22c8e16066d567aaa5a37d5b5fa19db2c6df6f7156db9048"}, + {file = "GitPython-3.1.42-py3-none-any.whl", hash = "sha256:1bf9cd7c9e7255f77778ea54359e54ac22a72a5b51288c457c881057b7bb9ecd"}, + {file = "GitPython-3.1.42.tar.gz", hash = "sha256:2d99869e0fef71a73cbd242528105af1d6c1b108c60dfabd994bf292f76c3ceb"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" [package.extras] -test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", 
"pytest-instafail", "pytest-mock", "pytest-sugar", "sumtypes"] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar"] + +[[package]] +name = "google-pasta" +version = "0.2.0" +description = "pasta is an AST-based Python refactoring library" +optional = false +python-versions = "*" +files = [ + {file = "google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"}, + {file = "google_pasta-0.2.0-py2-none-any.whl", hash = "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954"}, + {file = "google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed"}, +] + +[package.dependencies] +six = "*" [[package]] name = "greenlet" @@ -851,27 +1175,130 @@ test = ["objgraph", "psutil"] [[package]] name = "griffe" -version = "0.39.1" +version = "0.42.1" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." optional = false python-versions = ">=3.8" files = [ - {file = "griffe-0.39.1-py3-none-any.whl", hash = "sha256:6ce4ecffcf0d2f96362c5974b3f7df812da8f8d4cfcc5ebc8202ef72656fc087"}, - {file = "griffe-0.39.1.tar.gz", hash = "sha256:ead8dfede6e6531cce6bf69090a4f3c6d36fdf923c43f8e85aa530552cef0c09"}, + {file = "griffe-0.42.1-py3-none-any.whl", hash = "sha256:7e805e35617601355edcac0d3511cedc1ed0cb1f7645e2d336ae4b05bbae7b3b"}, + {file = "griffe-0.42.1.tar.gz", hash = "sha256:57046131384043ed078692b85d86b76568a686266cc036b9b56b704466f803ce"}, ] [package.dependencies] colorama = ">=0.4" +[[package]] +name = "grpcio" +version = "1.62.1" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.62.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e"}, + {file = "grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70"}, + {file = "grpcio-1.62.1-cp310-cp310-win32.whl", hash = "sha256:4187201a53f8561c015bc745b81a1b2d278967b8de35f3399b84b0695e281d5f"}, + {file = "grpcio-1.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:844d1f3fb11bd1ed362d3fdc495d0770cfab75761836193af166fee113421d66"}, + {file = "grpcio-1.62.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:833379943d1728a005e44103f17ecd73d058d37d95783eb8f0b28ddc1f54d7b2"}, + {file = "grpcio-1.62.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:c7fcc6a32e7b7b58f5a7d27530669337a5d587d4066060bcb9dee7a8c833dfb7"}, + {file = 
"grpcio-1.62.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:fa7d28eb4d50b7cbe75bb8b45ed0da9a1dc5b219a0af59449676a29c2eed9698"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48f7135c3de2f298b833be8b4ae20cafe37091634e91f61f5a7eb3d61ec6f660"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71f11fd63365ade276c9d4a7b7df5c136f9030e3457107e1791b3737a9b9ed6a"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b49fd8fe9f9ac23b78437da94c54aa7e9996fbb220bac024a67469ce5d0825f"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:482ae2ae78679ba9ed5752099b32e5fe580443b4f798e1b71df412abf43375db"}, + {file = "grpcio-1.62.1-cp311-cp311-win32.whl", hash = "sha256:1faa02530b6c7426404372515fe5ddf66e199c2ee613f88f025c6f3bd816450c"}, + {file = "grpcio-1.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bd90b8c395f39bc82a5fb32a0173e220e3f401ff697840f4003e15b96d1befc"}, + {file = "grpcio-1.62.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:b134d5d71b4e0837fff574c00e49176051a1c532d26c052a1e43231f252d813b"}, + {file = "grpcio-1.62.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d1f6c96573dc09d50dbcbd91dbf71d5cf97640c9427c32584010fbbd4c0e0037"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:359f821d4578f80f41909b9ee9b76fb249a21035a061a327f91c953493782c31"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a485f0c2010c696be269184bdb5ae72781344cb4e60db976c59d84dd6354fac9"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b50b09b4dc01767163d67e1532f948264167cd27f49e9377e3556c3cba1268e1"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3227c667dccbe38f2c4d943238b887bac588d97c104815aecc62d2fd976e014b"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3952b581eb121324853ce2b191dae08badb75cd493cb4e0243368aa9e61cfd41"}, + {file = "grpcio-1.62.1-cp312-cp312-win32.whl", hash = "sha256:83a17b303425104d6329c10eb34bba186ffa67161e63fa6cdae7776ff76df73f"}, + {file = "grpcio-1.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:6696ffe440333a19d8d128e88d440f91fb92c75a80ce4b44d55800e656a3ef1d"}, + {file = "grpcio-1.62.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:e3393b0823f938253370ebef033c9fd23d27f3eae8eb9a8f6264900c7ea3fb5a"}, + {file = "grpcio-1.62.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:83e7ccb85a74beaeae2634f10eb858a0ed1a63081172649ff4261f929bacfd22"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:882020c87999d54667a284c7ddf065b359bd00251fcd70279ac486776dbf84ec"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a10383035e864f386fe096fed5c47d27a2bf7173c56a6e26cffaaa5a361addb1"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:960edebedc6b9ada1ef58e1c71156f28689978188cd8cff3b646b57288a927d9"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:23e2e04b83f347d0aadde0c9b616f4726c3d76db04b438fd3904b289a725267f"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:978121758711916d34fe57c1f75b79cdfc73952f1481bb9583399331682d36f7"}, + {file = "grpcio-1.62.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:9084086190cc6d628f282e5615f987288b95457292e969b9205e45b442276407"}, + {file = "grpcio-1.62.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:22bccdd7b23c420a27fd28540fb5dcbc97dc6be105f7698cb0e7d7a420d0e362"}, + {file = "grpcio-1.62.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:8999bf1b57172dbc7c3e4bb3c732658e918f5c333b2942243f10d0d653953ba9"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:d9e52558b8b8c2f4ac05ac86344a7417ccdd2b460a59616de49eb6933b07a0bd"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1714e7bc935780bc3de1b3fcbc7674209adf5208ff825799d579ffd6cd0bd505"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8842ccbd8c0e253c1f189088228f9b433f7a93b7196b9e5b6f87dba393f5d5d"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1f1e7b36bdff50103af95a80923bf1853f6823dd62f2d2a2524b66ed74103e49"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bba97b8e8883a8038606480d6b6772289f4c907f6ba780fa1f7b7da7dfd76f06"}, + {file = "grpcio-1.62.1-cp38-cp38-win32.whl", hash = "sha256:a7f615270fe534548112a74e790cd9d4f5509d744dd718cd442bf016626c22e4"}, + {file = "grpcio-1.62.1-cp38-cp38-win_amd64.whl", hash = "sha256:e6c8c8693df718c5ecbc7babb12c69a4e3677fd11de8886f05ab22d4e6b1c43b"}, + {file = "grpcio-1.62.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:73db2dc1b201d20ab7083e7041946910bb991e7e9761a0394bbc3c2632326483"}, + {file = "grpcio-1.62.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:407b26b7f7bbd4f4751dbc9767a1f0716f9fe72d3d7e96bb3ccfc4aace07c8de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f8de7c8cef9261a2d0a62edf2ccea3d741a523c6b8a6477a340a1f2e417658de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd5c8a1af40ec305d001c60236308a67e25419003e9bb3ebfab5695a8d0b369"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0477cb31da67846a33b1a75c611f88bfbcd427fe17701b6317aefceee1b96f"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:60dcd824df166ba266ee0cfaf35a31406cd16ef602b49f5d4dfb21f014b0dedd"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:973c49086cabab773525f6077f95e5a993bfc03ba8fc32e32f2c279497780585"}, + {file = "grpcio-1.62.1-cp39-cp39-win32.whl", hash = "sha256:12859468e8918d3bd243d213cd6fd6ab07208195dc140763c00dfe901ce1e1b4"}, + {file = "grpcio-1.62.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7209117bbeebdfa5d898205cc55153a51285757902dd73c47de498ad4d11332"}, + {file = "grpcio-1.62.1.tar.gz", hash = "sha256:6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.62.1)"] + +[[package]] +name = "h5py" +version = "3.10.0" +description = "Read and write HDF5 files from Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "h5py-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f"}, + {file = "h5py-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c"}, + {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03"}, + {file = 
"h5py-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d"}, + {file = "h5py-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f"}, + {file = "h5py-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc"}, + {file = "h5py-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd"}, + {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7"}, + {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52"}, + {file = "h5py-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684"}, + {file = "h5py-3.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3"}, + {file = "h5py-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20"}, + {file = "h5py-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039"}, + {file = "h5py-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339"}, + {file = "h5py-3.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:492305a074327e8d2513011fa9fffeb54ecb28a04ca4c4227d7e1e9616d35641"}, + {file = "h5py-3.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9450464b458cca2c86252b624279115dcaa7260a40d3cb1594bf2b410a2bd1a3"}, + {file = "h5py-3.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd6f6d1384a9f491732cee233b99cd4bfd6e838a8815cc86722f9d2ee64032af"}, + {file = "h5py-3.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3074ec45d3dc6e178c6f96834cf8108bf4a60ccb5ab044e16909580352010a97"}, + {file = "h5py-3.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:212bb997a91e6a895ce5e2f365ba764debeaef5d2dca5c6fb7098d66607adf99"}, + {file = "h5py-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5dfc65ac21fa2f630323c92453cadbe8d4f504726ec42f6a56cf80c2f90d6c52"}, + {file = "h5py-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d4682b94fd36ab217352be438abd44c8f357c5449b8995e63886b431d260f3d3"}, + {file = "h5py-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aece0e2e1ed2aab076c41802e50a0c3e5ef8816d60ece39107d68717d4559824"}, + {file = "h5py-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a61b2c2ad65b1fabc28802d133eed34debcc2c8b420cb213d3d4ef4d3e2229"}, + {file = "h5py-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:ae2f0201c950059676455daf92700eeb57dcf5caaf71b9e1328e6e6593601770"}, + {file = "h5py-3.10.0.tar.gz", hash = "sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049"}, +] + +[package.dependencies] +numpy = ">=1.17.3" + [[package]] name = "identify" -version = "2.5.33" +version = "2.5.35" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.5.33-py2.py3-none-any.whl", hash = 
"sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34"}, - {file = "identify-2.5.33.tar.gz", hash = "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d"}, + {file = "identify-2.5.35-py2.py3-none-any.whl", hash = "sha256:c4de0081837b211594f8e877a6b4fad7ca32bbfc1a9307fdd61c28bfe923f13e"}, + {file = "identify-2.5.35.tar.gz", hash = "sha256:10a7ca245cfcd756a554a7288159f72ff105ad233c7c4b9c6f0f4d108f5f6791"}, ] [package.extras] @@ -890,32 +1317,32 @@ files = [ [[package]] name = "importlib-metadata" -version = "7.0.1" +version = "7.1.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, - {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, + {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"}, + {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] [[package]] name = "importlib-resources" -version = "6.1.1" +version = "6.4.0" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_resources-6.1.1-py3-none-any.whl", hash = "sha256:e8bf90d8213b486f428c9c39714b920041cb02c184686a3dee24905aaa8105d6"}, - {file = "importlib_resources-6.1.1.tar.gz", hash = "sha256:3893a00122eafde6894c59914446a512f728a0c1a45f9bb9b63721b6bacf0b4a"}, + {file = "importlib_resources-6.4.0-py3-none-any.whl", hash = "sha256:50d10f043df931902d4194ea07ec57960f66a80449ff867bfe782b4c486ba78c"}, + {file = "importlib_resources-6.4.0.tar.gz", hash = "sha256:cdb2b453b8046ca4e3798eb1d84f3cce1446a0e8e7b5ef4efb600f19fc398145"}, ] [package.dependencies] @@ -923,7 +1350,7 @@ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] +testing = ["jaraco.test (>=5.4)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] [[package]] name = "iniconfig" @@ -936,45 +1363,6 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = 
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "ipython" -version = "8.12.0" -description = "IPython: Productive Interactive Computing" -optional = false -python-versions = ">=3.8" -files = [ - {file = "ipython-8.12.0-py3-none-any.whl", hash = "sha256:1c183bf61b148b00bcebfa5d9b39312733ae97f6dad90d7e9b4d86c8647f498c"}, - {file = "ipython-8.12.0.tar.gz", hash = "sha256:a950236df04ad75b5bc7f816f9af3d74dc118fd42f2ff7e80e8e60ca1f182e2d"}, -] - -[package.dependencies] -appnope = {version = "*", markers = "sys_platform == \"darwin\""} -backcall = "*" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -decorator = "*" -jedi = ">=0.16" -matplotlib-inline = "*" -pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} -pickleshare = "*" -prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" -pygments = ">=2.4.0" -stack-data = "*" -traitlets = ">=5" -typing-extensions = {version = "*", markers = "python_version < \"3.10\""} - -[package.extras] -all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] -black = ["black"] -doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] -kernel = ["ipykernel"] -nbconvert = ["nbconvert"] -nbformat = ["nbformat"] -notebook = ["ipywidgets", "notebook"] -parallel = ["ipyparallel"] -qtconsole = ["qtconsole"] -test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] -test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] - [[package]] name = "ipython" version = "8.18.1" @@ -1048,6 +1436,17 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "joblib" +version = "1.3.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"}, + {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"}, +] + [[package]] name = "jsonschema" version = "4.21.1" @@ -1061,9 +1460,7 @@ files = [ [package.dependencies] attrs = ">=22.2.0" -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} jsonschema-specifications = ">=2023.03.6" -pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} referencing = ">=0.28.4" rpds-py = ">=0.7.1" @@ -1083,18 +1480,17 @@ files = [ ] [package.dependencies] -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} referencing = ">=0.31.0" [[package]] name = "jupyter-client" -version = "8.6.0" +version = "8.6.1" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.6.0-py3-none-any.whl", hash = "sha256:909c474dbe62582ae62b758bca86d6518c85234bdee2d908c778db6d72f39d99"}, - {file = "jupyter_client-8.6.0.tar.gz", hash = "sha256:0642244bb83b4764ae60d07e010e15f0e2d275ec4e918a8f7b80fbbef3ca60c7"}, + {file = 
"jupyter_client-8.6.1-py3-none-any.whl", hash = "sha256:3b7bd22f058434e3b9a7ea4b1500ed47de2713872288c0d511d19926f99b459f"}, + {file = "jupyter_client-8.6.1.tar.gz", hash = "sha256:e842515e2bab8e19186d89fdfea7abd15e39dd581f94e399f00e2af5a1652d3f"}, ] [package.dependencies] @@ -1111,13 +1507,13 @@ test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pyt [[package]] name = "jupyter-core" -version = "5.7.1" +version = "5.7.2" description = "Jupyter core package. A base package on which Jupyter projects rely." optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_core-5.7.1-py3-none-any.whl", hash = "sha256:c65c82126453a723a2804aa52409930434598fd9d35091d63dfb919d2b765bb7"}, - {file = "jupyter_core-5.7.1.tar.gz", hash = "sha256:de61a9d7fc71240f688b2fb5ab659fbb56979458dc66a71decd098e03c79e218"}, + {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"}, + {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"}, ] [package.dependencies] @@ -1127,7 +1523,7 @@ traitlets = ">=5.3" [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] -test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] +test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] [[package]] name = "jupyterlab-pygments" @@ -1140,6 +1536,171 @@ files = [ {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, ] +[[package]] +name = "keras" +version = "3.1.1" +description = "Multi-backend Keras." +optional = false +python-versions = ">=3.9" +files = [ + {file = "keras-3.1.1-py3-none-any.whl", hash = "sha256:b5d45f0b5116b11db502da00bd501592364325d01724e6cb2032711e3e32677e"}, + {file = "keras-3.1.1.tar.gz", hash = "sha256:55558ea228dc38e7667874fd2e83eaf7faeb026e2e8615b36a8616830f7e303b"}, +] + +[package.dependencies] +absl-py = "*" +h5py = "*" +ml-dtypes = "*" +namex = "*" +numpy = "*" +optree = "*" +rich = "*" + +[[package]] +name = "kiwisolver" +version = "1.4.5" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, + {file = 
"kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, + 
{file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, + {file = 
"kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, + {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, +] + +[[package]] +name = "langcodes" +version = "3.3.0" +description = "Tools for labeling human languages with IETF language tags" +optional = false +python-versions = ">=3.6" +files = [ + {file = "langcodes-3.3.0-py3-none-any.whl", hash = "sha256:4d89fc9acb6e9c8fdef70bcdf376113a3db09b67285d9e1d534de6d8818e7e69"}, + {file = "langcodes-3.3.0.tar.gz", hash = "sha256:794d07d5a28781231ac335a1561b8442f8648ca07cd518310aeb45d6f0807ef6"}, +] + +[package.extras] +data = ["language-data (>=1.1,<2.0)"] + +[[package]] +name = "libclang" +version = "18.1.1" +description = "Clang Python Bindings, mirrored from the official LLVM repo: https://github.com/llvm/llvm-project/tree/main/clang/bindings/python, to make the installation process easier." +optional = false +python-versions = "*" +files = [ + {file = "libclang-18.1.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:6f14c3f194704e5d09769108f03185fce7acaf1d1ae4bbb2f30a72c2400cb7c5"}, + {file = "libclang-18.1.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:83ce5045d101b669ac38e6da8e58765f12da2d3aafb3b9b98d88b286a60964d8"}, + {file = "libclang-18.1.1-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:c533091d8a3bbf7460a00cb6c1a71da93bffe148f172c7d03b1c31fbf8aa2a0b"}, + {file = "libclang-18.1.1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:54dda940a4a0491a9d1532bf071ea3ef26e6dbaf03b5000ed94dd7174e8f9592"}, + {file = "libclang-18.1.1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:cf4a99b05376513717ab5d82a0db832c56ccea4fd61a69dbb7bccf2dfb207dbe"}, + {file = "libclang-18.1.1-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:69f8eb8f65c279e765ffd28aaa7e9e364c776c17618af8bff22a8df58677ff4f"}, + {file = "libclang-18.1.1-py2.py3-none-win_amd64.whl", hash = "sha256:4dd2d3b82fab35e2bf9ca717d7b63ac990a3519c7e312f19fa8e86dcc712f7fb"}, + {file = "libclang-18.1.1-py2.py3-none-win_arm64.whl", hash = "sha256:3f0e1f49f04d3cd198985fea0511576b0aee16f9ff0e0f0cad7f9c57ec3c20e8"}, + {file = "libclang-18.1.1.tar.gz", hash = "sha256:a1214966d08d73d971287fc3ead8dfaf82eb07fb197680d8b3859dbbbbf78250"}, +] + [[package]] name = "macholib" version = "1.16.3" @@ -1156,13 +1717,13 @@ altgraph = ">=0.17" [[package]] name = "markdown" -version = "3.5.2" +version = "3.6" description = "Python implementation of John Gruber's Markdown." 
optional = false python-versions = ">=3.8" files = [ - {file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"}, - {file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"}, + {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"}, + {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"}, ] [package.dependencies] @@ -1198,73 +1759,122 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "2.1.4" +version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.7" files = [ - {file = "MarkupSafe-2.1.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-win32.whl", hash = "sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0"}, - {file = "MarkupSafe-2.1.4-cp310-cp310-win_amd64.whl", hash = "sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-win32.whl", hash = "sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74"}, - {file = "MarkupSafe-2.1.4-cp311-cp311-win_amd64.whl", hash = "sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-win32.whl", hash = "sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475"}, - {file = "MarkupSafe-2.1.4-cp312-cp312-win_amd64.whl", hash = "sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-win32.whl", hash = "sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0"}, - {file = "MarkupSafe-2.1.4-cp37-cp37m-win_amd64.whl", hash = "sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-macosx_10_9_universal2.whl", hash 
= "sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-win32.whl", hash = "sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a"}, - {file = "MarkupSafe-2.1.4-cp38-cp38-win_amd64.whl", hash = "sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-win32.whl", hash = "sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6"}, - {file = "MarkupSafe-2.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959"}, - {file = "MarkupSafe-2.1.4.tar.gz", hash = "sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = 
"sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "matplotlib" +version = "3.8.3" +description = "Python plotting package" +optional = false +python-versions = ">=3.9" +files = [ + {file = "matplotlib-3.8.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cf60138ccc8004f117ab2a2bad513cc4d122e55864b4fe7adf4db20ca68a078f"}, + {file = "matplotlib-3.8.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f557156f7116be3340cdeef7f128fa99b0d5d287d5f41a16e169819dcf22357"}, + {file = "matplotlib-3.8.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f386cf162b059809ecfac3bcc491a9ea17da69fa35c8ded8ad154cd4b933d5ec"}, + {file = "matplotlib-3.8.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3c5f96f57b0369c288bf6f9b5274ba45787f7e0589a34d24bdbaf6d3344632f"}, + {file = "matplotlib-3.8.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:83e0f72e2c116ca7e571c57aa29b0fe697d4c6425c4e87c6e994159e0c008635"}, + {file = "matplotlib-3.8.3-cp310-cp310-win_amd64.whl", hash = "sha256:1c5c8290074ba31a41db1dc332dc2b62def469ff33766cbe325d32a3ee291aea"}, + {file = "matplotlib-3.8.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5184e07c7e1d6d1481862ee361905b7059f7fe065fc837f7c3dc11eeb3f2f900"}, + {file = "matplotlib-3.8.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d7e7e0993d0758933b1a241a432b42c2db22dfa37d4108342ab4afb9557cbe3e"}, + {file = "matplotlib-3.8.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04b36ad07eac9740fc76c2aa16edf94e50b297d6eb4c081e3add863de4bb19a7"}, + {file = "matplotlib-3.8.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7c42dae72a62f14982f1474f7e5c9959fc4bc70c9de11cc5244c6e766200ba65"}, + {file = "matplotlib-3.8.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf5932eee0d428192c40b7eac1399d608f5d995f975cdb9d1e6b48539a5ad8d0"}, + {file = "matplotlib-3.8.3-cp311-cp311-win_amd64.whl", hash = "sha256:40321634e3a05ed02abf7c7b47a50be50b53ef3eaa3a573847431a545585b407"}, + {file = "matplotlib-3.8.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:09074f8057917d17ab52c242fdf4916f30e99959c1908958b1fc6032e2d0f6d4"}, + {file = "matplotlib-3.8.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5745f6d0fb5acfabbb2790318db03809a253096e98c91b9a31969df28ee604aa"}, + {file = "matplotlib-3.8.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97653d869a71721b639714b42d87cda4cfee0ee74b47c569e4874c7590c55c5"}, + {file = "matplotlib-3.8.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:242489efdb75b690c9c2e70bb5c6550727058c8a614e4c7716f363c27e10bba1"}, + {file = "matplotlib-3.8.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:83c0653c64b73926730bd9ea14aa0f50f202ba187c307a881673bad4985967b7"}, + {file = "matplotlib-3.8.3-cp312-cp312-win_amd64.whl", hash = "sha256:ef6c1025a570354297d6c15f7d0f296d95f88bd3850066b7f1e7b4f2f4c13a39"}, + {file = "matplotlib-3.8.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c4af3f7317f8a1009bbb2d0bf23dfaba859eb7dd4ccbd604eba146dccaaaf0a4"}, + {file = "matplotlib-3.8.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4c6e00a65d017d26009bac6808f637b75ceade3e1ff91a138576f6b3065eeeba"}, + {file = "matplotlib-3.8.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7b49ab49a3bea17802df6872f8d44f664ba8f9be0632a60c99b20b6db2165b7"}, + {file = "matplotlib-3.8.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6728dde0a3997396b053602dbd907a9bd64ec7d5cf99e728b404083698d3ca01"}, + {file = "matplotlib-3.8.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:813925d08fb86aba139f2d31864928d67511f64e5945ca909ad5bc09a96189bb"}, + {file = "matplotlib-3.8.3-cp39-cp39-win_amd64.whl", hash = "sha256:cd3a0c2be76f4e7be03d34a14d49ded6acf22ef61f88da600a18a5cd8b3c5f3c"}, + {file = "matplotlib-3.8.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fa93695d5c08544f4a0dfd0965f378e7afc410d8672816aff1e81be1f45dbf2e"}, + {file = "matplotlib-3.8.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9764df0e8778f06414b9d281a75235c1e85071f64bb5d71564b97c1306a2afc"}, + {file = "matplotlib-3.8.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:5e431a09e6fab4012b01fc155db0ce6dccacdbabe8198197f523a4ef4805eb26"}, + {file = "matplotlib-3.8.3.tar.gz", hash = "sha256:7b416239e9ae38be54b028abbf9048aff5054a9aba5416bef0bd17f9162ce161"}, ] +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} +kiwisolver = ">=1.3.1" +numpy = ">=1.21,<2" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + [[package]] name = "matplotlib-inline" version = "0.1.6" @@ -1345,17 +1955,18 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-autorefs" -version = "0.5.0" +version = "1.0.1" description = "Automatically link across pages in MkDocs." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_autorefs-0.5.0-py3-none-any.whl", hash = "sha256:7930fcb8ac1249f10e683967aeaddc0af49d90702af111a5e390e8b20b3d97ff"}, - {file = "mkdocs_autorefs-0.5.0.tar.gz", hash = "sha256:9a5054a94c08d28855cfab967ada10ed5be76e2bfad642302a610b252c3274c0"}, + {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"}, + {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, ] [package.dependencies] Markdown = ">=3.3" +markupsafe = ">=2.0.1" mkdocs = ">=1.1" [[package]] @@ -1375,13 +1986,13 @@ markdown = ">=3.3" [[package]] name = "mkdocs-material" -version = "9.5.4" +version = "9.5.14" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.4-py3-none-any.whl", hash = "sha256:efd7cc8ae03296d728da9bd38f4db8b07ab61f9738a0cbd0dfaf2a15a50e7343"}, - {file = "mkdocs_material-9.5.4.tar.gz", hash = "sha256:3d196ee67fad16b2df1a458d650a8ac1890294eaae368d26cee71bc24ad41c40"}, + {file = "mkdocs_material-9.5.14-py3-none-any.whl", hash = "sha256:a45244ac221fda46ecf8337f00ec0e5cb5348ab9ffb203ca2a0c313b0d4dbc27"}, + {file = "mkdocs_material-9.5.14.tar.gz", hash = "sha256:2a1f8e67cda2587ab93ecea9ba42d0ca61d1d7b5fad8cf690eeaeb39dcd4b9af"}, ] [package.dependencies] @@ -1398,8 +2009,8 @@ regex = ">=2022.4" requests = ">=2.26,<3.0" [package.extras] -git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2,<2.0)"] -imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=9.4,<10.0)"] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] [[package]] @@ -1429,13 +2040,13 @@ mkdocs = ">=1.2" [[package]] name = "mkdocstrings" -version = "0.24.0" +version = "0.24.1" description = "Automatic documentation from sources, for MkDocs." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocstrings-0.24.0-py3-none-any.whl", hash = "sha256:f4908560c10f587326d8f5165d1908817b2e280bbf707607f601c996366a2264"}, - {file = "mkdocstrings-0.24.0.tar.gz", hash = "sha256:222b1165be41257b494a9d29b14135d2b7ca43f38161d5b10caae03b87bd4f7e"}, + {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, + {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, ] [package.dependencies] @@ -1471,37 +2082,73 @@ files = [ griffe = ">=0.37" mkdocstrings = ">=0.20" +[[package]] +name = "ml-dtypes" +version = "0.3.2" +description = "" +optional = false +python-versions = ">=3.9" +files = [ + {file = "ml_dtypes-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7afde548890a92b41c0fed3a6c525f1200a5727205f73dc21181a2726571bb53"}, + {file = "ml_dtypes-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a746fe5fb9cd974a91070174258f0be129c592b93f9ce7df6cc336416c3fbd"}, + {file = "ml_dtypes-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:961134ea44c7b8ca63eda902a44b58cd8bd670e21d62e255c81fba0a8e70d9b7"}, + {file = "ml_dtypes-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:6b35c4e8ca957c877ac35c79ffa77724ecc3702a1e4b18b08306c03feae597bb"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:763697ab8a88d47443997a7cdf3aac7340049aed45f7521f6b0ec8a0594821fe"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b89b194e9501a92d289c1ffd411380baf5daafb9818109a4f49b0a1b6dce4462"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c34f2ba9660b21fe1034b608308a01be82bbef2a92fb8199f24dc6bad0d5226"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:6604877d567a29bfe7cc02969ae0f2425260e5335505cf5e7fefc3e5465f5655"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:93b78f53431c93953f7850bb1b925a17f0ab5d97527e38a7e865b5b4bc5cfc18"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a17ef2322e60858d93584e9c52a5be7dd6236b056b7fa1ec57f1bb6ba043e33"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8505946df1665db01332d885c2020b4cb9e84a8b1241eb4ba69d59591f65855"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:f47619d978ab1ae7dfdc4052ea97c636c6263e1f19bd1be0e42c346b98d15ff4"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c7b3fb3d4f6b39bcd4f6c4b98f406291f0d681a895490ee29a0f95bab850d53c"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a4c3fcbf86fa52d0204f07cfd23947ef05b4ad743a1a988e163caa34a201e5e"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91f8783fd1f2c23fd3b9ee5ad66b785dafa58ba3cdb050c4458021fa4d1eb226"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:7ba8e1fafc7fff3e643f453bffa7d082df1678a73286ce8187d3e825e776eb94"}, + {file = "ml_dtypes-0.3.2.tar.gz", hash = "sha256:533059bc5f1764fac071ef54598db358c167c51a718f68f5bb55e3dee79d2967"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.21.2", markers = "python_version >= \"3.10\" and 
python_version < \"3.11\""}, + {version = ">1.20", markers = "python_version < \"3.10\""}, + {version = ">=1.23.3", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, +] + +[package.extras] +dev = ["absl-py", "pyink", "pylint (>=2.6.0)", "pytest", "pytest-xdist"] + [[package]] name = "mlflow-skinny" -version = "2.9.2" +version = "2.11.2" description = "MLflow: A Platform for ML Development and Productionization" optional = true python-versions = ">=3.8" files = [ - {file = "mlflow-skinny-2.9.2.tar.gz", hash = "sha256:61de0a70e22552132f7140e1ddfa7d31a88a3ed735df155dcd884d5a64c81f14"}, - {file = "mlflow_skinny-2.9.2-py3-none-any.whl", hash = "sha256:364d53b703a238594aa9d1b978c4377d4f931b1fcd43d021b76fb483009a31f9"}, + {file = "mlflow-skinny-2.11.2.tar.gz", hash = "sha256:f6cf6bd221f70b4666ca691567f54e4be5b12ad2fbaa4ae50c7f6bdffe895685"}, + {file = "mlflow_skinny-2.11.2-py3-none-any.whl", hash = "sha256:229fa206da4c7d1f6efc6b2fce488952a7790644f6b98c87a1b1873933ed17de"}, ] [package.dependencies] click = ">=7.0,<9" cloudpickle = "<4" -databricks-cli = ">=0.8.7,<1" entrypoints = "<1" -gitpython = ">=2.1.0,<4" +gitpython = ">=3.1.9,<4" importlib-metadata = ">=3.7.0,<4.7.0 || >4.7.0,<8" packaging = "<24" protobuf = ">=3.12.0,<5" -pytz = "<2024" +pytz = "<2025" pyyaml = ">=5.1,<7" requests = ">=2.17.3,<3" sqlparse = ">=0.4.0,<1" [package.extras] aliyun-oss = ["aliyunstoreplugin"] -databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "google-cloud-storage (>=1.30.0)"] -extras = ["azureml-core (>=1.2.0)", "boto3", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] -gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] -genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] sqlserver = ["mlflow-dbstore"] xethub = ["mlflow-xethub"] @@ -1516,40 +2163,82 @@ files = [ {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, ] +[[package]] +name = "murmurhash" +version = "1.0.10" +description = "Cython bindings for MurmurHash" +optional = false +python-versions = ">=3.6" +files = [ + {file = "murmurhash-1.0.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3e90eef568adca5e17a91f96975e9a782ace3a617bbb3f8c8c2d917096e9bfeb"}, + {file = "murmurhash-1.0.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f8ecb00cc1ab57e4b065f9fb3ea923b55160c402d959c69a0b6dbbe8bc73efc3"}, + {file 
= "murmurhash-1.0.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3310101004d9e2e0530c2fed30174448d998ffd1b50dcbfb7677e95db101aa4b"}, + {file = "murmurhash-1.0.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65401a6f1778676253cbf89c1f45a8a7feb7d73038e483925df7d5943c08ed9"}, + {file = "murmurhash-1.0.10-cp310-cp310-win_amd64.whl", hash = "sha256:f23f2dfc7174de2cdc5007c0771ab8376a2a3f48247f32cac4a5563e40c6adcc"}, + {file = "murmurhash-1.0.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90ed37ee2cace9381b83d56068334f77e3e30bc521169a1f886a2a2800e965d6"}, + {file = "murmurhash-1.0.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:22e9926fdbec9d24ced9b0a42f0fee68c730438be3cfb00c2499fd495caec226"}, + {file = "murmurhash-1.0.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54bfbfd68baa99717239b8844600db627f336a08b1caf4df89762999f681cdd1"}, + {file = "murmurhash-1.0.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18b9d200a09d48ef67f6840b77c14f151f2b6c48fd69661eb75c7276ebdb146c"}, + {file = "murmurhash-1.0.10-cp311-cp311-win_amd64.whl", hash = "sha256:e5d7cfe392c0a28129226271008e61e77bf307afc24abf34f386771daa7b28b0"}, + {file = "murmurhash-1.0.10-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:96f0a070344d4802ea76a160e0d4c88b7dc10454d2426f48814482ba60b38b9e"}, + {file = "murmurhash-1.0.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9f61862060d677c84556610ac0300a0776cb13cb3155f5075ed97e80f86e55d9"}, + {file = "murmurhash-1.0.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3b6d2d877d8881a08be66d906856d05944be0faf22b9a0390338bcf45299989"}, + {file = "murmurhash-1.0.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f54b0031d8696fed17ed6e9628f339cdea0ba2367ca051e18ff59193f52687"}, + {file = "murmurhash-1.0.10-cp312-cp312-win_amd64.whl", hash = "sha256:97e09d675de2359e586f09de1d0de1ab39f9911edffc65c9255fb5e04f7c1f85"}, + {file = "murmurhash-1.0.10-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b64e5332932993fef598e78d633b1ba664789ab73032ed511f3dc615a631a1a"}, + {file = "murmurhash-1.0.10-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e2a38437a8497e082408aa015c6d90554b9e00c2c221fdfa79728a2d99a739e"}, + {file = "murmurhash-1.0.10-cp36-cp36m-win_amd64.whl", hash = "sha256:55f4e4f9291a53c36070330950b472d72ba7d331e4ce3ce1ab349a4f458f7bc4"}, + {file = "murmurhash-1.0.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:16ef9f0855952493fe08929d23865425906a8c0c40607ac8a949a378652ba6a9"}, + {file = "murmurhash-1.0.10-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cc3351ae92b89c2fcdc6e41ac6f17176dbd9b3554c96109fd0713695d8663e7"}, + {file = "murmurhash-1.0.10-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6559fef7c2e7349a42a63549067709b656d6d1580752bd76be1541d8b2d65718"}, + {file = "murmurhash-1.0.10-cp37-cp37m-win_amd64.whl", hash = "sha256:8bf49e3bb33febb7057ae3a5d284ef81243a1e55eaa62bdcd79007cddbdc0461"}, + {file = "murmurhash-1.0.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f1605fde07030516eb63d77a598dd164fb9bf217fd937dbac588fe7e47a28c40"}, + {file = 
"murmurhash-1.0.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4904f7e68674a64eb2b08823c72015a5e14653e0b4b109ea00c652a005a59bad"}, + {file = "murmurhash-1.0.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0438f0cb44cf1cd26251f72c1428213c4197d40a4e3f48b1efc3aea12ce18517"}, + {file = "murmurhash-1.0.10-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db1171a3f9a10571931764cdbfaa5371f4cf5c23c680639762125cb075b833a5"}, + {file = "murmurhash-1.0.10-cp38-cp38-win_amd64.whl", hash = "sha256:1c9fbcd7646ad8ba67b895f71d361d232c6765754370ecea473dd97d77afe99f"}, + {file = "murmurhash-1.0.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7024ab3498434f22f8e642ae31448322ad8228c65c8d9e5dc2d563d57c14c9b8"}, + {file = "murmurhash-1.0.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a99dedfb7f0cc5a4cd76eb409ee98d3d50eba024f934e705914f6f4d765aef2c"}, + {file = "murmurhash-1.0.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b580b8503647de5dd7972746b7613ea586270f17ac92a44872a9b1b52c36d68"}, + {file = "murmurhash-1.0.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75840212bf75eb1352c946c3cf1622dacddd6d6bdda34368237d1eb3568f23a"}, + {file = "murmurhash-1.0.10-cp39-cp39-win_amd64.whl", hash = "sha256:a4209962b9f85de397c3203ea4b3a554da01ae9fd220fdab38757d4e9eba8d1a"}, + {file = "murmurhash-1.0.10.tar.gz", hash = "sha256:5282aab1317804c6ebd6dd7f69f15ba9075aee671c44a34be2bde0f1b11ef88a"}, +] + [[package]] name = "mypy" -version = "1.8.0" +version = "1.9.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"}, - {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"}, - {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"}, - {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"}, - {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"}, - {file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"}, - {file = "mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"}, - {file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"}, - {file = 
"mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"}, - {file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"}, - {file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"}, - {file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"}, - {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"}, - {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"}, - {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"}, - {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"}, - {file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"}, - {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"}, - {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"}, - {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"}, - {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, + {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, + {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, + {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, + {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, + {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, + {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, 
+ {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, + {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, + {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, + {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, + {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, + {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, + {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, + {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, + {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, + {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, + {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, + {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, ] [package.dependencies] @@ -1574,15 +2263,26 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "namex" +version = "0.0.7" +description = "A simple utility to separate the implementation of your Python package and its public API surface." +optional = false +python-versions = "*" +files = [ + {file = "namex-0.0.7-py3-none-any.whl", hash = "sha256:8a4f062945f405d77cb66b907f16aa2fd83681945e998be840eb6c4154d40108"}, + {file = "namex-0.0.7.tar.gz", hash = "sha256:84ba65bc4d22bd909e3d26bf2ffb4b9529b608cb3f9a4336f776b04204ced69b"}, +] + [[package]] name = "nbclient" -version = "0.9.0" +version = "0.10.0" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
optional = false python-versions = ">=3.8.0" files = [ - {file = "nbclient-0.9.0-py3-none-any.whl", hash = "sha256:a3a1ddfb34d4a9d17fc744d655962714a866639acd30130e9be84191cd97cd15"}, - {file = "nbclient-0.9.0.tar.gz", hash = "sha256:4b28c207877cf33ef3a9838cdc7a54c5ceff981194a82eac59d558f05487295e"}, + {file = "nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f"}, + {file = "nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09"}, ] [package.dependencies] @@ -1594,17 +2294,17 @@ traitlets = ">=5.4" [package.extras] dev = ["pre-commit"] docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"] -test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] +test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] [[package]] name = "nbconvert" -version = "7.14.2" -description = "Converting Jupyter Notebooks" +version = "7.16.2" +description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." optional = false python-versions = ">=3.8" files = [ - {file = "nbconvert-7.14.2-py3-none-any.whl", hash = "sha256:db28590cef90f7faf2ebbc71acd402cbecf13d29176df728c0a9025a49345ea1"}, - {file = "nbconvert-7.14.2.tar.gz", hash = "sha256:a7f8808fd4e082431673ac538400218dd45efd076fbeb07cc6e5aa5a3a4e949e"}, + {file = "nbconvert-7.16.2-py3-none-any.whl", hash = "sha256:0c01c23981a8de0220255706822c40b751438e32467d6a686e26be08ba784382"}, + {file = "nbconvert-7.16.2.tar.gz", hash = "sha256:8310edd41e1c43947e4ecf16614c61469ebc024898eb808cce0999860fc9fb16"}, ] [package.dependencies] @@ -1636,13 +2336,13 @@ webpdf = ["playwright"] [[package]] name = "nbformat" -version = "5.9.2" +version = "5.10.3" description = "The Jupyter Notebook format" optional = false python-versions = ">=3.8" files = [ - {file = "nbformat-5.9.2-py3-none-any.whl", hash = "sha256:1c5172d786a41b82bcfd0c23f9e6b6f072e8fb49c39250219e4acfff1efe89e9"}, - {file = "nbformat-5.9.2.tar.gz", hash = "sha256:5f98b5ba1997dff175e77e0c17d5c10a96eaed2cbd1de3533d1fc35d5e111192"}, + {file = "nbformat-5.10.3-py3-none-any.whl", hash = "sha256:d9476ca28676799af85385f409b49d95e199951477a159a576ef2a675151e5e8"}, + {file = "nbformat-5.10.3.tar.gz", hash = "sha256:60ed5e910ef7c6264b87d644f276b1b49e24011930deef54605188ddeb211685"}, ] [package.dependencies] @@ -1670,20 +2370,127 @@ files = [ setuptools = "*" [[package]] -name = "oauthlib" -version = "3.2.2" -description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -optional = true -python-versions = ">=3.6" +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "opt-einsum" +version = "3.3.0" +description = "Optimizing numpys einsum function" +optional = false +python-versions = ">=3.5" files = [ - {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, - {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, + {file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"}, + {file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"}, ] +[package.dependencies] +numpy = ">=1.7" + +[package.extras] +docs = ["numpydoc", "sphinx (==1.2.3)", "sphinx-rtd-theme", "sphinxcontrib-napoleon"] +tests = ["pytest", "pytest-cov", "pytest-pep8"] + +[[package]] +name = "optree" +version = "0.11.0" +description = "Optimized PyTree Utilities." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "optree-0.11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fa9ed745d4cbac5e15df70339b30867ba033542b87f7b734f4cacae5ec73ba00"}, + {file = "optree-0.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f53951bfb640417558568284a8949d67bcdbf21fa0113107e20bd9403aa20b2b"}, + {file = "optree-0.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0df9a3923725aabb112ec7f10c74fa96b6c640da1cd30e7bc62fd4b03ef02875"}, + {file = "optree-0.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:979ffc2b96f16595c219fb7a89597dd2fa00ac47a3b411fdcf8ae6821da52290"}, + {file = "optree-0.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:228b97e8c991739b10c8548c118747ba32ee765f88236342e492bf9648afc0bc"}, + {file = "optree-0.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:a91840f9d45e7c01f151ba1815ae32b4c3c21e4290298772ee4b13314f729856"}, + {file = "optree-0.11.0-cp310-cp310-win_arm64.whl", hash = "sha256:31d444684ebd8c9f09a3d806fb3277843138ef9952b7a2954908e440e3b22519"}, + {file = "optree-0.11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a5f37bcfe4e363e3bb8d36c5698fb829546956b2fe88951994387162a1859625"}, + {file = "optree-0.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6e8c3757088cd7fce666f2a5e031b65d7898e210452380d2657c0fc0a7ec9932"}, + {file = "optree-0.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:39bed744a61e2f795e172d2853779ac59b8dea236982dc160ea22063afc99ca3"}, + {file = "optree-0.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e250144eacdd5813dec0b18d91df0229197e3be402db42fd8e254ec90ea343d"}, + {file = "optree-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc17f9d085cd75a2de4f299a9c5e3c3520138eac7596061e581230b03862b44d"}, + {file = "optree-0.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:a64df43fce2d8eeafd7db6e27447c56b3fa64842df847819684b3b1cc254c016"}, + {file = "optree-0.11.0-cp311-cp311-win_arm64.whl", hash = "sha256:d666099a78f7bf31bf3a520d6871ddcae65484bcff095fc4271a391553b09c75"}, + {file = "optree-0.11.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9bf322ad14f907ad4660ca286e731e750546d54934a94cc5ba7efe8860c60ab4"}, + {file = "optree-0.11.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:64c2e00fe508f50a42c50838df0d1f5be0dce5b4bef2373db8ad72b860211015"}, + {file = "optree-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:738e8bf4158e9c11cd051d89c2e453aeacf80ff8719ebc3251069015646554d0"}, + {file = "optree-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0db6968394096223881053dffdcaf2b8e220fd85db904f14aa931e4dc422c046"}, + {file = "optree-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e5df0e8aaca124cc1ffca311786cc909810f3c046de090729cdafbf910082f8"}, + {file = "optree-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:ee208f0bec6436085a9fa3ae98af54bfcb8822086894fc1ade283e80a6f11fd7"}, + {file = "optree-0.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:26b1230f9b75b579923a4f837c7c13db8b8d815cf68ce5af31dda5d818a877b2"}, + {file = "optree-0.11.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6cdd625dab2dff5374ff9c6792e8702fced8f0ea713ce959fc8f95499b5ecb2f"}, + {file = "optree-0.11.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:234a4f8f97a1217f13390df7ac416771689749d9a1c8eda31bf8622cd333219e"}, + {file = 
"optree-0.11.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a406eee5acd3fd4875fa44c3972d29ae6d4329e7296e9219986fe6ff8e92ea0"}, + {file = "optree-0.11.0-cp37-cp37m-win_amd64.whl", hash = "sha256:63e020a34b7168b5d0701a265c7c95b07984ff699d4894b20fa601282be88f20"}, + {file = "optree-0.11.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e2d47bd28eff690eb2f7432e490265a291b04d6d346cf7b586491b2e2337bf97"}, + {file = "optree-0.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2bc08fb9691f43afc3a01119dead6b823ce3d7239e42fc3e47d4028eed50a6a2"}, + {file = "optree-0.11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3cdc9fac9888d9eff11128ccfc4d4c10309163e372f312f7942ecee8df3d7824"}, + {file = "optree-0.11.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b3bb59324d635f2015bb3e237fd772b1fd548eee6cc80e008fbe0f092e9228d"}, + {file = "optree-0.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b201a9405e250cf5770955863af2a236e382bdf5e4e086897ff03c41418c39da"}, + {file = "optree-0.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:162ed3ff2eb3f1c358e131e72c025f2b93d69b906e9057a811d014032ec71dc8"}, + {file = "optree-0.11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:00a63f10d4a476e8e9aa2988daba9b2e88cb369c5aacc12545957d7d00bcd1a7"}, + {file = "optree-0.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:418850ceff364f51a6d81f32a1efd06a4e2d8df79a162e892685bc20c0aedd72"}, + {file = "optree-0.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b8126d81ecb2c9e3554420834014ba343251f564c905ee3bef09d205b924b0c0"}, + {file = "optree-0.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4144126dd3c2ece2d2dd1d5e0b39fb91adf1c46f660c2c5a2df7f80666989d5d"}, + {file = "optree-0.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9d236bc1491a5e366921b95fecc05aa6ff55989a81f2242cd11121b82c24503"}, + {file = "optree-0.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:b26ac807d8993b7e43081b4b7bbb0378b4e5f3e6525daf923c470bc176cc3327"}, + {file = "optree-0.11.0-cp39-cp39-win_arm64.whl", hash = "sha256:9d9d644e5448db9f32e2497487aca3bb2d3f92cbb50429a411ccda3f1f0968f3"}, + {file = "optree-0.11.0.tar.gz", hash = "sha256:8e6a46e95c3ea8546055087d6fe52a1dcd56de5182365f1469106cc72cdf3307"}, +] + +[package.dependencies] +typing-extensions = ">=4.0.0" + [package.extras] -rsa = ["cryptography (>=3.0.0)"] -signals = ["blinker (>=1.4.0)"] -signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] +benchmark = ["dm-tree (>=0.1,<0.2.0a0)", "jax[cpu] (>=0.4.6,<0.5.0a0)", "pandas", "tabulate", "termcolor", "torch (>=2.0,<2.1.0a0)", "torchvision"] +docs = ["docutils", "jax[cpu]", "numpy", "sphinx (>=5.2.1)", "sphinx-autoapi", "sphinx-autobuild", "sphinx-autodoc-typehints (>=1.19.2)", "sphinx-copybutton", "sphinx-rtd-theme", "sphinxcontrib-bibtex", "torch"] +jax = ["jax"] +lint = ["black (>=22.6.0)", "cpplint", "doc8 (<1.0.0a0)", "flake8", "flake8-bugbear", "flake8-comprehensions", "flake8-docstrings", "flake8-pyi", "flake8-simplify", "isort (>=5.11.0)", "mypy (>=0.990)", "pre-commit", "pydocstyle", "pyenchant", "pylint[spelling] (>=2.15.0)", "ruff", "xdoctest"] +numpy = ["numpy"] +test = ["pytest", "pytest-cov", "pytest-xdist"] +torch = ["torch"] [[package]] name = "packaging" @@ -1706,6 +2513,79 @@ files = [ {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, ] +[[package]] +name = "pandas" +version = "2.2.1" 
+description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8df8612be9cd1c7797c93e1c5df861b2ddda0b48b08f2c3eaa0702cf88fb5f88"}, + {file = "pandas-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0f573ab277252ed9aaf38240f3b54cfc90fff8e5cab70411ee1d03f5d51f3944"}, + {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f02a3a6c83df4026e55b63c1f06476c9aa3ed6af3d89b4f04ea656ccdaaaa359"}, + {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c38ce92cb22a4bea4e3929429aa1067a454dcc9c335799af93ba9be21b6beb51"}, + {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c2ce852e1cf2509a69e98358e8458775f89599566ac3775e70419b98615f4b06"}, + {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53680dc9b2519cbf609c62db3ed7c0b499077c7fefda564e330286e619ff0dd9"}, + {file = "pandas-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:94e714a1cca63e4f5939cdce5f29ba8d415d85166be3441165edd427dc9f6bc0"}, + {file = "pandas-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f821213d48f4ab353d20ebc24e4faf94ba40d76680642fb7ce2ea31a3ad94f9b"}, + {file = "pandas-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c70e00c2d894cb230e5c15e4b1e1e6b2b478e09cf27cc593a11ef955b9ecc81a"}, + {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e97fbb5387c69209f134893abc788a6486dbf2f9e511070ca05eed4b930b1b02"}, + {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101d0eb9c5361aa0146f500773395a03839a5e6ecde4d4b6ced88b7e5a1a6403"}, + {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7d2ed41c319c9fb4fd454fe25372028dfa417aacb9790f68171b2e3f06eae8cd"}, + {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5d3c00557d657c8773ef9ee702c61dd13b9d7426794c9dfeb1dc4a0bf0ebc7"}, + {file = "pandas-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:06cf591dbaefb6da9de8472535b185cba556d0ce2e6ed28e21d919704fef1a9e"}, + {file = "pandas-2.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:88ecb5c01bb9ca927ebc4098136038519aa5d66b44671861ffab754cae75102c"}, + {file = "pandas-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:04f6ec3baec203c13e3f8b139fb0f9f86cd8c0b94603ae3ae8ce9a422e9f5bee"}, + {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a935a90a76c44fe170d01e90a3594beef9e9a6220021acfb26053d01426f7dc2"}, + {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c391f594aae2fd9f679d419e9a4d5ba4bce5bb13f6a989195656e7dc4b95c8f0"}, + {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9d1265545f579edf3f8f0cb6f89f234f5e44ba725a34d86535b1a1d38decbccc"}, + {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11940e9e3056576ac3244baef2fedade891977bcc1cb7e5cc8f8cc7d603edc89"}, + {file = "pandas-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acf681325ee1c7f950d058b05a820441075b0dd9a2adf5c4835b9bc056bf4fb"}, + {file = "pandas-2.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9bd8a40f47080825af4317d0340c656744f2bfdb6819f818e6ba3cd24c0e1397"}, + {file = "pandas-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:df0c37ebd19e11d089ceba66eba59a168242fc6b7155cba4ffffa6eccdfb8f16"}, + {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:739cc70eaf17d57608639e74d63387b0d8594ce02f69e7a0b046f117974b3019"}, + {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d3558d263073ed95e46f4650becff0c5e1ffe0fc3a015de3c79283dfbdb3df"}, + {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4aa1d8707812a658debf03824016bf5ea0d516afdea29b7dc14cf687bc4d4ec6"}, + {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:76f27a809cda87e07f192f001d11adc2b930e93a2b0c4a236fde5429527423be"}, + {file = "pandas-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:1ba21b1d5c0e43416218db63037dbe1a01fc101dc6e6024bcad08123e48004ab"}, + {file = "pandas-2.2.1.tar.gz", hash = "sha256:0ab90f87093c13f3e8fa45b48ba9f39181046e8f3317d3aadb2fffbb1b978572"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + [[package]] name = "pandocfilters" version = "1.5.1" @@ -1780,51 +2660,114 @@ files = [ ptyprocess = ">=0.5" [[package]] -name = 
"pickleshare" -version = "0.7.5" -description = "Tiny 'shelve'-like database with concurrency support" +name = "pillow" +version = "10.2.0" +description = "Python Imaging Library (Fork)" optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, - {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, + {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"}, + {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"}, + {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"}, + {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"}, + {file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"}, + {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"}, + {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", 
hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"}, + {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"}, + {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"}, + {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"}, + {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"}, + {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"}, + {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"}, + {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"}, + {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"}, + {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"}, + {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"}, + {file = "pillow-10.2.0.tar.gz", hash = 
"sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"}, ] -[[package]] -name = "pkgutil-resolve-name" -version = "1.3.10" -description = "Resolve a name to an object." -optional = false -python-versions = ">=3.6" -files = [ - {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, - {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, -] +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.1.0" +version = "4.2.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, - {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, + {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, + {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, ] [package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] [[package]] name = "ploomber-core" -version = "0.2.22" +version = "0.2.25" description = "" optional = false python-versions = "*" files = [ - {file = "ploomber-core-0.2.22.tar.gz", hash = "sha256:6ba97a39f6dc20053ffa2f9d7f9885ced72b6632f4b7dae45d24b718b63f6292"}, - {file = "ploomber_core-0.2.22-py3-none-any.whl", hash = "sha256:83b118042a2ffe1fa920b7634e7070ae61d0901ad61eea4de34b5048449b01c9"}, + {file = "ploomber-core-0.2.25.tar.gz", hash = "sha256:f264bb89cbce23e368ce9854ac8e2b13ccc799c2ee3261a0730b49c7f0e9f097"}, + {file = "ploomber_core-0.2.25-py3-none-any.whl", hash = "sha256:3bcfbca4b2102931528c23d5cd752a8d0cba836c20b9cc5a56227c11c462db44"}, ] [package.dependencies] @@ -1848,10 +2791,7 @@ files = [ [package.dependencies] click = "*" debuglater = ">=1.4.4" -ipython = [ - {version = "<=8.12.0", markers = "python_version <= \"3.8\""}, - {version = "*", markers = "python_version > \"3.8\""}, -] +ipython = "*" nbclient = "*" nbformat = "*" parso = "*" @@ -1863,13 +2803,13 @@ dev = ["flake8", "invoke", "jupytext", "matplotlib", "numpy", "pandas", "pkgmt", [[package]] name = "pluggy" -version = "1.3.0" +version = "1.4.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = 
"sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] @@ -1878,13 +2818,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "posthog" -version = "3.3.2" +version = "3.5.0" description = "Integrate PostHog into any python application." optional = false python-versions = "*" files = [ - {file = "posthog-3.3.2-py2.py3-none-any.whl", hash = "sha256:14fb43ea95c40b353db59c49af2c09ff15188aa2963f48091fc7912fa9375263"}, - {file = "posthog-3.3.2.tar.gz", hash = "sha256:734bf89f3c372605a8bbf2b07f600885287209145d747b09ccd004c59834750e"}, + {file = "posthog-3.5.0-py2.py3-none-any.whl", hash = "sha256:3c672be7ba6f95d555ea207d4486c171d06657eb34b3ce25eb043bfe7b6b5b76"}, + {file = "posthog-3.5.0.tar.gz", hash = "sha256:8f7e3b2c6e8714d0c0c542a2109b83a7549f63b7113a133ab2763a89245ef2ef"}, ] [package.dependencies] @@ -1901,13 +2841,13 @@ test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint" [[package]] name = "pre-commit" -version = "3.5.0" +version = "3.6.2" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"}, - {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"}, + {file = "pre_commit-3.6.2-py2.py3-none-any.whl", hash = "sha256:ba637c2d7a670c10daedc059f5c49b5bd0aadbccfcd7ec15592cf9665117532c"}, + {file = "pre_commit-3.6.2.tar.gz", hash = "sha256:c3ef34f463045c88658c5b99f38c1e297abdcc0ff13f98d3370055fbbfabc67e"}, ] [package.dependencies] @@ -1917,6 +2857,52 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" +[[package]] +name = "preshed" +version = "3.0.9" +description = "Cython hash table that trusts the keys are pre-hashed" +optional = false +python-versions = ">=3.6" +files = [ + {file = "preshed-3.0.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f96ef4caf9847b2bb9868574dcbe2496f974e41c2b83d6621c24fb4c3fc57e3"}, + {file = "preshed-3.0.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a61302cf8bd30568631adcdaf9e6b21d40491bd89ba8ebf67324f98b6c2a2c05"}, + {file = "preshed-3.0.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99499e8a58f58949d3f591295a97bca4e197066049c96f5d34944dd21a497193"}, + {file = "preshed-3.0.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea6b6566997dc3acd8c6ee11a89539ac85c77275b4dcefb2dc746d11053a5af8"}, + {file = "preshed-3.0.9-cp310-cp310-win_amd64.whl", hash = "sha256:bfd523085a84b1338ff18f61538e1cfcdedc4b9e76002589a301c364d19a2e36"}, + {file = "preshed-3.0.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7c2364da27f2875524ce1ca754dc071515a9ad26eb5def4c7e69129a13c9a59"}, + {file = "preshed-3.0.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:182138033c0730c683a6d97e567ceb8a3e83f3bff5704f300d582238dbd384b3"}, + {file = "preshed-3.0.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:345a10be3b86bcc6c0591d343a6dc2bfd86aa6838c30ced4256dfcfa836c3a64"}, + {file = 
"preshed-3.0.9-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51d0192274aa061699b284f9fd08416065348edbafd64840c3889617ee1609de"}, + {file = "preshed-3.0.9-cp311-cp311-win_amd64.whl", hash = "sha256:96b857d7a62cbccc3845ac8c41fd23addf052821be4eb987f2eb0da3d8745aa1"}, + {file = "preshed-3.0.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4fe6720012c62e6d550d6a5c1c7ad88cacef8388d186dad4bafea4140d9d198"}, + {file = "preshed-3.0.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e04f05758875be9751e483bd3c519c22b00d3b07f5a64441ec328bb9e3c03700"}, + {file = "preshed-3.0.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a55091d0e395f1fdb62ab43401bb9f8b46c7d7794d5b071813c29dc1ab22fd0"}, + {file = "preshed-3.0.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7de8f5138bcac7870424e09684dc3dd33c8e30e81b269f6c9ede3d8c7bb8e257"}, + {file = "preshed-3.0.9-cp312-cp312-win_amd64.whl", hash = "sha256:24229c77364628743bc29c5620c5d6607ed104f0e02ae31f8a030f99a78a5ceb"}, + {file = "preshed-3.0.9-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73b0f7ecc58095ebbc6ca26ec806008ef780190fe685ce471b550e7eef58dc2"}, + {file = "preshed-3.0.9-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cb90ecd5bec71c21d95962db1a7922364d6db2abe284a8c4b196df8bbcc871e"}, + {file = "preshed-3.0.9-cp36-cp36m-win_amd64.whl", hash = "sha256:e304a0a8c9d625b70ba850c59d4e67082a6be9c16c4517b97850a17a282ebee6"}, + {file = "preshed-3.0.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1fa6d3d5529b08296ff9b7b4da1485c080311fd8744bbf3a86019ff88007b382"}, + {file = "preshed-3.0.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef1e5173809d85edd420fc79563b286b88b4049746b797845ba672cf9435c0e7"}, + {file = "preshed-3.0.9-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fe81eb21c7d99e8b9a802cc313b998c5f791bda592903c732b607f78a6b7dc4"}, + {file = "preshed-3.0.9-cp37-cp37m-win_amd64.whl", hash = "sha256:78590a4a952747c3766e605ce8b747741005bdb1a5aa691a18aae67b09ece0e6"}, + {file = "preshed-3.0.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3452b64d97ce630e200c415073040aa494ceec6b7038f7a2a3400cbd7858e952"}, + {file = "preshed-3.0.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ac970d97b905e9e817ec13d31befd5b07c9cfec046de73b551d11a6375834b79"}, + {file = "preshed-3.0.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eebaa96ece6641cd981491cba995b68c249e0b6877c84af74971eacf8990aa19"}, + {file = "preshed-3.0.9-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d473c5f6856e07a88d41fe00bb6c206ecf7b34c381d30de0b818ba2ebaf9406"}, + {file = "preshed-3.0.9-cp38-cp38-win_amd64.whl", hash = "sha256:0de63a560f10107a3f0a9e252cc3183b8fdedcb5f81a86938fd9f1dcf8a64adf"}, + {file = "preshed-3.0.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3a9ad9f738084e048a7c94c90f40f727217387115b2c9a95c77f0ce943879fcd"}, + {file = "preshed-3.0.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a671dfa30b67baa09391faf90408b69c8a9a7f81cb9d83d16c39a182355fbfce"}, + {file = "preshed-3.0.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23906d114fc97c17c5f8433342495d7562e96ecfd871289c2bb2ed9a9df57c3f"}, 
+ {file = "preshed-3.0.9-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:778cf71f82cedd2719b256f3980d556d6fb56ec552334ba79b49d16e26e854a0"}, + {file = "preshed-3.0.9-cp39-cp39-win_amd64.whl", hash = "sha256:a6e579439b329eb93f32219ff27cb358b55fbb52a4862c31a915a098c8a22ac2"}, + {file = "preshed-3.0.9.tar.gz", hash = "sha256:721863c5244ffcd2651ad0928951a2c7c77b102f4e11a251ad85d37ee7621660"}, +] + +[package.dependencies] +cymem = ">=2.0.2,<2.1.0" +murmurhash = ">=0.28.0,<1.1.0" + [[package]] name = "prompt-toolkit" version = "3.0.43" @@ -1933,22 +2919,22 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "4.25.2" +version = "4.25.3" description = "" -optional = true +optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.2-cp310-abi3-win32.whl", hash = "sha256:b50c949608682b12efb0b2717f53256f03636af5f60ac0c1d900df6213910fd6"}, - {file = "protobuf-4.25.2-cp310-abi3-win_amd64.whl", hash = "sha256:8f62574857ee1de9f770baf04dde4165e30b15ad97ba03ceac65f760ff018ac9"}, - {file = "protobuf-4.25.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2db9f8fa64fbdcdc93767d3cf81e0f2aef176284071507e3ede160811502fd3d"}, - {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:10894a2885b7175d3984f2be8d9850712c57d5e7587a2410720af8be56cdaf62"}, - {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fc381d1dd0516343f1440019cedf08a7405f791cd49eef4ae1ea06520bc1c020"}, - {file = "protobuf-4.25.2-cp38-cp38-win32.whl", hash = "sha256:33a1aeef4b1927431d1be780e87b641e322b88d654203a9e9d93f218ee359e61"}, - {file = "protobuf-4.25.2-cp38-cp38-win_amd64.whl", hash = "sha256:47f3de503fe7c1245f6f03bea7e8d3ec11c6c4a2ea9ef910e3221c8a15516d62"}, - {file = "protobuf-4.25.2-cp39-cp39-win32.whl", hash = "sha256:5e5c933b4c30a988b52e0b7c02641760a5ba046edc5e43d3b94a74c9fc57c1b3"}, - {file = "protobuf-4.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:d66a769b8d687df9024f2985d5137a337f957a0916cf5464d1513eee96a63ff0"}, - {file = "protobuf-4.25.2-py3-none-any.whl", hash = "sha256:a8b7a98d4ce823303145bf3c1a8bdb0f2f4642a414b196f04ad9853ed0c8f830"}, - {file = "protobuf-4.25.2.tar.gz", hash = "sha256:fe599e175cb347efc8ee524bcd4b902d11f7262c0e569ececcb89995c15f0a5e"}, + {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, + {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, + {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, + {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, + {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, + {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, + {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, + {file = 
"protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, + {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, ] [[package]] @@ -2000,18 +2986,18 @@ files = [ [[package]] name = "pydantic" -version = "2.5.3" +version = "2.6.4" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-2.5.3-py3-none-any.whl", hash = "sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4"}, - {file = "pydantic-2.5.3.tar.gz", hash = "sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a"}, + {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, + {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.14.6" +pydantic-core = "2.16.3" typing-extensions = ">=4.6.1" [package.extras] @@ -2019,116 +3005,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.14.6" +version = "2.16.3" description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.14.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9"}, - {file = "pydantic_core-2.14.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c"}, - {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66"}, - {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590"}, - {file = "pydantic_core-2.14.6-cp310-none-win32.whl", hash = "sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7"}, - {file = "pydantic_core-2.14.6-cp310-none-win_amd64.whl", hash = "sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87"}, - {file = "pydantic_core-2.14.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4"}, - {file = "pydantic_core-2.14.6-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937"}, - {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622"}, - {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2"}, - {file = "pydantic_core-2.14.6-cp311-none-win32.whl", hash = "sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2"}, - {file = "pydantic_core-2.14.6-cp311-none-win_amd64.whl", hash = "sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23"}, - {file = "pydantic_core-2.14.6-cp311-none-win_arm64.whl", hash = "sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6"}, - {file = "pydantic_core-2.14.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec"}, - {file = "pydantic_core-2.14.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd"}, - {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91"}, - {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c"}, - {file = 
"pydantic_core-2.14.6-cp312-none-win32.whl", hash = "sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786"}, - {file = "pydantic_core-2.14.6-cp312-none-win_amd64.whl", hash = "sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40"}, - {file = "pydantic_core-2.14.6-cp312-none-win_arm64.whl", hash = "sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e"}, - {file = "pydantic_core-2.14.6-cp37-none-win32.whl", hash = "sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6"}, - {file = "pydantic_core-2.14.6-cp37-none-win_amd64.whl", hash = "sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391"}, - {file = "pydantic_core-2.14.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149"}, - {file = "pydantic_core-2.14.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80"}, - {file = 
"pydantic_core-2.14.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d"}, - {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1"}, - {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60"}, - {file = "pydantic_core-2.14.6-cp38-none-win32.whl", hash = "sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe"}, - {file = "pydantic_core-2.14.6-cp38-none-win_amd64.whl", hash = "sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8"}, - {file = "pydantic_core-2.14.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab"}, - {file = "pydantic_core-2.14.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0"}, - {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9"}, - {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411"}, - {file = "pydantic_core-2.14.6-cp39-none-win32.whl", hash = "sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975"}, - {file = "pydantic_core-2.14.6-cp39-none-win_amd64.whl", hash = "sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d"}, - {file = 
"pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a"}, - {file = 
"pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e"}, - {file = "pydantic_core-2.14.6.tar.gz", hash = "sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, + {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, + {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, + {file = 
"pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, + {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, + {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, + {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, + {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, + {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, + {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, + {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, + {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, + {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, + {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = 
"sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, + {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, ] [package.dependencies] @@ -2199,13 +3159,13 @@ hook-testing = ["execnet (>=1.5.0)", "psutil", "pytest (>=2.7.3)"] [[package]] name = "pyinstaller-hooks-contrib" -version = "2024.0" +version = "2024.3" description = "Community maintained hooks for PyInstaller" optional = false python-versions = ">=3.7" files = [ - {file = "pyinstaller-hooks-contrib-2024.0.tar.gz", hash = "sha256:a7118c1a5c9788595e5c43ad058a7a5b7b6d59e1eceb42362f6ec1f0b61986b0"}, - {file = "pyinstaller_hooks_contrib-2024.0-py2.py3-none-any.whl", hash = "sha256:469b5690df53223e2e8abffb2e44d6ee596e7d79d4b1eed9465123b67439875a"}, + {file = "pyinstaller-hooks-contrib-2024.3.tar.gz", hash = 
"sha256:d18657c29267c63563a96b8fc78db6ba9ae40af6702acb2f8c871df12c75b60b"}, + {file = "pyinstaller_hooks_contrib-2024.3-py2.py3-none-any.whl", hash = "sha256:6701752d525e1f4eda1eaec2c2affc206171e15c7a4e188a152fcf3ed3308024"}, ] [package.dependencies] @@ -2213,32 +3173,15 @@ importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} packaging = ">=22.0" setuptools = ">=42.0.0" -[[package]] -name = "pyjwt" -version = "2.8.0" -description = "JSON Web Token implementation in Python" -optional = true -python-versions = ">=3.7" -files = [ - {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, - {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, -] - -[package.extras] -crypto = ["cryptography (>=3.4.0)"] -dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] - [[package]] name = "pymdown-extensions" -version = "10.7" +version = "10.7.1" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.7-py3-none-any.whl", hash = "sha256:6ca215bc57bc12bf32b414887a68b810637d039124ed9b2e5bd3325cbb2c050c"}, - {file = "pymdown_extensions-10.7.tar.gz", hash = "sha256:c0d64d5cf62566f59e6b2b690a4095c931107c250a8c8e1351c1de5f6b036deb"}, + {file = "pymdown_extensions-10.7.1-py3-none-any.whl", hash = "sha256:f5cc7000d7ff0d1ce9395d216017fa4df3dde800afb1fb72d1c7d3fd35e710f4"}, + {file = "pymdown_extensions-10.7.1.tar.gz", hash = "sha256:c70e146bdd83c744ffc766b4671999796aba18842b268510a329f7f64700d584"}, ] [package.dependencies] @@ -2248,15 +3191,29 @@ pyyaml = "*" [package.extras] extra = ["pygments (>=2.12)"] +[[package]] +name = "pyparsing" +version = "3.1.2" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, + {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + [[package]] name = "pytest" -version = "7.4.4" +version = "8.1.1" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, + {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, + {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, ] [package.dependencies] @@ -2264,11 +3221,11 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} +pluggy = ">=1.4,<2.0" 
+tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +testing = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-cov" @@ -2307,13 +3264,13 @@ dev = ["pre-commit", "pytest-asyncio", "tox"] [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -2321,13 +3278,13 @@ six = ">=1.5" [[package]] name = "pytz" -version = "2023.3.post1" +version = "2024.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, - {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"}, + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, ] [[package]] @@ -2534,13 +3491,13 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "referencing" -version = "0.32.1" +version = "0.34.0" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" files = [ - {file = "referencing-0.32.1-py3-none-any.whl", hash = "sha256:7e4dc12271d8e15612bfe35792f5ea1c40970dadf8624602e33db2758f7ee554"}, - {file = "referencing-0.32.1.tar.gz", hash = "sha256:3c57da0513e9563eb7e203ebe9bb3a1b509b042016433bd1e45a2853466c3dd3"}, + {file = "referencing-0.34.0-py3-none-any.whl", hash = "sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4"}, + {file = "referencing-0.34.0.tar.gz", hash = "sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844"}, ] [package.dependencies] @@ -2672,140 +3629,139 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rich" -version = "13.7.0" +version = "13.7.1" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.7.0" files = [ - {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"}, - {file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"}, + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = 
"sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.17.1" +version = "0.18.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" files = [ - {file = "rpds_py-0.17.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:4128980a14ed805e1b91a7ed551250282a8ddf8201a4e9f8f5b7e6225f54170d"}, - {file = "rpds_py-0.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ff1dcb8e8bc2261a088821b2595ef031c91d499a0c1b031c152d43fe0a6ecec8"}, - {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d65e6b4f1443048eb7e833c2accb4fa7ee67cc7d54f31b4f0555b474758bee55"}, - {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a71169d505af63bb4d20d23a8fbd4c6ce272e7bce6cc31f617152aa784436f29"}, - {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:436474f17733c7dca0fbf096d36ae65277e8645039df12a0fa52445ca494729d"}, - {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10162fe3f5f47c37ebf6d8ff5a2368508fe22007e3077bf25b9c7d803454d921"}, - {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:720215373a280f78a1814becb1312d4e4d1077b1202a56d2b0815e95ccb99ce9"}, - {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70fcc6c2906cfa5c6a552ba7ae2ce64b6c32f437d8f3f8eea49925b278a61453"}, - {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91e5a8200e65aaac342a791272c564dffcf1281abd635d304d6c4e6b495f29dc"}, - {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:99f567dae93e10be2daaa896e07513dd4bf9c2ecf0576e0533ac36ba3b1d5394"}, - {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:24e4900a6643f87058a27320f81336d527ccfe503984528edde4bb660c8c8d59"}, - {file = "rpds_py-0.17.1-cp310-none-win32.whl", hash = "sha256:0bfb09bf41fe7c51413f563373e5f537eaa653d7adc4830399d4e9bdc199959d"}, - {file = "rpds_py-0.17.1-cp310-none-win_amd64.whl", hash = "sha256:20de7b7179e2031a04042e85dc463a93a82bc177eeba5ddd13ff746325558aa6"}, - {file = "rpds_py-0.17.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:65dcf105c1943cba45d19207ef51b8bc46d232a381e94dd38719d52d3980015b"}, - {file = "rpds_py-0.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:01f58a7306b64e0a4fe042047dd2b7d411ee82e54240284bab63e325762c1147"}, - {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:071bc28c589b86bc6351a339114fb7a029f5cddbaca34103aa573eba7b482382"}, - {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae35e8e6801c5ab071b992cb2da958eee76340e6926ec693b5ff7d6381441745"}, - {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149c5cd24f729e3567b56e1795f74577aa3126c14c11e457bec1b1c90d212e38"}, - {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e796051f2070f47230c745d0a77a91088fbee2cc0502e9b796b9c6471983718c"}, - {file = 
"rpds_py-0.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e820ee1004327609b28db8307acc27f5f2e9a0b185b2064c5f23e815f248f8"}, - {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1957a2ab607f9added64478a6982742eb29f109d89d065fa44e01691a20fc20a"}, - {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8587fd64c2a91c33cdc39d0cebdaf30e79491cc029a37fcd458ba863f8815383"}, - {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4dc889a9d8a34758d0fcc9ac86adb97bab3fb7f0c4d29794357eb147536483fd"}, - {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2953937f83820376b5979318840f3ee47477d94c17b940fe31d9458d79ae7eea"}, - {file = "rpds_py-0.17.1-cp311-none-win32.whl", hash = "sha256:1bfcad3109c1e5ba3cbe2f421614e70439f72897515a96c462ea657261b96518"}, - {file = "rpds_py-0.17.1-cp311-none-win_amd64.whl", hash = "sha256:99da0a4686ada4ed0f778120a0ea8d066de1a0a92ab0d13ae68492a437db78bf"}, - {file = "rpds_py-0.17.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1dc29db3900cb1bb40353772417800f29c3d078dbc8024fd64655a04ee3c4bdf"}, - {file = "rpds_py-0.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82ada4a8ed9e82e443fcef87e22a3eed3654dd3adf6e3b3a0deb70f03e86142a"}, - {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d36b2b59e8cc6e576f8f7b671e32f2ff43153f0ad6d0201250a7c07f25d570e"}, - {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3677fcca7fb728c86a78660c7fb1b07b69b281964673f486ae72860e13f512ad"}, - {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:516fb8c77805159e97a689e2f1c80655c7658f5af601c34ffdb916605598cda2"}, - {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df3b6f45ba4515632c5064e35ca7f31d51d13d1479673185ba8f9fefbbed58b9"}, - {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a967dd6afda7715d911c25a6ba1517975acd8d1092b2f326718725461a3d33f9"}, - {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dbbb95e6fc91ea3102505d111b327004d1c4ce98d56a4a02e82cd451f9f57140"}, - {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02866e060219514940342a1f84303a1ef7a1dad0ac311792fbbe19b521b489d2"}, - {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2528ff96d09f12e638695f3a2e0c609c7b84c6df7c5ae9bfeb9252b6fa686253"}, - {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bd345a13ce06e94c753dab52f8e71e5252aec1e4f8022d24d56decd31e1b9b23"}, - {file = "rpds_py-0.17.1-cp312-none-win32.whl", hash = "sha256:2a792b2e1d3038daa83fa474d559acfd6dc1e3650ee93b2662ddc17dbff20ad1"}, - {file = "rpds_py-0.17.1-cp312-none-win_amd64.whl", hash = "sha256:292f7344a3301802e7c25c53792fae7d1593cb0e50964e7bcdcc5cf533d634e3"}, - {file = "rpds_py-0.17.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8ffe53e1d8ef2520ebcf0c9fec15bb721da59e8ef283b6ff3079613b1e30513d"}, - {file = "rpds_py-0.17.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4341bd7579611cf50e7b20bb8c2e23512a3dc79de987a1f411cb458ab670eb90"}, - {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4eb548daf4836e3b2c662033bfbfc551db58d30fd8fe660314f86bf8510b93"}, - {file = 
"rpds_py-0.17.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b686f25377f9c006acbac63f61614416a6317133ab7fafe5de5f7dc8a06d42eb"}, - {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e21b76075c01d65d0f0f34302b5a7457d95721d5e0667aea65e5bb3ab415c25"}, - {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b86b21b348f7e5485fae740d845c65a880f5d1eda1e063bc59bef92d1f7d0c55"}, - {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f175e95a197f6a4059b50757a3dca33b32b61691bdbd22c29e8a8d21d3914cae"}, - {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1701fc54460ae2e5efc1dd6350eafd7a760f516df8dbe51d4a1c79d69472fbd4"}, - {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9051e3d2af8f55b42061603e29e744724cb5f65b128a491446cc029b3e2ea896"}, - {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7450dbd659fed6dd41d1a7d47ed767e893ba402af8ae664c157c255ec6067fde"}, - {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5a024fa96d541fd7edaa0e9d904601c6445e95a729a2900c5aec6555fe921ed6"}, - {file = "rpds_py-0.17.1-cp38-none-win32.whl", hash = "sha256:da1ead63368c04a9bded7904757dfcae01eba0e0f9bc41d3d7f57ebf1c04015a"}, - {file = "rpds_py-0.17.1-cp38-none-win_amd64.whl", hash = "sha256:841320e1841bb53fada91c9725e766bb25009cfd4144e92298db296fb6c894fb"}, - {file = "rpds_py-0.17.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f6c43b6f97209e370124baf2bf40bb1e8edc25311a158867eb1c3a5d449ebc7a"}, - {file = "rpds_py-0.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7d63ec01fe7c76c2dbb7e972fece45acbb8836e72682bde138e7e039906e2c"}, - {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81038ff87a4e04c22e1d81f947c6ac46f122e0c80460b9006e6517c4d842a6ec"}, - {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:810685321f4a304b2b55577c915bece4c4a06dfe38f6e62d9cc1d6ca8ee86b99"}, - {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25f071737dae674ca8937a73d0f43f5a52e92c2d178330b4c0bb6ab05586ffa6"}, - {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa5bfb13f1e89151ade0eb812f7b0d7a4d643406caaad65ce1cbabe0a66d695f"}, - {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfe07308b311a8293a0d5ef4e61411c5c20f682db6b5e73de6c7c8824272c256"}, - {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a000133a90eea274a6f28adc3084643263b1e7c1a5a66eb0a0a7a36aa757ed74"}, - {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d0e8a6434a3fbf77d11448c9c25b2f25244226cfbec1a5159947cac5b8c5fa4"}, - {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:efa767c220d94aa4ac3a6dd3aeb986e9f229eaf5bce92d8b1b3018d06bed3772"}, - {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dbc56680ecf585a384fbd93cd42bc82668b77cb525343170a2d86dafaed2a84b"}, - {file = "rpds_py-0.17.1-cp39-none-win32.whl", hash = "sha256:270987bc22e7e5a962b1094953ae901395e8c1e1e83ad016c5cfcfff75a15a3f"}, - {file = "rpds_py-0.17.1-cp39-none-win_amd64.whl", hash = "sha256:2a7b2f2f56a16a6d62e55354dd329d929560442bd92e87397b7a9586a32e3e76"}, - {file = 
"rpds_py-0.17.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a3264e3e858de4fc601741498215835ff324ff2482fd4e4af61b46512dd7fc83"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f2f3b28b40fddcb6c1f1f6c88c6f3769cd933fa493ceb79da45968a21dccc920"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9584f8f52010295a4a417221861df9bea4c72d9632562b6e59b3c7b87a1522b7"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c64602e8be701c6cfe42064b71c84ce62ce66ddc6422c15463fd8127db3d8066"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:060f412230d5f19fc8c8b75f315931b408d8ebf56aec33ef4168d1b9e54200b1"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9412abdf0ba70faa6e2ee6c0cc62a8defb772e78860cef419865917d86c7342"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9737bdaa0ad33d34c0efc718741abaafce62fadae72c8b251df9b0c823c63b22"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9f0e4dc0f17dcea4ab9d13ac5c666b6b5337042b4d8f27e01b70fae41dd65c57"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1db228102ab9d1ff4c64148c96320d0be7044fa28bd865a9ce628ce98da5973d"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:d8bbd8e56f3ba25a7d0cf980fc42b34028848a53a0e36c9918550e0280b9d0b6"}, - {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:be22ae34d68544df293152b7e50895ba70d2a833ad9566932d750d3625918b82"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bf046179d011e6114daf12a534d874958b039342b347348a78b7cdf0dd9d6041"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a746a6d49665058a5896000e8d9d2f1a6acba8a03b389c1e4c06e11e0b7f40d"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b8bf5b8db49d8fd40f54772a1dcf262e8be0ad2ab0206b5a2ec109c176c0a4"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f7f4cb1f173385e8a39c29510dd11a78bf44e360fb75610594973f5ea141028b"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7fbd70cb8b54fe745301921b0816c08b6d917593429dfc437fd024b5ba713c58"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bdf1303df671179eaf2cb41e8515a07fc78d9d00f111eadbe3e14262f59c3d0"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad059a4bd14c45776600d223ec194e77db6c20255578bb5bcdd7c18fd169361"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3664d126d3388a887db44c2e293f87d500c4184ec43d5d14d2d2babdb4c64cad"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:698ea95a60c8b16b58be9d854c9f993c639f5c214cf9ba782eca53a8789d6b19"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:c3d2010656999b63e628a3c694f23020322b4178c450dc478558a2b6ef3cb9bb"}, - {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:938eab7323a736533f015e6069a7d53ef2dcc841e4e533b782c2bfb9fb12d84b"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e626b365293a2142a62b9a614e1f8e331b28f3ca57b9f05ebbf4cf2a0f0bdc5"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:380e0df2e9d5d5d339803cfc6d183a5442ad7ab3c63c2a0982e8c824566c5ccc"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b760a56e080a826c2e5af09002c1a037382ed21d03134eb6294812dda268c811"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5576ee2f3a309d2bb403ec292d5958ce03953b0e57a11d224c1f134feaf8c40f"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3c3461ebb4c4f1bbc70b15d20b565759f97a5aaf13af811fcefc892e9197ba"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:637b802f3f069a64436d432117a7e58fab414b4e27a7e81049817ae94de45d8d"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffee088ea9b593cc6160518ba9bd319b5475e5f3e578e4552d63818773c6f56a"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ac732390d529d8469b831949c78085b034bff67f584559340008d0f6041a049"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:93432e747fb07fa567ad9cc7aaadd6e29710e515aabf939dfbed8046041346c6"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b7d9ca34542099b4e185b3c2a2b2eda2e318a7dbde0b0d83357a6d4421b5296"}, - {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:0387ce69ba06e43df54e43968090f3626e231e4bc9150e4c3246947567695f68"}, - {file = "rpds_py-0.17.1.tar.gz", hash = "sha256:0210b2668f24c078307260bf88bdac9d6f1093635df5123789bfee4d8d7fc8e7"}, + {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, + {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, + {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, + {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836"}, + {file = "rpds_py-0.18.0-cp311-none-win32.whl", hash = "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1"}, + {file = "rpds_py-0.18.0-cp311-none-win_amd64.whl", hash = "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9"}, + {file = 
"rpds_py-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7"}, + {file = "rpds_py-0.18.0-cp312-none-win32.whl", hash = "sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98"}, + {file = "rpds_py-0.18.0-cp312-none-win_amd64.whl", hash = "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, + {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, + {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, + {file = 
"rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, + {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, + {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, + {file = 
"rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, + {file = "rpds_py-0.18.0.tar.gz", hash = 
"sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, ] [[package]] name = "ruamel-yaml" -version = "0.18.5" +version = "0.18.6" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" optional = false python-versions = ">=3.7" files = [ - {file = "ruamel.yaml-0.18.5-py3-none-any.whl", hash = "sha256:a013ac02f99a69cdd6277d9664689eb1acba07069f912823177c5eced21a6ada"}, - {file = "ruamel.yaml-0.18.5.tar.gz", hash = "sha256:61917e3a35a569c1133a8f772e1226961bf5a1198bea7e23f06a0841dea1ab0e"}, + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, ] [package.dependencies] @@ -2900,21 +3856,105 @@ files = [ {file = "ruff-0.0.259.tar.gz", hash = "sha256:8b56496063ab3bfdf72339a5fbebb8bd46e5c5fee25ef11a9f03b208fa0562ec"}, ] +[[package]] +name = "scikit-learn" +version = "1.4.1.post1" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scikit-learn-1.4.1.post1.tar.gz", hash = "sha256:93d3d496ff1965470f9977d05e5ec3376fb1e63b10e4fda5e39d23c2d8969a30"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c540aaf44729ab5cd4bd5e394f2b375e65ceaea9cdd8c195788e70433d91bbc5"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4310bff71aa98b45b46cd26fa641309deb73a5d1c0461d181587ad4f30ea3c36"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f43dd527dabff5521af2786a2f8de5ba381e182ec7292663508901cf6ceaf6e"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c02e27d65b0c7dc32f2c5eb601aaf5530b7a02bfbe92438188624524878336f2"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-win_amd64.whl", hash = "sha256:629e09f772ad42f657ca60a1a52342eef786218dd20cf1369a3b8d085e55ef8f"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6145dfd9605b0b50ae72cdf72b61a2acd87501369a763b0d73d004710ebb76b5"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1afed6951bc9d2053c6ee9a518a466cbc9b07c6a3f9d43bfe734192b6125d508"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce03506ccf5f96b7e9030fea7eb148999b254c44c10182ac55857bc9b5d4815f"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ba516fcdc73d60e7f48cbb0bccb9acbdb21807de3651531208aac73c758e3ab"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-win_amd64.whl", hash = "sha256:78cd27b4669513b50db4f683ef41ea35b5dddc797bd2bbd990d49897fd1c8a46"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a1e289f33f613cefe6707dead50db31930530dc386b6ccff176c786335a7b01c"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0df87de9ce1c0140f2818beef310fb2e2afdc1e66fc9ad587965577f17733649"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:712c1c69c45b58ef21635360b3d0a680ff7d83ac95b6f9b82cf9294070cda710"}, + {file = 
"scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1754b0c2409d6ed5a3380512d0adcf182a01363c669033a2b55cca429ed86a81"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-win_amd64.whl", hash = "sha256:1d491ef66e37f4e812db7e6c8286520c2c3fc61b34bf5e59b67b4ce528de93af"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aa0029b78ef59af22cfbd833e8ace8526e4df90212db7ceccbea582ebb5d6794"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:14e4c88436ac96bf69eb6d746ac76a574c314a23c6961b7d344b38877f20fee1"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7cd3a77c32879311f2aa93466d3c288c955ef71d191503cf0677c3340ae8ae0"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a3ee19211ded1a52ee37b0a7b373a8bfc66f95353af058a210b692bd4cda0dd"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-win_amd64.whl", hash = "sha256:234b6bda70fdcae9e4abbbe028582ce99c280458665a155eed0b820599377d25"}, +] + +[package.dependencies] +joblib = ">=1.2.0" +numpy = ">=1.19.5,<2.0" +scipy = ">=1.6.0" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "pandas (>=1.1.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.15.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.19.12)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.17.2)"] + +[[package]] +name = "scipy" +version = "1.12.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scipy-1.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:78e4402e140879387187f7f25d91cc592b3501a2e51dfb320f48dfb73565f10b"}, + {file = "scipy-1.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5f00ebaf8de24d14b8449981a2842d404152774c1a1d880c901bf454cb8e2a1"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e53958531a7c695ff66c2e7bb7b79560ffdc562e2051644c5576c39ff8efb563"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e32847e08da8d895ce09d108a494d9eb78974cf6de23063f93306a3e419960c"}, + {file = "scipy-1.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c1020cad92772bf44b8e4cdabc1df5d87376cb219742549ef69fc9fd86282dd"}, + {file = "scipy-1.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:75ea2a144096b5e39402e2ff53a36fecfd3b960d786b7efd3c180e29c39e53f2"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:408c68423f9de16cb9e602528be4ce0d6312b05001f3de61fe9ec8b1263cad08"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5adfad5dbf0163397beb4aca679187d24aec085343755fcdbdeb32b3679f254c"}, + {file = 
"scipy-1.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3003652496f6e7c387b1cf63f4bb720951cfa18907e998ea551e6de51a04467"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8066bce124ee5531d12a74b617d9ac0ea59245246410e19bca549656d9a40a"}, + {file = "scipy-1.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8bee4993817e204d761dba10dbab0774ba5a8612e57e81319ea04d84945375ba"}, + {file = "scipy-1.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a24024d45ce9a675c1fb8494e8e5244efea1c7a09c60beb1eeb80373d0fecc70"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e7e76cc48638228212c747ada851ef355c2bb5e7f939e10952bc504c11f4e372"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f7ce148dffcd64ade37b2df9315541f9adad6efcaa86866ee7dd5db0c8f041c3"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c39f92041f490422924dfdb782527a4abddf4707616e07b021de33467f917bc"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7ebda398f86e56178c2fa94cad15bf457a218a54a35c2a7b4490b9f9cb2676c"}, + {file = "scipy-1.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:95e5c750d55cf518c398a8240571b0e0782c2d5a703250872f36eaf737751338"}, + {file = "scipy-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e646d8571804a304e1da01040d21577685ce8e2db08ac58e543eaca063453e1c"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:913d6e7956c3a671de3b05ccb66b11bc293f56bfdef040583a7221d9e22a2e35"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba1b0c7256ad75401c73e4b3cf09d1f176e9bd4248f0d3112170fb2ec4db067"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:730badef9b827b368f351eacae2e82da414e13cf8bd5051b4bdfd720271a5371"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6546dc2c11a9df6926afcbdd8a3edec28566e4e785b915e849348c6dd9f3f490"}, + {file = "scipy-1.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:196ebad3a4882081f62a5bf4aeb7326aa34b110e533aab23e4374fcccb0890dc"}, + {file = "scipy-1.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:b360f1b6b2f742781299514e99ff560d1fe9bd1bff2712894b52abe528d1fd1e"}, + {file = "scipy-1.12.0.tar.gz", hash = "sha256:4bf5abab8a36d20193c698b0f1fc282c1d083c94723902c447e5d2f1780936a3"}, +] + +[package.dependencies] +numpy = ">=1.22.4,<1.29.0" + +[package.extras] +dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "hypothesis", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "setuptools" -version = "69.0.3" +version = "69.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"}, - {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"}, + {file = 
"setuptools-69.2.0-py3-none-any.whl", hash = "sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c"}, + {file = "setuptools-69.2.0.tar.gz", hash = "sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "sh" @@ -2926,6 +3966,17 @@ files = [ {file = "sh-1.14.3.tar.gz", hash = "sha256:e4045b6c732d9ce75d571c79f5ac2234edd9ae4f5fa9d59b09705082bdca18c7"}, ] +[[package]] +name = "sh" +version = "2.0.6" +description = "Python subprocess replacement" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "sh-2.0.6-py3-none-any.whl", hash = "sha256:ced8f2e081a858b66a46ace3703dec243779abbd5a1887ba7e3c34f34da70cd2"}, + {file = "sh-2.0.6.tar.gz", hash = "sha256:9b2998f313f201c777e2c0061f0b1367497097ef13388595be147e2a00bf7ba1"}, +] + [[package]] name = "six" version = "1.16.0" @@ -2937,6 +3988,27 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] +[[package]] +name = "smart-open" +version = "6.4.0" +description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "smart_open-6.4.0-py3-none-any.whl", hash = "sha256:8d3ef7e6997e8e42dd55c74166ed21e6ac70664caa32dd940b26d54a8f6b4142"}, + {file = "smart_open-6.4.0.tar.gz", hash = "sha256:be3c92c246fbe80ebce8fbacb180494a481a77fcdcb7c1aadb2ea5b9c2bee8b9"}, +] + +[package.extras] +all = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "paramiko", "requests"] +azure = ["azure-common", "azure-core", "azure-storage-blob"] +gcs = ["google-cloud-storage (>=2.6.0)"] +http = ["requests"] +s3 = ["boto3"] +ssh = ["paramiko"] +test = ["azure-common", "azure-core", 
"azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "moto[server]", "paramiko", "pytest", "pytest-rerunfailures", "requests", "responses"] +webhdfs = ["requests"] + [[package]] name = "smmap" version = "5.0.1" @@ -2959,62 +4031,172 @@ files = [ {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, ] +[[package]] +name = "spacy" +version = "3.7.4" +description = "Industrial-strength Natural Language Processing (NLP) in Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "spacy-3.7.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0f748625192f573c07ddea5fcd324919dbfbf4f4a2f7a1fc731e6dcba7321ea1"}, + {file = "spacy-3.7.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6288dca7b3a5489b3d7ce68404bc432ca22f826c662a12af47ef7bdb264307fb"}, + {file = "spacy-3.7.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef59db99b12a72d2646be3888d87f94c59e11cd07adc2f50a8130e83f07eb1cf"}, + {file = "spacy-3.7.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f07477a4027711c22b3865e78dc9076335c03fcf318a6736159bf07e2a923125"}, + {file = "spacy-3.7.4-cp310-cp310-win_amd64.whl", hash = "sha256:787ce42a837f7edfbd4185356eea893a81b7dd75743d0047f2b9bf179775f970"}, + {file = "spacy-3.7.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e82b9da21853d4aee46811804dc7e136895f087fda25c7585172d95eb9b70833"}, + {file = "spacy-3.7.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07ffedf51899441070fb70432f8f873696f39e0e31c9ce7403101c459f8a1281"}, + {file = "spacy-3.7.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba57bcc111eca7b086ee33a9636df775cfd4b14302f7d0ffbc11e95ac0fb3f0e"}, + {file = "spacy-3.7.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7580d1565f4d1ccbee9a18531f993a5b9b37ced96f145153dd4e98ceec607a55"}, + {file = "spacy-3.7.4-cp311-cp311-win_amd64.whl", hash = "sha256:df99c6f0085b1ec8e88beb5fd96d4371cef6fc19c202c41fc4fadc2afd55a157"}, + {file = "spacy-3.7.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b982ebab417189346acb4722637c573830d62e157ba336c3eb6c417249344be1"}, + {file = "spacy-3.7.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e7c29e152d8ea060af60da9410fa8ef038f3c9068a206905ee5c704de78f6e87"}, + {file = "spacy-3.7.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:023c9a008328f55c4717c56c4f8a28073b9961547f7d38a9405c967a52e66d59"}, + {file = "spacy-3.7.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1969d3d0fd0c811b7485438460f0ae8cfe16d46b54bcb8d1c26e70914e67e3d"}, + {file = "spacy-3.7.4-cp312-cp312-win_amd64.whl", hash = "sha256:040f7df5096c817450820eaaa426d54ed266254d16974e9a707a32f5b0f139ae"}, + {file = "spacy-3.7.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6757e8fbfd35dc0ed830296d5756f46d5b8d4b0353925dbe2f9aa33b82c5308"}, + {file = "spacy-3.7.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c500c1bad9e0488814a75077089aeef64a6b520ae8131578f266a08168106fa3"}, + {file = "spacy-3.7.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c992e2c5c0cd06c7f3e74fe8d758885117090013931c7938277d1421660bf71f"}, + {file = "spacy-3.7.4-cp37-cp37m-win_amd64.whl", hash = "sha256:2463c56ab1378f2b9a675340a2e3dfb618989d0da8cdce06429bc9b1dad4f294"}, + {file = "spacy-3.7.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:b43e92edfa99f34dbb9dd30175f41158d20945e3179055d0071fee19394add96"}, + {file = "spacy-3.7.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c26a81d33c93e4a8e3360d61dcce0802fb886de79f666a487ea5abbd3ce4b30b"}, + {file = "spacy-3.7.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d7910ca7a91bf423febd8a9a10ca6a4cfcb5c99abdec79df1eb7b67ea3e3c90"}, + {file = "spacy-3.7.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b16768b9e5c350b8a383a6bd84cd0481ccdf10ae6231f568598890638065f69"}, + {file = "spacy-3.7.4-cp38-cp38-win_amd64.whl", hash = "sha256:ed99fb176979b1e3cf6830161f8e881beae54e80147b05fca31d9a67cb12fbca"}, + {file = "spacy-3.7.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ca8112330982dbeef125cc5eb40e0349493055835a0ebe29028a0953a25d8522"}, + {file = "spacy-3.7.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:977f37493d7cf0b5dca155f0450d47890378703283c29919cdcc220db994a775"}, + {file = "spacy-3.7.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ad5e931c294d100ec3edb40e40f2722ef505cea16312839dd6467e81d665740"}, + {file = "spacy-3.7.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11ebf6054cd3ec3638801d7ff9b709e32fb9c15512b347b489bfe2ccb1102c9f"}, + {file = "spacy-3.7.4-cp39-cp39-win_amd64.whl", hash = "sha256:f5b930753027ac599f70bb7e77d6a2256191fe582e6f3f0cd624d88f6c279fa4"}, + {file = "spacy-3.7.4.tar.gz", hash = "sha256:525f2ced2e40761562c8cace93ef6a1e6e8c483f27bd564bc1b15f608efbe85b"}, +] + +[package.dependencies] +catalogue = ">=2.0.6,<2.1.0" +cymem = ">=2.0.2,<2.1.0" +jinja2 = "*" +langcodes = ">=3.2.0,<4.0.0" +murmurhash = ">=0.28.0,<1.1.0" +numpy = {version = ">=1.19.0", markers = "python_version >= \"3.9\""} +packaging = ">=20.0" +preshed = ">=3.0.2,<3.1.0" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<3.0.0" +requests = ">=2.13.0,<3.0.0" +setuptools = "*" +smart-open = ">=5.2.1,<7.0.0" +spacy-legacy = ">=3.0.11,<3.1.0" +spacy-loggers = ">=1.0.0,<2.0.0" +srsly = ">=2.4.3,<3.0.0" +thinc = ">=8.2.2,<8.3.0" +tqdm = ">=4.38.0,<5.0.0" +typer = ">=0.3.0,<0.10.0" +wasabi = ">=0.9.1,<1.2.0" +weasel = ">=0.1.0,<0.4.0" + +[package.extras] +apple = ["thinc-apple-ops (>=0.1.0.dev0,<1.0.0)"] +cuda = ["cupy (>=5.0.0b4,<13.0.0)"] +cuda-autodetect = ["cupy-wheel (>=11.0.0,<13.0.0)"] +cuda100 = ["cupy-cuda100 (>=5.0.0b4,<13.0.0)"] +cuda101 = ["cupy-cuda101 (>=5.0.0b4,<13.0.0)"] +cuda102 = ["cupy-cuda102 (>=5.0.0b4,<13.0.0)"] +cuda110 = ["cupy-cuda110 (>=5.0.0b4,<13.0.0)"] +cuda111 = ["cupy-cuda111 (>=5.0.0b4,<13.0.0)"] +cuda112 = ["cupy-cuda112 (>=5.0.0b4,<13.0.0)"] +cuda113 = ["cupy-cuda113 (>=5.0.0b4,<13.0.0)"] +cuda114 = ["cupy-cuda114 (>=5.0.0b4,<13.0.0)"] +cuda115 = ["cupy-cuda115 (>=5.0.0b4,<13.0.0)"] +cuda116 = ["cupy-cuda116 (>=5.0.0b4,<13.0.0)"] +cuda117 = ["cupy-cuda117 (>=5.0.0b4,<13.0.0)"] +cuda11x = ["cupy-cuda11x (>=11.0.0,<13.0.0)"] +cuda12x = ["cupy-cuda12x (>=11.5.0,<13.0.0)"] +cuda80 = ["cupy-cuda80 (>=5.0.0b4,<13.0.0)"] +cuda90 = ["cupy-cuda90 (>=5.0.0b4,<13.0.0)"] +cuda91 = ["cupy-cuda91 (>=5.0.0b4,<13.0.0)"] +cuda92 = ["cupy-cuda92 (>=5.0.0b4,<13.0.0)"] +ja = ["sudachidict-core (>=20211220)", "sudachipy (>=0.5.2,!=0.6.1)"] +ko = ["natto-py (>=0.9.0)"] +lookups = ["spacy-lookups-data (>=1.0.3,<1.1.0)"] +th = ["pythainlp (>=2.0)"] +transformers = ["spacy-transformers (>=1.1.2,<1.4.0)"] + +[[package]] +name = "spacy-legacy" +version = "3.0.12" +description = "Legacy registered functions for spaCy backwards compatibility" +optional = false 
+python-versions = ">=3.6" +files = [ + {file = "spacy-legacy-3.0.12.tar.gz", hash = "sha256:b37d6e0c9b6e1d7ca1cf5bc7152ab64a4c4671f59c85adaf7a3fcb870357a774"}, + {file = "spacy_legacy-3.0.12-py2.py3-none-any.whl", hash = "sha256:476e3bd0d05f8c339ed60f40986c07387c0a71479245d6d0f4298dbd52cda55f"}, +] + +[[package]] +name = "spacy-loggers" +version = "1.0.5" +description = "Logging utilities for SpaCy" +optional = false +python-versions = ">=3.6" +files = [ + {file = "spacy-loggers-1.0.5.tar.gz", hash = "sha256:d60b0bdbf915a60e516cc2e653baeff946f0cfc461b452d11a4d5458c6fe5f24"}, + {file = "spacy_loggers-1.0.5-py3-none-any.whl", hash = "sha256:196284c9c446cc0cdb944005384270d775fdeaf4f494d8e269466cfa497ef645"}, +] + [[package]] name = "sqlalchemy" -version = "2.0.25" +version = "2.0.28" description = "Database Abstraction Library" optional = true python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4344d059265cc8b1b1be351bfb88749294b87a8b2bbe21dfbe066c4199541ebd"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9e2e59cbcc6ba1488404aad43de005d05ca56e069477b33ff74e91b6319735"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84daa0a2055df9ca0f148a64fdde12ac635e30edbca80e87df9b3aaf419e144a"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc8b7dabe8e67c4832891a5d322cec6d44ef02f432b4588390017f5cec186a84"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f5693145220517b5f42393e07a6898acdfe820e136c98663b971906120549da5"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db854730a25db7c956423bb9fb4bdd1216c839a689bf9cc15fada0a7fb2f4570"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-win32.whl", hash = "sha256:14a6f68e8fc96e5e8f5647ef6cda6250c780612a573d99e4d881581432ef1669"}, - {file = "SQLAlchemy-2.0.25-cp310-cp310-win_amd64.whl", hash = "sha256:87f6e732bccd7dcf1741c00f1ecf33797383128bd1c90144ac8adc02cbb98643"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:342d365988ba88ada8af320d43df4e0b13a694dbd75951f537b2d5e4cb5cd002"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f37c0caf14b9e9b9e8f6dbc81bc56db06acb4363eba5a633167781a48ef036ed"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9373708763ef46782d10e950b49d0235bfe58facebd76917d3f5cbf5971aed"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d24f571990c05f6b36a396218f251f3e0dda916e0c687ef6fdca5072743208f5"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75432b5b14dc2fff43c50435e248b45c7cdadef73388e5610852b95280ffd0e9"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:884272dcd3ad97f47702965a0e902b540541890f468d24bd1d98bcfe41c3f018"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-win32.whl", hash = "sha256:e607cdd99cbf9bb80391f54446b86e16eea6ad309361942bf88318bcd452363c"}, - {file = "SQLAlchemy-2.0.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d505815ac340568fd03f719446a589162d55c52f08abd77ba8964fbb7eb5b5f"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0dacf67aee53b16f365c589ce72e766efaabd2b145f9de7c917777b575e3659d"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:b801154027107461ee992ff4b5c09aa7cc6ec91ddfe50d02bca344918c3265c6"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59a21853f5daeb50412d459cfb13cb82c089ad4c04ec208cd14dddd99fc23b39"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29049e2c299b5ace92cbed0c1610a7a236f3baf4c6b66eb9547c01179f638ec5"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b64b183d610b424a160b0d4d880995e935208fc043d0302dd29fee32d1ee3f95"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f7a7d7fcc675d3d85fbf3b3828ecd5990b8d61bd6de3f1b260080b3beccf215"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-win32.whl", hash = "sha256:cf18ff7fc9941b8fc23437cc3e68ed4ebeff3599eec6ef5eebf305f3d2e9a7c2"}, - {file = "SQLAlchemy-2.0.25-cp312-cp312-win_amd64.whl", hash = "sha256:91f7d9d1c4dd1f4f6e092874c128c11165eafcf7c963128f79e28f8445de82d5"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bb209a73b8307f8fe4fe46f6ad5979649be01607f11af1eb94aa9e8a3aaf77f0"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:798f717ae7c806d67145f6ae94dc7c342d3222d3b9a311a784f371a4333212c7"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd402169aa00df3142149940b3bf9ce7dde075928c1886d9a1df63d4b8de62"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0d3cab3076af2e4aa5693f89622bef7fa770c6fec967143e4da7508b3dceb9b9"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:74b080c897563f81062b74e44f5a72fa44c2b373741a9ade701d5f789a10ba23"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-win32.whl", hash = "sha256:87d91043ea0dc65ee583026cb18e1b458d8ec5fc0a93637126b5fc0bc3ea68c4"}, - {file = "SQLAlchemy-2.0.25-cp37-cp37m-win_amd64.whl", hash = "sha256:75f99202324383d613ddd1f7455ac908dca9c2dd729ec8584c9541dd41822a2c"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:420362338681eec03f53467804541a854617faed7272fe71a1bfdb07336a381e"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c88f0c7dcc5f99bdb34b4fd9b69b93c89f893f454f40219fe923a3a2fd11625"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3be4987e3ee9d9a380b66393b77a4cd6d742480c951a1c56a23c335caca4ce3"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a159111a0f58fb034c93eeba211b4141137ec4b0a6e75789ab7a3ef3c7e7e3"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8b8cb63d3ea63b29074dcd29da4dc6a97ad1349151f2d2949495418fd6e48db9"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:736ea78cd06de6c21ecba7416499e7236a22374561493b456a1f7ffbe3f6cdb4"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-win32.whl", hash = "sha256:10331f129982a19df4284ceac6fe87353ca3ca6b4ca77ff7d697209ae0a5915e"}, - {file = "SQLAlchemy-2.0.25-cp38-cp38-win_amd64.whl", hash = "sha256:c55731c116806836a5d678a70c84cb13f2cedba920212ba7dcad53260997666d"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:605b6b059f4b57b277f75ace81cc5bc6335efcbcc4ccb9066695e515dbdb3900"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:665f0a3954635b5b777a55111ababf44b4fc12b1f3ba0a435b602b6387ffd7cf"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecf6d4cda1f9f6cb0b45803a01ea7f034e2f1aed9475e883410812d9f9e3cfcf"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c51db269513917394faec5e5c00d6f83829742ba62e2ac4fa5c98d58be91662f"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:790f533fa5c8901a62b6fef5811d48980adeb2f51f1290ade8b5e7ba990ba3de"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1b1180cda6df7af84fe72e4530f192231b1f29a7496951db4ff38dac1687202d"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-win32.whl", hash = "sha256:555651adbb503ac7f4cb35834c5e4ae0819aab2cd24857a123370764dc7d7e24"}, - {file = "SQLAlchemy-2.0.25-cp39-cp39-win_amd64.whl", hash = "sha256:dc55990143cbd853a5d038c05e79284baedf3e299661389654551bd02a6a68d7"}, - {file = "SQLAlchemy-2.0.25-py3-none-any.whl", hash = "sha256:a86b4240e67d4753dc3092d9511886795b3c2852abe599cffe108952f7af7ac3"}, - {file = "SQLAlchemy-2.0.25.tar.gz", hash = "sha256:a2c69a7664fb2d54b8682dd774c3b54f67f84fa123cf84dda2a5f40dcaa04e08"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0b148ab0438f72ad21cb004ce3bdaafd28465c4276af66df3b9ecd2037bf252"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbda76961eb8f27e6ad3c84d1dc56d5bc61ba8f02bd20fcf3450bd421c2fcc9c"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feea693c452d85ea0015ebe3bb9cd15b6f49acc1a31c28b3c50f4db0f8fb1e71"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da98815f82dce0cb31fd1e873a0cb30934971d15b74e0d78cf21f9e1b05953f"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5adf383c73f2d49ad15ff363a8748319ff84c371eed59ffd0127355d6ea1da"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56856b871146bfead25fbcaed098269d90b744eea5cb32a952df00d542cdd368"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win32.whl", hash = "sha256:943aa74a11f5806ab68278284a4ddd282d3fb348a0e96db9b42cb81bf731acdc"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win_amd64.whl", hash = "sha256:c6c4da4843e0dabde41b8f2e8147438330924114f541949e6318358a56d1875a"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46a3d4e7a472bfff2d28db838669fc437964e8af8df8ee1e4548e92710929adc"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3dd67b5d69794cfe82862c002512683b3db038b99002171f624712fa71aeaa"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61e2e41656a673b777e2f0cbbe545323dbe0d32312f590b1bc09da1de6c2a02"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0315d9125a38026227f559488fe7f7cee1bd2fbc19f9fd637739dc50bb6380b2"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af8ce2d31679006e7b747d30a89cd3ac1ec304c3d4c20973f0f4ad58e2d1c4c9"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:81ba314a08c7ab701e621b7ad079c0c933c58cdef88593c59b90b996e8b58fa5"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-win32.whl", hash = "sha256:1ee8bd6d68578e517943f5ebff3afbd93fc65f7ef8f23becab9fa8fb315afb1d"}, + 
{file = "SQLAlchemy-2.0.28-cp311-cp311-win_amd64.whl", hash = "sha256:ad7acbe95bac70e4e687a4dc9ae3f7a2f467aa6597049eeb6d4a662ecd990bb6"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d3499008ddec83127ab286c6f6ec82a34f39c9817f020f75eca96155f9765097"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b66fcd38659cab5d29e8de5409cdf91e9986817703e1078b2fdaad731ea66f5"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea30da1e76cb1acc5b72e204a920a3a7678d9d52f688f087dc08e54e2754c67"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:124202b4e0edea7f08a4db8c81cc7859012f90a0d14ba2bf07c099aff6e96462"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e23b88c69497a6322b5796c0781400692eca1ae5532821b39ce81a48c395aae9"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b6303bfd78fb3221847723104d152e5972c22367ff66edf09120fcde5ddc2e2"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win32.whl", hash = "sha256:a921002be69ac3ab2cf0c3017c4e6a3377f800f1fca7f254c13b5f1a2f10022c"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win_amd64.whl", hash = "sha256:b4a2cf92995635b64876dc141af0ef089c6eea7e05898d8d8865e71a326c0385"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e91b5e341f8c7f1e5020db8e5602f3ed045a29f8e27f7f565e0bdee3338f2c7"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c7b78dfc7278329f27be02c44abc0d69fe235495bb8e16ec7ef1b1a17952db"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eba73ef2c30695cb7eabcdb33bb3d0b878595737479e152468f3ba97a9c22a4"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5df5d1dafb8eee89384fb7a1f79128118bc0ba50ce0db27a40750f6f91aa99d5"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2858bbab1681ee5406650202950dc8f00e83b06a198741b7c656e63818633526"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win32.whl", hash = "sha256:9461802f2e965de5cff80c5a13bc945abea7edaa1d29360b485c3d2b56cdb075"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win_amd64.whl", hash = "sha256:a6bec1c010a6d65b3ed88c863d56b9ea5eeefdf62b5e39cafd08c65f5ce5198b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:843a882cadebecc655a68bd9a5b8aa39b3c52f4a9a5572a3036fb1bb2ccdc197"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dbb990612c36163c6072723523d2be7c3eb1517bbdd63fe50449f56afafd1133"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7e4baf9161d076b9a7e432fce06217b9bd90cfb8f1d543d6e8c4595627edb9"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0a5354cb4de9b64bccb6ea33162cb83e03dbefa0d892db88a672f5aad638a75"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fffcc8edc508801ed2e6a4e7b0d150a62196fd28b4e16ab9f65192e8186102b6"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca7b6d99a4541b2ebab4494f6c8c2f947e0df4ac859ced575238e1d6ca5716b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-win32.whl", hash = "sha256:8c7f10720fc34d14abad5b647bc8202202f4948498927d9f1b4df0fb1cf391b7"}, + {file = 
"SQLAlchemy-2.0.28-cp38-cp38-win_amd64.whl", hash = "sha256:243feb6882b06a2af68ecf4bec8813d99452a1b62ba2be917ce6283852cf701b"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc4974d3684f28b61b9a90fcb4c41fb340fd4b6a50c04365704a4da5a9603b05"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87724e7ed2a936fdda2c05dbd99d395c91ea3c96f029a033a4a20e008dd876bf"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68722e6a550f5de2e3cfe9da6afb9a7dd15ef7032afa5651b0f0c6b3adb8815d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:328529f7c7f90adcd65aed06a161851f83f475c2f664a898af574893f55d9e53"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:df40c16a7e8be7413b885c9bf900d402918cc848be08a59b022478804ea076b8"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:426f2fa71331a64f5132369ede5171c52fd1df1bd9727ce621f38b5b24f48750"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win32.whl", hash = "sha256:33157920b233bc542ce497a81a2e1452e685a11834c5763933b440fedd1d8e2d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win_amd64.whl", hash = "sha256:2f60843068e432311c886c5f03c4664acaef507cf716f6c60d5fde7265be9d7b"}, + {file = "SQLAlchemy-2.0.28-py3-none-any.whl", hash = "sha256:78bb7e8da0183a8301352d569900d9d3594c48ac21dc1c2ec6b3121ed8b6c986"}, + {file = "SQLAlchemy-2.0.28.tar.gz", hash = "sha256:dd53b6c4e6d960600fd6532b79ee28e2da489322fcf6648738134587faf767b6"}, ] [package.dependencies] @@ -3062,6 +4244,52 @@ dev = ["build", "flake8"] doc = ["sphinx"] test = ["pytest", "pytest-cov"] +[[package]] +name = "srsly" +version = "2.4.8" +description = "Modern high-performance serialization utilities for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "srsly-2.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:17f3bcb418bb4cf443ed3d4dcb210e491bd9c1b7b0185e6ab10b6af3271e63b2"}, + {file = "srsly-2.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b070a58e21ab0e878fd949f932385abb4c53dd0acb6d3a7ee75d95d447bc609"}, + {file = "srsly-2.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98286d20014ed2067ad02b0be1e17c7e522255b188346e79ff266af51a54eb33"}, + {file = "srsly-2.4.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18685084e2e0cc47c25158cbbf3e44690e494ef77d6418c2aae0598c893f35b0"}, + {file = "srsly-2.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:980a179cbf4eb5bc56f7507e53f76720d031bcf0cef52cd53c815720eb2fc30c"}, + {file = "srsly-2.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5472ed9f581e10c32e79424c996cf54c46c42237759f4224806a0cd4bb770993"}, + {file = "srsly-2.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:50f10afe9230072c5aad9f6636115ea99b32c102f4c61e8236d8642c73ec7a13"}, + {file = "srsly-2.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c994a89ba247a4d4f63ef9fdefb93aa3e1f98740e4800d5351ebd56992ac75e3"}, + {file = "srsly-2.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace7ed4a0c20fa54d90032be32f9c656b6d75445168da78d14fe9080a0c208ad"}, + {file = "srsly-2.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:7a919236a090fb93081fbd1cec030f675910f3863825b34a9afbcae71f643127"}, + {file = "srsly-2.4.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7583c03d114b4478b7a357a1915305163e9eac2dfe080da900555c975cca2a11"}, + {file = 
"srsly-2.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:94ccdd2f6db824c31266aaf93e0f31c1c43b8bc531cd2b3a1d924e3c26a4f294"}, + {file = "srsly-2.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db72d2974f91aee652d606c7def98744ca6b899bd7dd3009fd75ebe0b5a51034"}, + {file = "srsly-2.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a60c905fd2c15e848ce1fc315fd34d8a9cc72c1dee022a0d8f4c62991131307"}, + {file = "srsly-2.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:e0b8d5722057000694edf105b8f492e7eb2f3aa6247a5f0c9170d1e0d074151c"}, + {file = "srsly-2.4.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:196b4261f9d6372d1d3d16d1216b90c7e370b4141471322777b7b3c39afd1210"}, + {file = "srsly-2.4.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4750017e6d78590b02b12653e97edd25aefa4734281386cc27501d59b7481e4e"}, + {file = "srsly-2.4.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa034cd582ba9e4a120c8f19efa263fcad0f10fc481e73fb8c0d603085f941c4"}, + {file = "srsly-2.4.8-cp36-cp36m-win_amd64.whl", hash = "sha256:5a78ab9e9d177ee8731e950feb48c57380036d462b49e3fb61a67ce529ff5f60"}, + {file = "srsly-2.4.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:087e36439af517e259843df93eb34bb9e2d2881c34fa0f541589bcfbc757be97"}, + {file = "srsly-2.4.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad141d8a130cb085a0ed3a6638b643e2b591cb98a4591996780597a632acfe20"}, + {file = "srsly-2.4.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24d05367b2571c0d08d00459636b951e3ca2a1e9216318c157331f09c33489d3"}, + {file = "srsly-2.4.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3fd661a1c4848deea2849b78f432a70c75d10968e902ca83c07c89c9b7050ab8"}, + {file = "srsly-2.4.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec37233fe39af97b00bf20dc2ceda04d39b9ea19ce0ee605e16ece9785e11f65"}, + {file = "srsly-2.4.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d2fd4bc081f1d6a6063396b6d97b00d98e86d9d3a3ac2949dba574a84e148080"}, + {file = "srsly-2.4.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7347cff1eb4ef3fc335d9d4acc89588051b2df43799e5d944696ef43da79c873"}, + {file = "srsly-2.4.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a9dc1da5cc94d77056b91ba38365c72ae08556b6345bef06257c7e9eccabafe"}, + {file = "srsly-2.4.8-cp38-cp38-win_amd64.whl", hash = "sha256:dc0bf7b6f23c9ecb49ec0924dc645620276b41e160e9b283ed44ca004c060d79"}, + {file = "srsly-2.4.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff8df21d00d73c371bead542cefef365ee87ca3a5660de292444021ff84e3b8c"}, + {file = "srsly-2.4.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ac3e340e65a9fe265105705586aa56054dc3902789fcb9a8f860a218d6c0a00"}, + {file = "srsly-2.4.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06d1733f4275eff4448e96521cc7dcd8fdabd68ba9b54ca012dcfa2690db2644"}, + {file = "srsly-2.4.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be5b751ad88fdb58fb73871d456248c88204f213aaa3c9aab49b6a1802b3fa8d"}, + {file = "srsly-2.4.8-cp39-cp39-win_amd64.whl", hash = "sha256:822a38b8cf112348f3accbc73274a94b7bf82515cb14a85ba586d126a5a72851"}, + {file = "srsly-2.4.8.tar.gz", hash = "sha256:b24d95a65009c2447e0b49cda043ac53fecf4f09e358d87a57446458f91b8a91"}, +] + +[package.dependencies] +catalogue = ">=2.0.3,<2.1.0" + [[package]] name = "stack-data" version = "0.6.3" @@ -3096,18 
+4324,232 @@ files = [ pbr = ">=2.0.0,<2.1.0 || >2.1.0" [[package]] -name = "tabulate" -version = "0.9.0" -description = "Pretty-print tabular data" -optional = true +name = "tensorboard" +version = "2.16.2" +description = "TensorBoard lets you watch Tensors Flow" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tensorboard-2.16.2-py3-none-any.whl", hash = "sha256:9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45"}, +] + +[package.dependencies] +absl-py = ">=0.4" +grpcio = ">=1.48.2" +markdown = ">=2.6.8" +numpy = ">=1.12.0" +protobuf = ">=3.19.6,<4.24.0 || >4.24.0" +setuptools = ">=41.0.0" +six = ">1.9" +tensorboard-data-server = ">=0.7.0,<0.8.0" +werkzeug = ">=1.0.1" + +[[package]] +name = "tensorboard-data-server" +version = "0.7.2" +description = "Fast data loading for TensorBoard" +optional = false python-versions = ">=3.7" files = [ - {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, - {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, + {file = "tensorboard_data_server-0.7.2-py3-none-any.whl", hash = "sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb"}, + {file = "tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60"}, + {file = "tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530"}, +] + +[[package]] +name = "tensorflow" +version = "2.16.1" +description = "TensorFlow is an open source machine learning framework for everyone." +optional = false +python-versions = ">=3.9" +files = [ + {file = "tensorflow-2.16.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:ab79f156dd746c2dae906e3b4c5daac3855742941752e5a2c28f094c56eed466"}, + {file = "tensorflow-2.16.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:8e376ab46fb1df18a1f927d77011d36ecf7b717a81cbfe4a941c7bf5236939b3"}, + {file = "tensorflow-2.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae0554471d472b8095f8a5204d878389d0d4bc88f6ef6edcd477b952dff5cfab"}, + {file = "tensorflow-2.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e96047657c64459a36a0cc211a3d003df96c7be3f95a84f7b705715f5697270"}, + {file = "tensorflow-2.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:21a3c6d76a39f52754c389326f6bef8aef3c26b5bc89ca365add4a69483e569e"}, + {file = "tensorflow-2.16.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:f8a5b83ca4bf1813da158f63479cfdf848c0761e5120258417b3a96074a489f5"}, + {file = "tensorflow-2.16.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc2065d1d27f9f89fea8a0fe8fdf6c437ae60987cd7f2928e0d00e532e79e44d"}, + {file = "tensorflow-2.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:617df9fa2d697c4bc22fa3ee87eb01d580ab1bd0438fea15c4ec2f2870c40bb0"}, + {file = "tensorflow-2.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930c61100cce3a5cb63d30fe6776504405214e8398a26ca968222ecb8b8f9404"}, + {file = "tensorflow-2.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:093573a8eb93ef9511e7015b8de9659ed27156f2f05e6d1211f8f4cb76407ee1"}, + {file = "tensorflow-2.16.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:09cac3c6a8fbf85a9b95491b58086154dd00a09956ed31823bb45c6605f0e881"}, + {file = 
"tensorflow-2.16.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:bbf06d879070dfce2617c7d2bb19696bb1b2bcbb3b4ae009520e7166dd75dfc2"}, + {file = "tensorflow-2.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c612cdd436bb55b8dae1ecdd1d253496c95b006870b7165b8480c6606b8622aa"}, + {file = "tensorflow-2.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a123fbb5788ba30d1113ce01bb166ddf85056fcb40e287c32a929ebfa4aa061"}, + {file = "tensorflow-2.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c5611e7357b7a4bc6dccc60750c91e27cdff82622fc917848f22add5ab8de26"}, + {file = "tensorflow-2.16.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:e9cf3fba7f389ff8b8342c5fbebb2529321e0ce9e03d7bcb3657ee0876686c36"}, + {file = "tensorflow-2.16.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:42858b5d14159a2b9cc01c7f5a88e063b0601f20430cb358374005a67da38114"}, + {file = "tensorflow-2.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92152aa77c402684e9066885515af6a45d88455c4453a818052c7369357078d8"}, + {file = "tensorflow-2.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b946e73bf48d857928329b8b321b00b42fe1b4f774c6580666683b0629689f"}, + {file = "tensorflow-2.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:8231a9d7bba92a51231dcdcc3073920ad7d22fa88c64c7e2ecb7f1feac9d5fcb"}, ] +[package.dependencies] +absl-py = ">=1.0.0" +astunparse = ">=1.6.0" +flatbuffers = ">=23.5.26" +gast = ">=0.2.1,<0.5.0 || >0.5.0,<0.5.1 || >0.5.1,<0.5.2 || >0.5.2" +google-pasta = ">=0.1.1" +grpcio = ">=1.24.3,<2.0" +h5py = ">=3.10.0" +keras = ">=3.0.0" +libclang = ">=13.0.0" +ml-dtypes = ">=0.3.1,<0.4.0" +numpy = [ + {version = ">=1.23.5,<2.0.0", markers = "python_version <= \"3.11\""}, + {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, +] +opt-einsum = ">=2.3.2" +packaging = "*" +protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" +requests = ">=2.21.0,<3" +setuptools = "*" +six = ">=1.12.0" +tensorboard = ">=2.16,<2.17" +tensorflow-io-gcs-filesystem = {version = ">=0.23.1", markers = "python_version < \"3.12\""} +termcolor = ">=1.1.0" +typing-extensions = ">=3.6.6" +wrapt = ">=1.11.0" + [package.extras] -widechars = ["wcwidth"] +and-cuda = ["nvidia-cublas-cu12 (==12.3.4.1)", "nvidia-cuda-cupti-cu12 (==12.3.101)", "nvidia-cuda-nvcc-cu12 (==12.3.107)", "nvidia-cuda-nvrtc-cu12 (==12.3.107)", "nvidia-cuda-runtime-cu12 (==12.3.101)", "nvidia-cudnn-cu12 (==8.9.7.29)", "nvidia-cufft-cu12 (==11.0.12.1)", "nvidia-curand-cu12 (==10.3.4.107)", "nvidia-cusolver-cu12 (==11.5.4.101)", "nvidia-cusparse-cu12 (==12.2.0.103)", "nvidia-nccl-cu12 (==2.19.3)", "nvidia-nvjitlink-cu12 (==12.3.101)"] + +[[package]] +name = "tensorflow-io-gcs-filesystem" +version = "0.36.0" +description = "TensorFlow IO" +optional = false +python-versions = ">=3.7, <3.12" +files = [ + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:702c6df62b38095ff613c433546d9424d4f33902a5ab26b00fd26457e27a99fa"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e9b8aaca2789af356c42afda0f52380f82e5abb2f3c0b85087833fcfe03875d8"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c477aed96864ceae77d7051c3b687f28813aba7320fc5dd552164fad6ec8d1a1"}, + {file = 
"tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be1ff92559dfa23048b01179a1827081947583f5c6f9986ccac471df8a29322a"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:72c3ca4b8c0d8dbdd970699d05a100107cf200317ad8e6a8373e2c37225cd552"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:848e8e89a0f49258c7782189c938d8d1162d989da1a80c79f95c7af3ef6006c8"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d72db1ab03edb65fa1e98d06e504ccbc64282d38ab3589afb6db66dc448d1c1"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd4d946b5fa23220daa473a80e511a5fb27493d7e49d17dff0bb43bb0a31f32"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa346fd1dd9f57848b73874007440504f060fadd689fa1cc29cc49817d0eeaf3"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0a4437824424a4423cf86162cb8b21b1bec24698194332748b50bb952e62ab9f"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:31806bd7ac2db789161bc720747de22947063265561a4c17be54698fd9780b03"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc0e57976c1aa035af6281f0330cfb8dd50eee2f63412ecc84d60ff5075d29b7"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e97ff5c280eb10f699098ae21057be2b146d39e8a906cd5db91f2ea6c34e47d0"}, +] + +[package.extras] +tensorflow = ["tensorflow (>=2.15.0,<2.16.0)"] +tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.15.0,<2.16.0)"] +tensorflow-cpu = ["tensorflow-cpu (>=2.15.0,<2.16.0)"] +tensorflow-gpu = ["tensorflow-gpu (>=2.15.0,<2.16.0)"] +tensorflow-rocm = ["tensorflow-rocm (>=2.15.0,<2.16.0)"] + +[[package]] +name = "termcolor" +version = "2.4.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.8" +files = [ + {file = "termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63"}, + {file = "termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "thinc" +version = "8.2.3" +description = "A refreshing functional take on deep learning, compatible with your favorite libraries" +optional = false +python-versions = ">=3.6" +files = [ + {file = "thinc-8.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:27950dc8a14e1ead09dec329ad98edf1b8f7cc71ec9d5ce5f301073de9d7dadf"}, + {file = "thinc-8.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fce09571619f344983f915f5deb5b8346304b56d3a9ae1bc5ac8c5872eee0738"}, + {file = "thinc-8.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0fb4e534c978ff4b429678ab28db2f81503549f97ed61b2b752c07c08b2083"}, + {file = "thinc-8.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607223c178ae5fba36a3b35fa82d94a453694551bcfbe7f9ac04a01a9e87ebad"}, + {file = "thinc-8.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:53b48a6ae43b0e4054816a378163237b1d2120a49c71994682037437d64b7f84"}, + 
{file = "thinc-8.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9db67f460dae2e3aada1ff166394ce13c2dabb4db93d6bd79cd256f5beab9599"}, + {file = "thinc-8.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d57bdf43e0acd1406d681bf988179f677cf1b385c86f744bf314d827383ce31"}, + {file = "thinc-8.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78311a593b8bf3f03af52bf71d6b364463c598f3540ea8387c00017d2a0e0a5d"}, + {file = "thinc-8.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9489ae7fec427064a50a0c3e7c661a95251756032e31316add2c8c13f98f93c"}, + {file = "thinc-8.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:d0bf3840d434e3dbdf294643e6d54d2042d0e652abc68dee16673f28269fc456"}, + {file = "thinc-8.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bb7c64d0cb8066c47af9441cd611e89a0e2b28b85f2fffbdec791724c81e1915"}, + {file = "thinc-8.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c11ab3236e56311568f1e84099bfbeea3a4ee2434758a32982b224ddf8bad9c5"}, + {file = "thinc-8.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0a7f29ad534b6e761ee24d0c9e7402447e8ed4e772922795f77c98d88d7f99c"}, + {file = "thinc-8.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2817bde75c92f98fee747efdbebca68d16158b808401c5a922ba54a5f2619e9b"}, + {file = "thinc-8.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:a336f8cae7374d1768a52e63a5084a1208e30b8761eede113d2703e43e7839f1"}, + {file = "thinc-8.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:45c1a2880329eae53da1d77a4898b7fd30faad445b28fdf92c5557dbf6492ff0"}, + {file = "thinc-8.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c899b25442ed915bc77fa4cf07e908dea1bccab7c4b8d854cc0b261026d6a06"}, + {file = "thinc-8.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83a6b46d5f0accf0c2b2e5ff05b1bffd4d99721513b6d0374574009b0aab292c"}, + {file = "thinc-8.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:9a29a9ca7a5060c923866f16ba7823a4540cfd708eafa7202ee89ac029e0b78b"}, + {file = "thinc-8.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bd25b781faae71c52ba053157ab1865f4163be1a6485e70a007855a037ba060f"}, + {file = "thinc-8.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f01a7107c36c4fc60b60fdbda30d76a0ac9bc8f4f9c7f6872db62250e2f836a5"}, + {file = "thinc-8.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa65182424efda03be9359c3540928bf2985792f89826a76ee475c7c6b2ec64f"}, + {file = "thinc-8.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4d448c8a870f594125cbfadc91024ce67683eae5698207101d2ea4793ab222a1"}, + {file = "thinc-8.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97605421b898441733fda24c6dda74a85325fbeebc808176857b0a8e6e7a9d47"}, + {file = "thinc-8.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8b0309d14bcfdad24b1e8bb87f8b245acfd7eb5305be466c284c788adf026ffa"}, + {file = "thinc-8.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aead20abe233adade3c37daeb9d08e5429dfcada81856b1f2b1b7e4a67a671a0"}, + {file = "thinc-8.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:324e5d2c98f787d82d239cf33cee425e1c11e34a3c96cb3f4e1ee5661abef50c"}, + {file = "thinc-8.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:45e6416e56d5101d0557e31cd06235d80fc89e9ac455ef1b444c440cb3c1ce64"}, + {file = "thinc-8.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:5e6ebf63a185d7691b38655a184e30554fbe589805a802d97230eed07af8ea39"}, + {file = "thinc-8.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4d29ee871cfd0d40f4a0436e154640c0965b163b91a088a85bcd5658c1cc3ed4"}, + {file = "thinc-8.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8709d114131680bc7c02b0c97817bd7692eda50beb7849c7908666cf15a6cfd"}, + {file = "thinc-8.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9b81e3c1e89c8ed6dff5a8440f584cda623ec77a3bd8c0ed059936405b8a7ca"}, + {file = "thinc-8.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:1df983af74952d4818703e6bac8af64fad338eaaef8b017fa05d372e3c68e577"}, + {file = "thinc-8.2.3.tar.gz", hash = "sha256:f5afc5222912a80bda8bdcec958362a2ba538d7027dc8db6154845d2859dca76"}, +] + +[package.dependencies] +blis = ">=0.7.8,<0.8.0" +catalogue = ">=2.0.4,<2.1.0" +confection = ">=0.0.1,<1.0.0" +cymem = ">=2.0.2,<2.1.0" +murmurhash = ">=1.0.2,<1.1.0" +numpy = {version = ">=1.19.0", markers = "python_version >= \"3.9\""} +packaging = ">=20.0" +preshed = ">=3.0.2,<3.1.0" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<3.0.0" +setuptools = "*" +srsly = ">=2.4.0,<3.0.0" +wasabi = ">=0.8.1,<1.2.0" + +[package.extras] +cuda = ["cupy (>=5.0.0b4)"] +cuda-autodetect = ["cupy-wheel (>=11.0.0)"] +cuda100 = ["cupy-cuda100 (>=5.0.0b4)"] +cuda101 = ["cupy-cuda101 (>=5.0.0b4)"] +cuda102 = ["cupy-cuda102 (>=5.0.0b4)"] +cuda110 = ["cupy-cuda110 (>=5.0.0b4)"] +cuda111 = ["cupy-cuda111 (>=5.0.0b4)"] +cuda112 = ["cupy-cuda112 (>=5.0.0b4)"] +cuda113 = ["cupy-cuda113 (>=5.0.0b4)"] +cuda114 = ["cupy-cuda114 (>=5.0.0b4)"] +cuda115 = ["cupy-cuda115 (>=5.0.0b4)"] +cuda116 = ["cupy-cuda116 (>=5.0.0b4)"] +cuda117 = ["cupy-cuda117 (>=5.0.0b4)"] +cuda11x = ["cupy-cuda11x (>=11.0.0)"] +cuda12x = ["cupy-cuda12x (>=11.5.0)"] +cuda80 = ["cupy-cuda80 (>=5.0.0b4)"] +cuda90 = ["cupy-cuda90 (>=5.0.0b4)"] +cuda91 = ["cupy-cuda91 (>=5.0.0b4)"] +cuda92 = ["cupy-cuda92 (>=5.0.0b4)"] +datasets = ["ml-datasets (>=0.2.0,<0.3.0)"] +mxnet = ["mxnet (>=1.5.1,<1.6.0)"] +tensorflow = ["tensorflow (>=2.0.0,<2.6.0)"] +torch = ["torch (>=1.6.0)"] + +[[package]] +name = "threadpoolctl" +version = "3.4.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.4.0-py3-none-any.whl", hash = "sha256:8f4c689a65b23e5ed825c8436a92b818aac005e0f3715f6a1664d7c7ee29d262"}, + {file = "threadpoolctl-3.4.0.tar.gz", hash = "sha256:f11b491a03661d6dd7ef692dd422ab34185d982466c49c8f98c8f716b5c93196"}, +] [[package]] name = "tinycss2" @@ -3185,13 +4627,13 @@ testing = ["flaky (>=3.4.0)", "freezegun (>=0.3.11)", "pathlib2 (>=2.3.3)", "psu [[package]] name = "tqdm" -version = "4.66.1" +version = "4.66.2" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, - {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, + {file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9"}, + {file = "tqdm-4.66.2.tar.gz", hash = "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"}, ] [package.dependencies] @@ -3205,55 +4647,88 @@ telegram = ["requests"] [[package]] name = "traitlets" -version = "5.14.1" +version = "5.14.2" description = "Traitlets Python configuration system" optional = false python-versions 
= ">=3.8" files = [ - {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"}, - {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"}, + {file = "traitlets-5.14.2-py3-none-any.whl", hash = "sha256:fcdf85684a772ddeba87db2f398ce00b40ff550d1528c03c14dbf6a02003cd80"}, + {file = "traitlets-5.14.2.tar.gz", hash = "sha256:8cdd83c040dab7d1dee822678e5f5d100b514f7b72b01615b26fc5718916fdf9"}, ] [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.1)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "typer" +version = "0.9.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.6" +files = [ + {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, + {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, +] + +[package.dependencies] +click = ">=7.1.1,<9.0.0" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] +doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] [[package]] name = "typing-extensions" -version = "4.9.0" +version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, - {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, + {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, + {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, +] + +[[package]] +name = "tzdata" +version = "2024.1" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, + {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, ] [[package]] name = "urllib3" -version = "2.1.0" +version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.25.0" +version = "20.25.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.25.0-py3-none-any.whl", hash = "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3"}, - {file = "virtualenv-20.25.0.tar.gz", hash = "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b"}, + {file = "virtualenv-20.25.1-py3-none-any.whl", hash = "sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a"}, + {file = "virtualenv-20.25.1.tar.gz", hash = "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197"}, ] [package.dependencies] @@ -3265,40 +4740,56 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +[[package]] +name = "wasabi" +version = "1.1.2" +description = "A lightweight console printing and formatting toolkit" +optional = false +python-versions = ">=3.6" +files = [ + {file = "wasabi-1.1.2-py3-none-any.whl", hash = "sha256:0a3f933c4bf0ed3f93071132c1b87549733256d6c8de6473c5f7ed2e171b5cf9"}, + {file = "wasabi-1.1.2.tar.gz", hash = "sha256:1aaef3aceaa32edb9c91330d29d3936c0c39fdb965743549c173cb54b16c30b5"}, +] + +[package.dependencies] +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\" and python_version >= \"3.7\""} + [[package]] name = "watchdog" -version = "3.0.0" +version = "4.0.0" description = "Filesystem events monitoring" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, - {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, - {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, - {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, - {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, - {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, - {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, - {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, - {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + {file = 
"watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = 
"sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, ] [package.extras] @@ -3315,6 +4806,28 @@ files = [ {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] +[[package]] +name = "weasel" +version = "0.3.4" +description = "Weasel: A small and easy workflow system" +optional = false +python-versions = ">=3.6" +files = [ + {file = "weasel-0.3.4-py3-none-any.whl", hash = "sha256:ee48a944f051d007201c2ea1661d0c41035028c5d5a8bcb29a0b10f1100206ae"}, + {file = "weasel-0.3.4.tar.gz", hash = "sha256:eb16f92dc9f1a3ffa89c165e3a9acd28018ebb656e0da4da02c0d7d8ae3f6178"}, +] + +[package.dependencies] +cloudpathlib = ">=0.7.0,<0.17.0" +confection = ">=0.0.4,<0.2.0" +packaging = ">=20.0" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<3.0.0" +requests = ">=2.13.0,<3.0.0" +smart-open = ">=5.2.1,<7.0.0" +srsly = ">=2.4.3,<3.0.0" +typer = ">=0.3.0,<0.10.0" +wasabi = ">=0.9.1,<1.2.0" + [[package]] name = "webencodings" version = "0.5.1" @@ -3326,20 +4839,130 @@ files = [ {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, ] +[[package]] +name = "werkzeug" +version = "3.0.1" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, + {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wheel" +version = "0.43.0" +description = "A built-package format for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "wheel-0.43.0-py3-none-any.whl", hash = "sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81"}, + {file = "wheel-0.43.0.tar.gz", hash = "sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85"}, +] + +[package.extras] +test = ["pytest (>=6.0.0)", "setuptools (>=65)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + [[package]] name = "zipp" -version = "3.17.0" +version = "3.18.1" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, - {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, + {file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"}, + {file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [extras] database = ["sqlalchemy"] @@ -3349,5 +4972,5 @@ notebook = ["ploomber-engine"] [metadata] lock-version = "2.0" -python-versions = ">=3.8,<3.13" -content-hash = "d8a8675e69ca12306c86ea22a2f10716c7c70a3db2f9708080f060c1fb73be90" +python-versions = ">=3.9,<3.13" +content-hash = "17d24abebbf5c91bcc5d3befede2ab7acc854718461d58f50cd6a0ad6977d294" diff --git a/pyproject.toml b/pyproject.toml index 3113261c..2ede0348 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,44 +1,54 @@ [tool.poetry] -name = "magnus" +name = "runnable" version = "0" description = "A Compute agnostic pipelining software" authors = ["Vijay Vammi "] license = "Apache-2.0" readme = "README.md" -homepage = "https://github.com/AstraZeneca/magnus-core" -repository = "https://github.com/AstraZeneca/magnus-core" -documentation = "https://astrazeneca.github.io/magnus-core/" +homepage = "https://github.com/vijayvammi/runnable" +repository = "https://github.com/vijayvammi/runnable" +documentation = "https://github.com/vijayvammi/runnable" [tool.poetry.dependencies] -python = ">=3.8,<3.13" +python = ">=3.9,<3.13" "ruamel.yaml" = "*" "ruamel.yaml.clib" = "*" pydantic = "^2.5" stevedore = "^3.5.0" "click" = "*" click-plugins = "^1.1.1" -typing-extensions ={ version= "*", python = "<3.8" } -docker ={ version = "*", optional = true } -sqlalchemy ={ version = "*", optional = true } +typing-extensions = { version = "*", python = "<3.8" } +docker = { version = "*", optional = true } +sqlalchemy = { version = "*", optional = true } rich = "^13.5.2" -mlflow-skinny ={ version = "*", optional = true } +mlflow-skinny = { version = "*", optional = true } ploomber-engine = "^0.0.31" +dill = "^0.3.8" [tool.poetry.group.docs.dependencies] mkdocs = "*" mkdocs-material = "*" mkdocs-section-index = 
"^0.3.5" -mkdocstrings = {extras = ["python"], version = "^0.24.0"} +mkdocstrings = { extras = ["python"], version = "^0.24.0" } nbconvert = "^7.13.1" mkdocs-click = "^0.8.1" +tensorflow = "^2.16.1" [tool.poetry.group.binary.dependencies] pyinstaller = "^5.13.2" [tool.poetry.group.perf.dependencies] -# Run the performace tests poetry run python -m pyflame -p ./flamegraph.pl magnus/entrypoints.py +# Run the performace tests poetry run python -m pyflame -p ./flamegraph.pl runnable/entrypoints.py pyflame = "^0.3.1" + +[tool.poetry.group.tutorial.dependencies] +pandas = "^2.2.1" +numpy = "^1.26.4" +scikit-learn = "^1.4.1.post1" +en-core-web-sm = { url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1.tar.gz" } +matplotlib = "^3.8.3" + [tool.poetry.extras] docker = ['docker'] notebook = ['ploomber-engine'] @@ -59,41 +69,38 @@ gitlint = "^0.19.1" [tool.poetry.scripts] -magnus= 'magnus.cli:cli' +runnable = 'runnable.cli:cli' # Plugins for Executors [tool.poetry.plugins."executor"] -"local" = "magnus.extensions.executor.local.implementation:LocalExecutor" -"local-container" = "magnus.extensions.executor.local_container.implementation:LocalContainerExecutor" -"argo" = "magnus.extensions.executor.argo.implementation:ArgoExecutor" -"mocked" = "magnus.extensions.executor.mocked.implementation:MockedExecutor" +"local" = "runnable.extensions.executor.local.implementation:LocalExecutor" +"local-container" = "runnable.extensions.executor.local_container.implementation:LocalContainerExecutor" +"argo" = "runnable.extensions.executor.argo.implementation:ArgoExecutor" +"mocked" = "runnable.extensions.executor.mocked.implementation:MockedExecutor" +"retry" = "runnable.extensions.executor.retry.implementation:RetryExecutor" # Plugins for Catalog [tool.poetry.plugins."catalog"] -"do-nothing" = "magnus.catalog:DoNothingCatalog" -"file-system" = "magnus.extensions.catalog.file_system.implementation:FileSystemCatalog" +"do-nothing" = "runnable.catalog:DoNothingCatalog" +"file-system" = "runnable.extensions.catalog.file_system.implementation:FileSystemCatalog" # Plugins for Secrets [tool.poetry.plugins."secrets"] -"do-nothing" = "magnus.secrets:DoNothingSecretManager" -"dotenv" = "magnus.extensions.secrets.dotenv.implementation:DotEnvSecrets" -"env-secrets-manager" = "magnus.extensions.secrets.env_secrets.implementation:EnvSecretsManager" +"do-nothing" = "runnable.secrets:DoNothingSecretManager" +"dotenv" = "runnable.extensions.secrets.dotenv.implementation:DotEnvSecrets" +"env-secrets-manager" = "runnable.extensions.secrets.env_secrets.implementation:EnvSecretsManager" # Plugins for Run Log store [tool.poetry.plugins."run_log_store"] -"buffered" = "magnus.datastore:BufferRunLogstore" -"file-system" = "magnus.extensions.run_log_store.file_system.implementation:FileSystemRunLogstore" -"chunked-fs" = "magnus.extensions.run_log_store.chunked_file_system.implementation:ChunkedFileSystemRunLogStore" +"buffered" = "runnable.datastore:BufferRunLogstore" +"file-system" = "runnable.extensions.run_log_store.file_system.implementation:FileSystemRunLogstore" +"chunked-fs" = "runnable.extensions.run_log_store.chunked_file_system.implementation:ChunkedFileSystemRunLogStore" -# Plugins for Experiment tracker -[tool.poetry.plugins."experiment_tracker"] -"do-nothing" = "magnus.experiment_tracker:DoNothingTracker" -"mlflow" = "magnus.extensions.experiment_tracker.mlflow.implementation:MLFlowExperimentTracker" # Plugins for Pickler [tool.poetry.plugins."pickler"] -"pickle" = 
"magnus.pickler:NativePickler" +"pickle" = "runnable.pickler:NativePickler" # Plugins for Integration @@ -102,19 +109,19 @@ magnus= 'magnus.cli:cli' # Plugins for Tasks [tool.poetry.plugins."tasks"] -"python" = "magnus.tasks:PythonTaskType" -"shell" = "magnus.tasks:ShellTaskType" -"notebook" = "magnus.tasks:NotebookTaskType" +"python" = "runnable.tasks:PythonTaskType" +"shell" = "runnable.tasks:ShellTaskType" +"notebook" = "runnable.tasks:NotebookTaskType" # Plugins for Nodes [tool.poetry.plugins."nodes"] -"task" = "magnus.extensions.nodes:TaskNode" -"fail" = "magnus.extensions.nodes:FailNode" -"success" = "magnus.extensions.nodes:SuccessNode" -"parallel" = "magnus.extensions.nodes:ParallelNode" -"map" = "magnus.extensions.nodes:MapNode" -"dag" = "magnus.extensions.nodes:DagNode" -"stub" = "magnus.extensions.nodes:StubNode" +"task" = "runnable.extensions.nodes:TaskNode" +"fail" = "runnable.extensions.nodes:FailNode" +"success" = "runnable.extensions.nodes:SuccessNode" +"parallel" = "runnable.extensions.nodes:ParallelNode" +"map" = "runnable.extensions.nodes:MapNode" +"dag" = "runnable.extensions.nodes:DagNode" +"stub" = "runnable.extensions.nodes:StubNode" [tool.black] @@ -122,7 +129,9 @@ line-length = 120 [tool.ruff] # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default. -select = ["E", "F","W","I001"] +select = ["E", "F", "W"] + +# extend-select = ["I"] ignore = [] @@ -152,12 +161,14 @@ exclude = [ "dist", "node_modules", "venv", + "examples/kubeflow", + "examples/tutorials/", ] per-file-ignores = {} # Builtins -builtins=["__version__"] +builtins = ["__version__"] # Same as Black. line-length = 120 @@ -204,9 +215,7 @@ branch = true [tool.coverage.report] # Regexes for lines to exclude from consideration -exclude_lines = [ - "pragma: no cover" -] +exclude_lines = ["pragma: no cover"] include_namespace_packages = true show_missing = true @@ -218,10 +227,10 @@ exclude_also = [ # Don't complain about abstract methods, they aren't run: "@(abc\\.)?abstractmethod", - ] +] -omit =[ - "magnus/cli.py", - "magnus/extensions/executor/demo_renderer/*", - "*FF.py" - ] +omit = [ + "runnable/cli.py", + "runnable/extensions/executor/demo_renderer/*", + "*FF.py", +] diff --git a/runnable/__init__.py b/runnable/__init__.py new file mode 100644 index 00000000..247a6311 --- /dev/null +++ b/runnable/__init__.py @@ -0,0 +1,36 @@ +# ruff: noqa + +# TODO: Might need to add Rich to pyinstaller part +import logging +from logging.config import dictConfig + +from rich.console import Console + +from runnable import defaults + +dictConfig(defaults.LOGGING_CONFIG) +logger = logging.getLogger(defaults.LOGGER_NAME) + +console = Console() +console.print(":runner: Lets go!!") + +from runnable.sdk import ( # noqa + Catalog, + Fail, + Map, + NotebookTask, + Parallel, + Pipeline, + PythonTask, + ShellTask, + Stub, + Success, + metric, + pickled, +) + +# TODO: Think of model registry as a central place to store models. +# TODO: Implement Sagemaker pipelines as a executor. 
+
+
+# TODO: Think of a way of generating dag hash without executor configuration
diff --git a/magnus/catalog.py b/runnable/catalog.py
similarity index 90%
rename from magnus/catalog.py
rename to runnable/catalog.py
index 3daf3e9a..f8339289 100644
--- a/magnus/catalog.py
+++ b/runnable/catalog.py
@@ -1,12 +1,12 @@
 import logging
 from abc import ABC, abstractmethod
-from typing import List, Optional
+from typing import Any, Dict, List, Optional

 from pydantic import BaseModel, ConfigDict

-import magnus.context as context
-from magnus import defaults
-from magnus.datastore import DataCatalog
+import runnable.context as context
+from runnable import defaults
+from runnable.datastore import DataCatalog

 logger = logging.getLogger(defaults.LOGGER_NAME)

@@ -25,6 +25,10 @@ class BaseCatalog(ABC, BaseModel):
     service_type: str = "catalog"
     model_config = ConfigDict(extra="forbid")

+    @abstractmethod
+    def get_summary(self) -> Dict[str, Any]:
+        ...
+
     @property
     def _context(self):
         return context.run_context

@@ -43,7 +47,7 @@ def get(self, name: str, run_id: str, compute_data_folder: str = "", **kwargs) -
         Args:
             name (str): The name of the catalog item
             run_id (str): The run_id of the run.
-            compute_data_folder (str, optional): The compute data folder. Defaults to magnus default (data/)
+            compute_data_folder (str, optional): The compute data folder. Defaults to runnable default (data/)

         Raises:
             NotImplementedError: Base class, hence not implemented

@@ -70,7 +74,7 @@ def put(
         Args:
             name (str): The name of the catalog item.
             run_id (str): The run_id of the run.
-            compute_data_folder (str, optional): The compute data folder. Defaults to magnus default (data/)
+            compute_data_folder (str, optional): The compute data folder. Defaults to runnable default (data/)
             synced_catalogs (dict, optional): Any previously synced catalogs. Defaults to None.

         Raises:

@@ -112,6 +116,9 @@ class DoNothingCatalog(BaseCatalog):
     service_name: str = "do-nothing"

+    def get_summary(self) -> Dict[str, Any]:
+        return {}
+
     def get(self, name: str, run_id: str, compute_data_folder: str = "", **kwargs) -> List[DataCatalog]:
         """
         Does nothing
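Judging by this hunk, get_summary is the one new @abstractmethod on BaseCatalog, so every catalog implementation — including third-party plugins registered through the entry points earlier in this patch — must now provide it before the class can be instantiated. A minimal sketch of the new contract (MyCatalog is hypothetical; get and put keep their inherited, NotImplementedError-raising defaults):

```python
from typing import Any, Dict

from runnable.catalog import BaseCatalog


class MyCatalog(BaseCatalog):
    service_name: str = "my-catalog"

    # The new abstract hook: describe where cataloged data lives so the
    # run log summary can report it at the end of a run.
    def get_summary(self) -> Dict[str, Any]:
        return {"Type": self.service_name, "Location": "in-memory only"}
```

DoNothingCatalog above satisfies the same contract by returning an empty dict.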
diff --git a/magnus/cli.py b/runnable/cli.py
similarity index 92%
rename from magnus/cli.py
rename to runnable/cli.py
index be04f932..aca00e19 100644
--- a/magnus/cli.py
+++ b/runnable/cli.py
@@ -4,18 +4,18 @@
 from click_plugins import with_plugins
 from pkg_resources import iter_entry_points

-from magnus import defaults, entrypoints
+from runnable import defaults, entrypoints

 logger = logging.getLogger(defaults.LOGGER_NAME)


-@with_plugins(iter_entry_points("magnus.cli_plugins"))
+@with_plugins(iter_entry_points("runnable.cli_plugins"))
 @click.group()
 @click.version_option()
 def cli():
     """
-    Welcome to magnus. Please provide the command that you want to use.
-    All commands have options that you can see by magnus --help
+    Welcome to runnable. Please provide the command that you want to use.
+    All commands have options that you can see with runnable --help
     """
     pass

@@ -41,12 +41,11 @@
 )
 @click.option("--tag", default="", help="A tag attached to the run")
 @click.option("--run-id", help="An optional run_id, one would be generated if not provided")
-@click.option("--use-cached", help="Provide the previous run_id to re-run.", show_default=True)
-def execute(file, config_file, parameters_file, log_level, tag, run_id, use_cached):  # pragma: no cover
+def execute(file, config_file, parameters_file, log_level, tag, run_id):  # pragma: no cover
     """
     Execute a pipeline

-    Usage: magnus execute [OPTIONS]
+    Usage: runnable execute [OPTIONS]

     Options:
       -f, --file TEXT         The pipeline definition file [default: pipeline.yaml]
       -c, --config-file TEXT  The configuration file specifying the services
@@ -59,15 +58,14 @@
       [default: ]
       --run-id TEXT           An optional run_id, one would be generated if not provided
-      --use-cached TEXT       Provide the previous run_id to re-run.
     """
     logger.setLevel(log_level)
+
     entrypoints.execute(
         configuration_file=config_file,
         pipeline_file=file,
         tag=tag,
         run_id=run_id,
-        use_cached=use_cached,
         parameters_file=parameters_file,
     )

@@ -97,9 +95,9 @@
 @click.option("--tag", default="", help="A tag attached to the run")
 def execute_single_node(run_id, step_name, map_variable, file, config_file, parameters_file, log_level, tag):
     """
-    Internal entrypoint for magnus to execute a single node.
+    Internal entrypoint for runnable to execute a single node.

-    Other than local executor, every other executor uses this entry point to execute a step in the context of magnus.
+    Other than local executor, every other executor uses this entry point to execute a step in the context of runnable.
     Only chained executions should use this method. Unchained executions should use execute_
     """
     logger.setLevel(log_level)

@@ -248,7 +246,7 @@ def execute_function(
 @click.option("--tag", default="", help="A tag attached to the run")
 def fan(run_id, step_name, mode, map_variable, file, config_file, parameters_file, log_level, tag):
     """
-    Internal entrypoint for magnus to fan in or out a composite node.
+    Internal entrypoint for runnable to fan in or out a composite node.

     Only 3rd party orchestrators should use this entry point.
     """
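The visible change in the CLI is the retirement of --use-cached; the command body now forwards straight to entrypoints.execute. For reference, a programmatic equivalent of `runnable execute -f pipeline.yaml` after this change (argument values are illustrative):

```python
from runnable import entrypoints

# Mirrors the click command body above: there is no use_cached argument
# any more, and an empty run_id asks runnable to generate one.
entrypoints.execute(
    configuration_file="",
    pipeline_file="pipeline.yaml",
    tag="",
    run_id="",
    parameters_file="",
)
```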
""" diff --git a/magnus/context.py b/runnable/context.py similarity index 52% rename from magnus/context.py rename to runnable/context.py index ab17bd54..f98872e6 100644 --- a/magnus/context.py +++ b/runnable/context.py @@ -1,13 +1,14 @@ from typing import Dict, Optional -from pydantic import BaseModel, SerializeAsAny +from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny +from rich.progress import Progress -from magnus.catalog import BaseCatalog -from magnus.datastore import BaseRunLogStore -from magnus.executor import BaseExecutor -from magnus.experiment_tracker import BaseExperimentTracker -from magnus.graph import Graph -from magnus.secrets import BaseSecrets +from runnable.catalog import BaseCatalog +from runnable.datastore import BaseRunLogStore +from runnable.executor import BaseExecutor +from runnable.graph import Graph +from runnable.pickler import BasePickler +from runnable.secrets import BaseSecrets class Context(BaseModel): @@ -15,7 +16,10 @@ class Context(BaseModel): run_log_store: SerializeAsAny[BaseRunLogStore] secrets_handler: SerializeAsAny[BaseSecrets] catalog_handler: SerializeAsAny[BaseCatalog] - experiment_tracker: SerializeAsAny[BaseExperimentTracker] + pickler: SerializeAsAny[BasePickler] + progress: SerializeAsAny[Optional[Progress]] = Field(default=None, exclude=True) + + model_config = ConfigDict(arbitrary_types_allowed=True) pipeline_file: Optional[str] = "" parameters_file: Optional[str] = "" @@ -24,8 +28,6 @@ class Context(BaseModel): tag: str = "" run_id: str = "" variables: Dict[str, str] = {} - use_cached: bool = False - original_run_id: str = "" dag: Optional[Graph] = None dag_hash: str = "" execution_plan: str = "" diff --git a/magnus/datastore.py b/runnable/datastore.py similarity index 80% rename from magnus/datastore.py rename to runnable/datastore.py index c2ae4252..732599d0 100644 --- a/magnus/datastore.py +++ b/runnable/datastore.py @@ -1,18 +1,32 @@ from __future__ import annotations import logging +import os from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, OrderedDict, Tuple, Union - -from pydantic import BaseModel, Field - -import magnus.context as context -from magnus import defaults, exceptions +from datetime import datetime +from typing import ( + Annotated, + Any, + Dict, + List, + Literal, + Optional, + OrderedDict, + Tuple, + Union, +) + +from pydantic import BaseModel, Field, computed_field + +import runnable.context as context +from runnable import defaults, exceptions logger = logging.getLogger(defaults.LOGGER_NAME) -# Once defined these classes are sealed to any additions unless a default is provided -# Breaking this rule might make magnus backwardly incompatible + +JSONType = Union[ + str, int, float, bool, List[Any], Dict[str, Any] +] # This is actually JSONType, but pydantic doesn't support TypeAlias yet class DataCatalog(BaseModel, extra="allow"): @@ -42,18 +56,98 @@ def __eq__(self, other): return other.name == self.name +""" +The theory behind reduced: + parameters returned by steps in map node are only reduced by the end of the map step, fan-in. + If they are accessed within the map step, the value should be the value returned by the step in the map step. + + Once the map state is complete, we can set the reduce to true and have the value as + the reduced value. Its either a list or a custom function return. 
+""" + + +class JsonParameter(BaseModel): + kind: Literal["json"] + value: JSONType + reduced: bool = True + + @computed_field # type: ignore + @property + def description(self) -> JSONType: + return self.value + + def get_value(self) -> JSONType: + return self.value + + +class MetricParameter(BaseModel): + kind: Literal["metric"] + value: JSONType + reduced: bool = True + + @computed_field # type: ignore + @property + def description(self) -> JSONType: + return self.value + + def get_value(self) -> JSONType: + return self.value + + +class ObjectParameter(BaseModel): + kind: Literal["object"] + value: str # The name of the pickled object + reduced: bool = True + + @computed_field # type: ignore + @property + def description(self) -> str: + return f"Pickled object stored in catalog as: {self.value}" + + @property + def file_name(self) -> str: + return f"{self.value}{context.run_context.pickler.extension}" + + def get_value(self) -> Any: + # Get the pickled object + catalog_handler = context.run_context.catalog_handler + + catalog_handler.get(name=self.file_name, run_id=context.run_context.run_id) + obj = context.run_context.pickler.load(path=self.file_name) + os.remove(self.file_name) # Remove after loading + return obj + + def put_object(self, data: Any) -> None: + context.run_context.pickler.dump(data=data, path=self.file_name) + + catalog_handler = context.run_context.catalog_handler + catalog_handler.put(name=self.file_name, run_id=context.run_context.run_id) + os.remove(self.file_name) # Remove after loading + + +Parameter = Annotated[Union[JsonParameter, ObjectParameter, MetricParameter], Field(discriminator="kind")] + + class StepAttempt(BaseModel): """ The captured attributes of an Attempt of a step. """ - attempt_number: int = 0 + attempt_number: int = 1 start_time: str = "" end_time: str = "" - duration: str = "" #  end_time - start_time status: str = "FAIL" message: str = "" - parameters: Dict[str, Any] = Field(default_factory=dict) + input_parameters: Dict[str, Parameter] = Field(default_factory=dict) + output_parameters: Dict[str, Parameter] = Field(default_factory=dict) + user_defined_metrics: Dict[str, Parameter] = Field(default_factory=dict) + + @property + def duration(self): + start = datetime.fromisoformat(self.start_time) + end = datetime.fromisoformat(self.end_time) + + return str(end - start) class CodeIdentity(BaseModel, extra="allow"): @@ -81,10 +175,43 @@ class StepLog(BaseModel): mock: bool = False code_identities: List[CodeIdentity] = Field(default_factory=list) attempts: List[StepAttempt] = Field(default_factory=list) - user_defined_metrics: Dict[str, Any] = Field(default_factory=dict) branches: Dict[str, BranchLog] = Field(default_factory=dict) data_catalog: List[DataCatalog] = Field(default_factory=list) + def get_summary(self) -> Dict[str, Any]: + """ + Summarize the step log to log + """ + summary: Dict[str, Any] = {} + + summary["Name"] = self.internal_name + summary["Input catalog content"] = [dc.name for dc in self.data_catalog if dc.stage == "get"] + summary["Available parameters"] = [ + (p, v.description) for attempt in self.attempts for p, v in attempt.input_parameters.items() + ] + + summary["Output catalog content"] = [dc.name for dc in self.data_catalog if dc.stage == "put"] + summary["Output parameters"] = [ + (p, v.description) for attempt in self.attempts for p, v in attempt.output_parameters.items() + ] + + summary["Metrics"] = [ + (p, v.description) for attempt in self.attempts for p, v in attempt.user_defined_metrics.items() + ] + + cis = [] + 
for ci in self.code_identities: + message = f"{ci.code_identifier_type}:{ci.code_identifier}" + if not ci.code_identifier_dependable: + message += " but is not dependable" + cis.append(message) + + summary["Code identities"] = cis + + summary["status"] = self.status + + return summary + def get_data_catalogs_by_stage(self, stage="put") -> List[DataCatalog]: """ Given a stage, return the data catalogs according to the stage @@ -168,14 +295,28 @@ class RunLog(BaseModel): run_id: str dag_hash: Optional[str] = None - use_cached: bool = False tag: Optional[str] = "" - original_run_id: Optional[str] = "" status: str = defaults.FAIL steps: OrderedDict[str, StepLog] = Field(default_factory=OrderedDict) - parameters: Dict[str, Any] = Field(default_factory=dict) + parameters: Dict[str, Parameter] = Field(default_factory=dict) run_config: Dict[str, Any] = Field(default_factory=dict) + def get_summary(self) -> Dict[str, Any]: + summary: Dict[str, Any] = {} + + _context = context.run_context + + summary["Unique execution id"] = self.run_id + summary["status"] = self.status + + summary["Catalog Location"] = _context.catalog_handler.get_summary() + summary["Full Run log present at: "] = _context.run_log_store.get_summary() + + summary["Final Parameters"] = {p: v.description for p, v in self.parameters.items()} + summary["Collected metrics"] = {p: v.description for p, v in self.parameters.items() if v.kind == "metric"} + + return summary + def get_data_catalogs_by_stage(self, stage: str = "put") -> List[DataCatalog]: """ Return all the cataloged data by the stage at which they were cataloged. @@ -294,6 +435,10 @@ class BaseRunLogStore(ABC, BaseModel): service_name: str = "" service_type: str = "run_log_store" + @abstractmethod + def get_summary(self) -> Dict[str, Any]: + ... + @property def _context(self): return context.run_context @@ -373,7 +518,7 @@ def update_run_log_status(self, run_id: str, status: str): run_log.status = status self.put_run_log(run_log) - def get_parameters(self, run_id: str, **kwargs) -> dict: + def get_parameters(self, run_id: str, **kwargs) -> Dict[str, Parameter]: """ Get the parameters from the Run log defined by the run_id @@ -392,7 +537,7 @@ def get_parameters(self, run_id: str, **kwargs) -> dict: run_log = self.get_run_log_by_id(run_id=run_id) return run_log.parameters - def set_parameters(self, run_id: str, parameters: dict, **kwargs): + def set_parameters(self, run_id: str, parameters: Dict[str, Parameter], **kwargs): """ Update the parameters of the Run log with the new parameters @@ -579,16 +724,7 @@ def add_branch_log(self, branch_log: Union[BranchLog, RunLog], run_id: str, **kw step.branches[internal_branch_name] = branch_log # type: ignore self.put_run_log(run_log) - def create_attempt_log(self, **kwargs) -> StepAttempt: - """ - Returns an uncommitted step attempt log. 
-
-        Returns:
-            StepAttempt: An uncommitted step attempt log
-        """
-        logger.info(f"{self.service_name} Creating an attempt log")
-        return StepAttempt()
-
     def create_code_identity(self, **kwargs) -> CodeIdentity:
         """
         Creates an uncommitted Code identity class
@@ -636,6 +772,11 @@ class BufferRunLogstore(BaseRunLogStore):
     service_name: str = "buffered"
     run_log: Optional[RunLog] = Field(default=None, exclude=True)  # For a buffered Run Log, this is the database
 
+    def get_summary(self) -> Dict[str, Any]:
+        summary = {"Type": self.service_name, "Location": "Not persisted"}
+
+        return summary
+
     def create_run_log(
         self,
         run_id: str,
@@ -658,9 +799,7 @@ def create_run_log(
         self.run_log = RunLog(
             run_id=run_id,
             dag_hash=dag_hash,
-            use_cached=use_cached,
             tag=tag,
-            original_run_id=original_run_id,
             status=status,
         )
         return self.run_log
diff --git a/magnus/defaults.py b/runnable/defaults.py
similarity index 72%
rename from magnus/defaults.py
rename to runnable/defaults.py
index b0982ed6..e3efabcf 100644
--- a/magnus/defaults.py
+++ b/runnable/defaults.py
@@ -1,19 +1,12 @@
-# mypy: ignore-errors
-# The above should be done until https://github.com/python/mypy/issues/8823
 from enum import Enum
+from typing import TypedDict  # type: ignore[unused-ignore]
 from typing import Any, Dict, Mapping, Optional, Union
 
+from rich.style import Style
 from typing_extensions import TypeAlias
 
-# TODO: This is not the correct way to do this.
-try:  # pragma: no cover
-    from typing import TypedDict  # type: ignore[unused-ignore]
-except ImportError:  # pragma: no cover
-    from typing_extensions import TypedDict  # type: ignore[unused-ignore]
-
-
-NAME = "magnus"
-LOGGER_NAME = "magnus"
+NAME = "runnable"
+LOGGER_NAME = "runnable"
 
 # CLI settings
 LOG_LEVEL = "WARNING"
 
@@ -21,7 +14,7 @@
 
 class EXECUTION_PLAN(Enum):
     """
-    The possible execution plans for a magnus job.
+    The possible execution plans for a runnable job.
     """
 
     CHAINED = "chained"  # 1:1 relationship between run log and the dag.
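# Editor's note (illustration, not part of the patch): the RUNNABLE_PRM_ prefix defined a
# few lines below is how parameters travel through environment variables, with values
# json-serialized. A minimal sketch of reading them back under that convention; the
# helper name is invented for illustration and is not runnable's own API:

import json
import os


def parameters_from_env(prefix: str = "RUNNABLE_PRM_") -> dict:
    """Collect prefixed environment variables into a plain parameter dict."""
    params = {}
    for key, value in os.environ.items():
        if not key.startswith(prefix):
            continue
        name = key[len(prefix) :].lower()
        try:
            params[name] = json.loads(value)  # values are expected to be json-encoded
        except json.JSONDecodeError:
            params[name] = value  # fall back to the raw string
    return params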
@@ -35,29 +28,33 @@ class ServiceConfig(TypedDict): config: Mapping[str, Any] -class MagnusConfig(TypedDict, total=False): +class RunnableConfig(TypedDict, total=False): run_log_store: Optional[ServiceConfig] secrets: Optional[ServiceConfig] catalog: Optional[ServiceConfig] executor: Optional[ServiceConfig] experiment_tracker: Optional[ServiceConfig] + pickler: Optional[ServiceConfig] TypeMapVariable: TypeAlias = Optional[Dict[str, Union[str, int, float]]] # Config file environment variable -MAGNUS_CONFIG_FILE = "MAGNUS_CONFIG_FILE" -MAGNUS_RUN_TAG = "MAGNUS_RUN_TAG" +RUNNABLE_CONFIG_FILE = "RUNNABLE_CONFIG_FILE" +RUNNABLE_RUN_TAG = "RUNNABLE_RUN_TAG" # Interaction settings -TRACK_PREFIX = "MAGNUS_TRACK_" +TRACK_PREFIX = "RUNNABLE_TRACK_" STEP_INDICATOR = "_STEP_" -PARAMETER_PREFIX = "MAGNUS_PRM_" -MAP_VARIABLE = "MAGNUS_MAP_VARIABLE" -VARIABLE_PREFIX = "MAGNUS_VAR_" -ENV_RUN_ID = "MAGNUS_RUN_ID" -ATTEMPT_NUMBER = "MAGNUS_STEP_ATTEMPT" +PARAMETER_PREFIX = "RUNNABLE_PRM_" +MAP_VARIABLE = "RUNNABLE_MAP_VARIABLE" +VARIABLE_PREFIX = "RUNNABLE_VAR_" +ENV_RUN_ID = "RUNNABLE_RUN_ID" +ATTEMPT_NUMBER = "RUNNABLE_STEP_ATTEMPT" + +## Generated pipeline file +GENERATED_PIPELINE_FILE = "generated_pipeline.yaml" # STATUS progression # For Branch, CREATED -> PROCESSING -> SUCCESS OR FAIL @@ -72,16 +69,17 @@ class MagnusConfig(TypedDict, total=False): COMMAND_TYPE = "python" NODE_SPEC_FILE = "node_spec.yaml" COMMAND_FRIENDLY_CHARACTER = "%" -DEFAULT_CONTAINER_CONTEXT_PATH = "/opt/magnus/" +DEFAULT_CONTAINER_CONTEXT_PATH = "/opt/runnable/" DEFAULT_CONTAINER_DATA_PATH = "data/" DEFAULT_CONTAINER_OUTPUT_PARAMETERS = "parameters.json" # Default services DEFAULT_EXECUTOR = ServiceConfig(type="local", config={}) -DEFAULT_RUN_LOG_STORE = ServiceConfig(type="buffered", config={}) +DEFAULT_RUN_LOG_STORE = ServiceConfig(type="file-system", config={}) DEFAULT_CATALOG = ServiceConfig(type="file-system", config={}) DEFAULT_SECRETS = ServiceConfig(type="do-nothing", config={}) DEFAULT_EXPERIMENT_TRACKER = ServiceConfig(type="do-nothing", config={}) +DEFAULT_PICKLER = ServiceConfig(type="pickle", config={}) # Map state MAP_PLACEHOLDER = "map_variable_placeholder" @@ -94,7 +92,7 @@ class MagnusConfig(TypedDict, total=False): MAX_TIME = 86400 # 1 day in seconds # User extensions -USER_CONFIG_FILE = "magnus-config.yaml" +USER_CONFIG_FILE = "runnable-config.yaml" # Executor settings ENABLE_PARALLEL = False @@ -155,7 +153,7 @@ class ENTRYPOINT(Enum): "disable_existing_loggers": True, "formatters": { "standard": {"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"}, - "magnus_formatter": {"format": "%(message)s", "datefmt": "[%X]"}, + "runnable_formatter": {"format": "%(message)s", "datefmt": "[%X]"}, }, "handlers": { "default": { @@ -163,8 +161,8 @@ class ENTRYPOINT(Enum): "class": "logging.StreamHandler", "stream": "ext://sys.stdout", # Default is stderr }, - "magnus_handler": { - "formatter": "magnus_formatter", + "runnable_handler": { + "formatter": "runnable_formatter", "class": "rich.logging.RichHandler", "rich_tracebacks": True, }, @@ -174,6 +172,13 @@ class ENTRYPOINT(Enum): "handlers": ["default"], "propagate": True, }, # Root logger - LOGGER_NAME: {"handlers": ["magnus_handler"], "propagate": False}, + LOGGER_NAME: {"handlers": ["runnable_handler"], "propagate": False}, }, } + + +# styles +error_style = Style(color="red", bold=True) +warning_style = Style(color="yellow", bold=True) +success_style = Style(color="green", bold=True) +info_style = Style(color="blue", bold=True) diff --git 
a/magnus/entrypoints.py b/runnable/entrypoints.py similarity index 68% rename from magnus/entrypoints.py rename to runnable/entrypoints.py index 89791f26..2aa49c7c 100644 --- a/magnus/entrypoints.py +++ b/runnable/entrypoints.py @@ -1,19 +1,23 @@ +import importlib import json import logging +import os +import sys from typing import Optional, cast -from rich import print +from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn +from rich.table import Column -import magnus.context as context -from magnus import defaults, graph, utils -from magnus.defaults import MagnusConfig, ServiceConfig +import runnable.context as context +from runnable import console, defaults, graph, utils +from runnable.defaults import RunnableConfig, ServiceConfig logger = logging.getLogger(defaults.LOGGER_NAME) -def get_default_configs() -> MagnusConfig: +def get_default_configs() -> RunnableConfig: """ - User can provide extensions as part of their code base, magnus-config.yaml provides the place to put them. + User can provide extensions as part of their code base, runnable-config.yaml provides the place to put them. """ user_configs = {} if utils.does_file_exist(defaults.USER_CONFIG_FILE): @@ -34,7 +38,6 @@ def prepare_configurations( configuration_file: str = "", pipeline_file: str = "", tag: str = "", - use_cached: str = "", parameters_file: str = "", force_local_executor: bool = False, ) -> context.Context: @@ -48,12 +51,11 @@ def prepare_configurations( pipeline_file (str): The config/dag file run_id (str): The run id of the run. tag (str): If a tag is provided at the run time - use_cached (str): Provide the run_id of the older run Returns: executor.BaseExecutor : A prepared executor as per the dag/config """ - magnus_defaults = get_default_configs() + runnable_defaults = get_default_configs() variables = utils.gather_variables() @@ -61,33 +63,31 @@ def prepare_configurations( if configuration_file: templated_configuration = utils.load_yaml(configuration_file) or {} - configuration: MagnusConfig = cast(MagnusConfig, templated_configuration) + configuration: RunnableConfig = cast(RunnableConfig, templated_configuration) + + logger.info(f"Resolved configurations: {configuration}") # Run log settings, configuration over-rides everything run_log_config: Optional[ServiceConfig] = configuration.get("run_log_store", None) if not run_log_config: - run_log_config = cast(ServiceConfig, magnus_defaults.get("run_log_store", defaults.DEFAULT_RUN_LOG_STORE)) + run_log_config = cast(ServiceConfig, runnable_defaults.get("run_log_store", defaults.DEFAULT_RUN_LOG_STORE)) run_log_store = utils.get_provider_by_name_and_type("run_log_store", run_log_config) # Catalog handler settings, configuration over-rides everything catalog_config: Optional[ServiceConfig] = configuration.get("catalog", None) if not catalog_config: - catalog_config = cast(ServiceConfig, magnus_defaults.get("catalog", defaults.DEFAULT_CATALOG)) + catalog_config = cast(ServiceConfig, runnable_defaults.get("catalog", defaults.DEFAULT_CATALOG)) catalog_handler = utils.get_provider_by_name_and_type("catalog", catalog_config) # Secret handler settings, configuration over-rides everything secrets_config: Optional[ServiceConfig] = configuration.get("secrets", None) if not secrets_config: - secrets_config = cast(ServiceConfig, magnus_defaults.get("secrets", defaults.DEFAULT_SECRETS)) + secrets_config = cast(ServiceConfig, runnable_defaults.get("secrets", defaults.DEFAULT_SECRETS)) secrets_handler = utils.get_provider_by_name_and_type("secrets", 
secrets_config)
 
-    # experiment tracker settings, configuration over-rides everything
-    tracker_config: Optional[ServiceConfig] = configuration.get("experiment_tracker", None)
-    if not tracker_config:
-        tracker_config = cast(
-            ServiceConfig, magnus_defaults.get("experiment_tracker", defaults.DEFAULT_EXPERIMENT_TRACKER)
-        )
-    tracker_handler = utils.get_provider_by_name_and_type("experiment_tracker", tracker_config)
+    # pickler
+    pickler_config = cast(ServiceConfig, runnable_defaults.get("pickler", defaults.DEFAULT_PICKLER))
+    pickler_handler = utils.get_provider_by_name_and_type("pickler", pickler_config)
 
     # executor configurations, configuration over rides everything
     executor_config: Optional[ServiceConfig] = configuration.get("executor", None)
@@ -95,7 +95,7 @@ def prepare_configurations(
         executor_config = ServiceConfig(type="local", config={})
 
     if not executor_config:
-        executor_config = cast(ServiceConfig, magnus_defaults.get("executor", defaults.DEFAULT_EXECUTOR))
+        executor_config = cast(ServiceConfig, runnable_defaults.get("executor", defaults.DEFAULT_EXECUTOR))
     configured_executor = utils.get_provider_by_name_and_type("executor", executor_config)
 
     # Construct the context
@@ -104,7 +104,7 @@ def prepare_configurations(
         run_log_store=run_log_store,
         catalog_handler=catalog_handler,
         secrets_handler=secrets_handler,
-        experiment_tracker=tracker_handler,
+        pickler=pickler_handler,
         variables=variables,
         tag=tag,
         run_id=run_id,
@@ -113,25 +113,30 @@ def prepare_configurations(
     )
 
     if pipeline_file:
-        # There are use cases where we are only preparing the executor
-        pipeline_config = utils.load_yaml(pipeline_file)
+        if pipeline_file.endswith(".py"):
+            # Convert a pipeline defined in Python into a dag in yaml
+            module_file = pipeline_file[: -len(".py")]  # str.strip would drop characters, not the ".py" suffix
+            module, func = utils.get_module_and_attr_names(module_file)
+            sys.path.insert(0, os.getcwd())  # Need to add the current directory to path
+            imported_module = importlib.import_module(module)
+
+            os.environ["RUNNABLE_PY_TO_YAML"] = "true"
+            dag = getattr(imported_module, func)().return_dag()
+
+        else:
+            pipeline_config = utils.load_yaml(pipeline_file)
 
-        logger.info("The input pipeline:")
-        logger.info(json.dumps(pipeline_config, indent=4))
+            logger.info("The input pipeline:")
+            logger.info(json.dumps(pipeline_config, indent=4))
 
-        # Create the graph
-        dag_config = pipeline_config["dag"]
-        dag_hash = utils.get_dag_hash(dag_config)
-        dag = graph.create_graph(dag_config)
+            dag_config = pipeline_config["dag"]
+
+            dag_hash = utils.get_dag_hash(dag_config)
+            dag = graph.create_graph(dag_config)
+            run_context.dag_hash = dag_hash
 
         run_context.pipeline_file = pipeline_file
         run_context.dag = dag
-        run_context.dag_hash = dag_hash
-
-        run_context.use_cached = False
-        if use_cached:
-            run_context.use_cached = True
-            run_context.original_run_id = use_cached
 
     context.run_context = run_context
 
@@ -143,22 +148,19 @@ def execute(
     pipeline_file: str,
     tag: str = "",
     run_id: str = "",
-    use_cached: str = "",
     parameters_file: str = "",
 ):
     # pylint: disable=R0914,R0913
     """
-    The entry point to magnus execution. This method would prepare the configurations and delegates traversal to the
+    The entry point to runnable execution. This method prepares the configurations and delegates traversal to the
     executor
 
     Args:
         pipeline_file (str): The config/dag file
        run_id (str): The run id of the run.
        tag (str): If a tag is provided at the run time
-        use_cached (str): The previous run_id to use.
parameters_file (str): The parameters being sent in to the application """ - # Re run settings run_id = utils.generate_run_id(run_id=run_id) run_context = prepare_configurations( @@ -166,23 +168,42 @@ def execute( pipeline_file=pipeline_file, run_id=run_id, tag=tag, - use_cached=use_cached, parameters_file=parameters_file, ) - print("Working with context:") - print(run_context) + console.print("Working with context:") + console.print(run_context) executor = run_context.executor run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) + utils.set_runnable_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) # Prepare for graph execution executor.prepare_for_graph_execution() - logger.info("Executing the graph") - executor.execute_graph(dag=run_context.dag) # type: ignore + logger.info(f"Executing the graph: {run_context.dag}") + with Progress( + TextColumn("[progress.description]{task.description}", table_column=Column(ratio=2)), + BarColumn(table_column=Column(ratio=1), style="dark_orange"), + TimeElapsedColumn(table_column=Column(ratio=1)), + console=console, + expand=True, + ) as progress: + pipeline_execution_task = progress.add_task("[dark_orange] Starting execution .. ", total=1) + try: + run_context.progress = progress + executor.execute_graph(dag=run_context.dag) # type: ignore + + run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False) + + if run_log.status == defaults.SUCCESS: + progress.update(pipeline_execution_task, description="[green] Success", completed=True) + else: + progress.update(pipeline_execution_task, description="[red] Failed", completed=True) + except Exception as e: # noqa: E722 + console.print(e, style=defaults.error_style) + progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True) executor.send_return_code() @@ -197,7 +218,7 @@ def execute_single_node( parameters_file: str = "", ): """ - The entry point into executing a single node of magnus. Orchestration modes should extensively use this + The entry point into executing a single node of runnable. Orchestration modes should extensively use this entry point. It should have similar set up of configurations to execute because orchestrator modes can initiate the execution. @@ -211,22 +232,21 @@ def execute_single_node( parameters_file (str): The parameters being sent in to the application """ - from magnus import nodes + from runnable import nodes run_context = prepare_configurations( configuration_file=configuration_file, pipeline_file=pipeline_file, run_id=run_id, tag=tag, - use_cached="", parameters_file=parameters_file, ) - print("Working with context:") - print(run_context) + console.print("Working with context:") + console.print(run_context) executor = run_context.executor run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) + utils.set_runnable_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) executor.prepare_for_node_execution() @@ -247,55 +267,6 @@ def execute_single_node( executor.send_return_code(stage="execution") -def execute_single_brach( - configuration_file: str, - pipeline_file: str, - branch_name: str, - map_variable: str, - run_id: str, - tag: str, -): - """ - The entry point into executing a branch of the graph. 
Interactive modes in parallel runs use this to execute - branches in parallel. - - This entry point is never used by its own but rather from a node. So the arguments sent into this are fewer. - - Args: - variables_file (str): The variables file, if used or None - branch_name : The name of the branch to execute, in dot.path.convention - pipeline_file (str): The config/dag file - run_id (str): The run id of the run. - tag (str): If a tag is provided at the run time - """ - from magnus import nodes - - run_context = prepare_configurations( - configuration_file=configuration_file, - pipeline_file=pipeline_file, - run_id=run_id, - tag=tag, - use_cached="", - ) - print("Working with context:") - print(run_context) - - executor = run_context.executor - run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) - - branch_internal_name = nodes.BaseNode._get_internal_name_from_command_name(branch_name) - - map_variable_dict = utils.json_to_ordered_dict(map_variable) - - branch_to_execute = graph.search_branch_by_internal_name(run_context.dag, branch_internal_name) # type: ignore - - logger.info("Executing the single branch of %s", branch_to_execute) - executor.execute_graph(dag=branch_to_execute, map_variable=map_variable_dict) - - executor.send_return_code() - - def execute_notebook( entrypoint: str, notebook_file: str, @@ -307,7 +278,7 @@ def execute_notebook( parameters_file: str = "", ): """ - The entry point to magnus execution of a notebook. This method would prepare the configurations and + The entry point to runnable execution of a notebook. This method would prepare the configurations and delegates traversal to the executor """ run_id = utils.generate_run_id(run_id=run_id) @@ -321,10 +292,10 @@ def execute_notebook( executor = run_context.executor run_context.execution_plan = defaults.EXECUTION_PLAN.UNCHAINED.value - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) + utils.set_runnable_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) - print("Working with context:") - print(run_context) + console.print("Working with context:") + console.print(run_context) step_config = { "command": notebook_file, @@ -368,7 +339,7 @@ def execute_function( parameters_file: str = "", ): """ - The entry point to magnus execution of a function. This method would prepare the configurations and + The entry point to runnable execution of a function. 
This method would prepare the configurations and delegates traversal to the executor """ run_id = utils.generate_run_id(run_id=run_id) @@ -383,10 +354,10 @@ def execute_function( executor = run_context.executor run_context.execution_plan = defaults.EXECUTION_PLAN.UNCHAINED.value - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) + utils.set_runnable_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) - print("Working with context:") - print(run_context) + console.print("Working with context:") + console.print(run_context) # Prepare the graph with a single node step_config = { @@ -445,22 +416,21 @@ def fan( parameters_file (str): The parameters being sent in to the application """ - from magnus import nodes + from runnable import nodes run_context = prepare_configurations( configuration_file=configuration_file, pipeline_file=pipeline_file, run_id=run_id, tag=tag, - use_cached="", parameters_file=parameters_file, ) - print("Working with context:") - print(run_context) + console.print("Working with context:") + console.print(run_context) executor = run_context.executor run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) + utils.set_runnable_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) executor.prepare_for_node_execution() @@ -481,4 +451,4 @@ def fan( if __name__ == "__main__": # This is only for perf testing purposes. - prepare_configurations(run_id="abc", pipeline_file="example/mocking.yaml") + prepare_configurations(run_id="abc", pipeline_file="examples/mocking.yaml") diff --git a/magnus/exceptions.py b/runnable/exceptions.py similarity index 95% rename from magnus/exceptions.py rename to runnable/exceptions.py index d936cfa0..61f57182 100644 --- a/magnus/exceptions.py +++ b/runnable/exceptions.py @@ -92,3 +92,7 @@ class ExecutionFailedError(Exception): # pragma: no cover def __init__(self, run_id: str): super().__init__() self.message = f"Execution failed for run id: {run_id}" + + +class CommandCallError(Exception): # pragma: no cover + "An exception during the call of the command" diff --git a/magnus/executor.py b/runnable/executor.py similarity index 83% rename from magnus/executor.py rename to runnable/executor.py index 56a77241..5eb8a9c7 100644 --- a/magnus/executor.py +++ b/runnable/executor.py @@ -7,15 +7,15 @@ from pydantic import BaseModel, ConfigDict -import magnus.context as context -from magnus import defaults -from magnus.datastore import DataCatalog, RunLog, StepLog -from magnus.defaults import TypeMapVariable -from magnus.graph import Graph +import runnable.context as context +from runnable import defaults +from runnable.datastore import DataCatalog, StepLog +from runnable.defaults import TypeMapVariable +from runnable.graph import Graph if TYPE_CHECKING: # pragma: no cover - from magnus.extensions.nodes import TaskNode - from magnus.nodes import BaseNode + from runnable.extensions.nodes import TaskNode + from runnable.nodes import BaseNode logger = logging.getLogger(defaults.LOGGER_NAME) @@ -25,7 +25,7 @@ class BaseExecutor(ABC, BaseModel): The skeleton of an executor class. Any implementation of an executor should inherit this class and over-ride accordingly. 
-    There is a extension available in magnus/extensions/executor/__init__.py
+    There is an extension available in runnable/extensions/executor/__init__.py
     which implements the most common functionality which is easier to extend/override
     in most scenarios.
 
@@ -34,13 +34,10 @@ class BaseExecutor(ABC, BaseModel):
 
     service_name: str = ""
     service_type: str = "executor"
-    enable_parallel: bool = defaults.ENABLE_PARALLEL
     overrides: dict = {}
 
-    _previous_run_log: Optional[RunLog] = None
-    _single_step: str = ""
+    _local: bool = False  # This is a flag to indicate whether the executor is local or not.
 
-    _context_step_log = None  # type : StepLog
     _context_node = None  # type: BaseNode
 
     model_config = ConfigDict(extra="forbid")
@@ -48,19 +45,6 @@ class BaseExecutor(ABC, BaseModel):
     def _context(self):
         return context.run_context
 
-    def _is_parallel_execution(self) -> bool:
-        """
-        Controls the parallelization of branches in map and parallel state.
-        Defaults to False and left for the compute modes to decide.
-
-        Interactive executors like local and local-container need decisions.
-        For most transpilers it is inconsequential as its always True and supported by platforms.
-
-        Returns:
-            bool: True if the execution allows parallel execution of branches.
-        """
-        return self.enable_parallel
-
     @abstractmethod
     def _get_parameters(self) -> Dict[str, Any]:
         """
@@ -72,20 +56,6 @@ def _get_parameters(self) -> Dict[str, Any]:
         """
         ...
 
-    @abstractmethod
-    def _set_up_for_re_run(self, parameters: Dict[str, Any]) -> None:
-        """
-        Set up the executor for using a previous execution.
-
-        Retrieve the older run log, error out if it does not exist.
-        Sync the catalogs from the previous run log with the current one.
-
-        Update the parameters of this execution with the previous one. The previous one take precedence.
-
-        Args:
-            parameters (Dict[str, Any]): The parameters for the current execution.
-        """
-
     @abstractmethod
     def _set_up_run_log(self, exists_ok=False):
         """
@@ -120,7 +90,7 @@ def prepare_for_node_execution(self):
         ...
 
     @abstractmethod
-    def _sync_catalog(self, step_log: StepLog, stage: str, synced_catalogs=None) -> Optional[List[DataCatalog]]:
+    def _sync_catalog(self, stage: str, synced_catalogs=None) -> Optional[List[DataCatalog]]:
         """
         1). Identify the catalog settings by over-riding node settings with the global settings.
         2). For stage = get:
@@ -171,7 +141,7 @@ def step_attempt_number(self) -> int:
         return int(os.environ.get(defaults.ATTEMPT_NUMBER, 1))
 
     @abstractmethod
-    def _execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
+    def _execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, mock: bool = False, **kwargs):
         """
         This is the entry point when we do the actual execution of the function.
@@ -251,7 +221,7 @@ def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = Non
 
     @abstractmethod
     def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
         """
-        Executor specific way of triggering jobs when magnus does both traversal and execution
+        Executor specific way of triggering jobs when runnable does both traversal and execution
 
         Transpilers will NEVER use this method and will NEVER call them.
         Only interactive executors who need execute_from_graph will ever implement it.
@@ -304,27 +274,6 @@ def execute_graph(self, dag: Graph, map_variable: TypeMapVariable = None, **kwar
         """
         ...
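# Editor's note (illustration, not part of the patch): a sketch of the shape a concrete
# executor takes against the abstract surface above. Bodies are stubs and only a few of
# the abstract methods are shown; a real implementation must provide the rest, and the
# shared behaviour actually lives in GenericExecutor under runnable/extensions/executor/.

from typing import Any, Dict, List, Optional

from runnable.datastore import DataCatalog
from runnable.executor import BaseExecutor


class SketchExecutor(BaseExecutor):
    service_name: str = "sketch"

    def _get_parameters(self) -> Dict[str, Any]:
        return {}  # no parameter sources wired up in this sketch

    def _set_up_run_log(self, exists_ok=False):
        pass  # a real executor creates and persists the run log here

    def _sync_catalog(self, stage: str, synced_catalogs=None) -> Optional[List[DataCatalog]]:
        return None  # no catalog movement in this sketch

    def _execute_node(self, node, map_variable=None, mock: bool = False, **kwargs):
        print(f"would execute {node}")  # stand-in for actual node execution

    def trigger_job(self, node, map_variable=None, **kwargs):
        self._execute_node(node, map_variable=map_variable, **kwargs)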
- @abstractmethod - def _is_step_eligible_for_rerun(self, node: BaseNode, map_variable: TypeMapVariable = None): - """ - In case of a re-run, this method checks to see if the previous run step status to determine if a re-run is - necessary. - * True: If its not a re-run. - * True: If its a re-run and we failed in the last run or the corresponding logs do not exist. - * False: If its a re-run and we succeeded in the last run. - - Most cases, this logic need not be touched - - Args: - node (Node): The node to check against re-run - map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable.. - Defaults to None. - - Returns: - bool: Eligibility for re-run. True means re-run, False means skip to the next step. - """ - ... - @abstractmethod def send_return_code(self, stage="traversal"): """ diff --git a/magnus/extensions/__init__.py b/runnable/extensions/__init__.py similarity index 100% rename from magnus/extensions/__init__.py rename to runnable/extensions/__init__.py diff --git a/magnus/extensions/catalog/__init__.py b/runnable/extensions/catalog/__init__.py similarity index 94% rename from magnus/extensions/catalog/__init__.py rename to runnable/extensions/catalog/__init__.py index 3561aa6b..bf50a4e9 100644 --- a/magnus/extensions/catalog/__init__.py +++ b/runnable/extensions/catalog/__init__.py @@ -1,6 +1,6 @@ from typing import List, Optional -from magnus.datastore import DataCatalog +from runnable.datastore import DataCatalog def is_catalog_out_of_sync(catalog, synced_catalogs=Optional[List[DataCatalog]]) -> bool: diff --git a/magnus/extensions/catalog/file_system/__init__.py b/runnable/extensions/catalog/file_system/__init__.py similarity index 100% rename from magnus/extensions/catalog/file_system/__init__.py rename to runnable/extensions/catalog/file_system/__init__.py diff --git a/magnus/extensions/catalog/file_system/implementation.py b/runnable/extensions/catalog/file_system/implementation.py similarity index 94% rename from magnus/extensions/catalog/file_system/implementation.py rename to runnable/extensions/catalog/file_system/implementation.py index 9928e067..b7977974 100644 --- a/magnus/extensions/catalog/file_system/implementation.py +++ b/runnable/extensions/catalog/file_system/implementation.py @@ -2,12 +2,12 @@ import os import shutil from pathlib import Path -from typing import List, Optional +from typing import Any, Dict, List, Optional -from magnus import defaults, utils -from magnus.catalog import BaseCatalog -from magnus.datastore import DataCatalog -from magnus.extensions.catalog import is_catalog_out_of_sync +from runnable import defaults, utils +from runnable.catalog import BaseCatalog +from runnable.datastore import DataCatalog +from runnable.extensions.catalog import is_catalog_out_of_sync logger = logging.getLogger(defaults.LOGGER_NAME) @@ -34,6 +34,13 @@ class FileSystemCatalog(BaseCatalog): def get_catalog_location(self): return self.catalog_location + def get_summary(self) -> Dict[str, Any]: + summary = { + "Catalog Location": self.get_catalog_location(), + } + + return summary + def get(self, name: str, run_id: str, compute_data_folder: str = "", **kwargs) -> List[DataCatalog]: """ Get the file by matching glob pattern to the name @@ -144,7 +151,8 @@ def put( if not utils.does_dir_exist(copy_from): msg = ( f"Expected compute data folder to be present at: {compute_data_folder} but not found. \n" - "Note: Magnus does not create the compute data folder for you. 
Please ensure that the folder exists.\n" + "Note: runnable does not create the compute data folder for you. Please ensure that the " + "folder exists.\n" ) raise Exception(msg) diff --git a/magnus/extensions/catalog/k8s_pvc/__init__.py b/runnable/extensions/catalog/k8s_pvc/__init__.py similarity index 100% rename from magnus/extensions/catalog/k8s_pvc/__init__.py rename to runnable/extensions/catalog/k8s_pvc/__init__.py diff --git a/magnus/extensions/catalog/k8s_pvc/implementation.py b/runnable/extensions/catalog/k8s_pvc/implementation.py similarity index 73% rename from magnus/extensions/catalog/k8s_pvc/implementation.py rename to runnable/extensions/catalog/k8s_pvc/implementation.py index 5ea47dec..010c35a5 100644 --- a/magnus/extensions/catalog/k8s_pvc/implementation.py +++ b/runnable/extensions/catalog/k8s_pvc/implementation.py @@ -1,8 +1,8 @@ import logging from pathlib import Path -from magnus import defaults -from magnus.extensions.catalog.file_system.implementation import FileSystemCatalog +from runnable import defaults +from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog logger = logging.getLogger(defaults.LOGGER_NAME) diff --git a/magnus/extensions/catalog/k8s_pvc/integration.py b/runnable/extensions/catalog/k8s_pvc/integration.py similarity index 85% rename from magnus/extensions/catalog/k8s_pvc/integration.py rename to runnable/extensions/catalog/k8s_pvc/integration.py index e3d886c1..a88542c2 100644 --- a/magnus/extensions/catalog/k8s_pvc/integration.py +++ b/runnable/extensions/catalog/k8s_pvc/integration.py @@ -1,8 +1,8 @@ import logging from typing import cast -from magnus import defaults -from magnus.integration import BaseIntegration +from runnable import defaults +from runnable.integration import BaseIntegration logger = logging.getLogger(defaults.NAME) @@ -45,8 +45,8 @@ class ArgoCompute(BaseIntegration): service_provider = "k8s-pvc" # The actual implementation of the service def configure_for_traversal(self, **kwargs): - from magnus.extensions.catalog.k8s_pvc.implementation import K8sPVCatalog - from magnus.extensions.executor.argo.implementation import ArgoExecutor, UserVolumeMounts + from runnable.extensions.catalog.k8s_pvc.implementation import K8sPVCatalog + from runnable.extensions.executor.argo.implementation import ArgoExecutor, UserVolumeMounts self.executor = cast(ArgoExecutor, self.executor) self.service = cast(K8sPVCatalog, self.service) diff --git a/magnus/extensions/executor/__init__.py b/runnable/extensions/executor/__init__.py similarity index 70% rename from magnus/extensions/executor/__init__.py rename to runnable/extensions/executor/__init__.py index 45430f2e..9ab9011e 100644 --- a/magnus/extensions/executor/__init__.py +++ b/runnable/extensions/executor/__init__.py @@ -1,20 +1,24 @@ import copy -import json import logging import os from abc import abstractmethod -from typing import Any, Dict, List, Optional, cast - -from rich import print - -from magnus import context, defaults, exceptions, integration, parameters, utils -from magnus.datastore import DataCatalog, RunLog, StepLog -from magnus.defaults import TypeMapVariable -from magnus.executor import BaseExecutor -from magnus.experiment_tracker import get_tracked_data -from magnus.extensions.nodes import TaskNode -from magnus.graph import Graph -from magnus.nodes import BaseNode +from typing import Dict, List, Optional, cast + +from runnable import ( + console, + context, + defaults, + exceptions, + integration, + parameters, + utils, +) +from runnable.datastore 
import DataCatalog, JsonParameter, RunLog, StepLog +from runnable.defaults import TypeMapVariable +from runnable.executor import BaseExecutor +from runnable.extensions.nodes import TaskNode +from runnable.graph import Graph +from runnable.nodes import BaseNode logger = logging.getLogger(defaults.LOGGER_NAME) @@ -40,21 +44,7 @@ class GenericExecutor(BaseExecutor): def _context(self): return context.run_context - @property - def step_decorator_run_id(self): - """ - TODO: Experimental feature, design is not mature yet. - - This function is used by the decorator function. - The design idea is we can over-ride this method in different implementations to retrieve the run_id. - But is it really intrusive to ask to set the environmental variable MAGNUS_RUN_ID? - - Returns: - _type_: _description_ - """ - return os.environ.get("MAGNUS_RUN_ID", None) - - def _get_parameters(self) -> Dict[str, Any]: + def _get_parameters(self) -> Dict[str, JsonParameter]: """ Consolidate the parameters from the environment variables and the parameters file. @@ -64,36 +54,18 @@ def _get_parameters(self) -> Dict[str, Any]: Returns: _type_: _description_ """ - params: Dict[str, Any] = {} + params: Dict[str, JsonParameter] = {} if self._context.parameters_file: - params.update(utils.load_yaml(self._context.parameters_file)) + user_defined = utils.load_yaml(self._context.parameters_file) or {} + + for key, value in user_defined.items(): + params[key] = JsonParameter(value=value, kind="json") # Update these with some from the environment variables params.update(parameters.get_user_set_parameters()) + logger.debug(f"parameters as seen by executor: {params}") return params - def _set_up_for_re_run(self, parameters: Dict[str, Any]) -> None: - try: - attempt_run_log = self._context.run_log_store.get_run_log_by_id( - run_id=self._context.original_run_id, full=False - ) - except exceptions.RunLogNotFoundError as e: - msg = ( - f"Expected a run log with id: {self._context.original_run_id} " - "but it does not exist in the run log store. " - "If the original execution was in a different environment, ensure that it is available in the current " - "environment." - ) - logger.exception(msg) - raise Exception(msg) from e - - # Sync the previous run log catalog to this one. 
- self._context.catalog_handler.sync_between_runs( - previous_run_id=self._context.original_run_id, run_id=self._context.run_id - ) - - parameters.update(cast(RunLog, attempt_run_log).parameters) - def _set_up_run_log(self, exists_ok=False): """ Create a run log and put that in the run log store @@ -103,7 +75,7 @@ def _set_up_run_log(self, exists_ok=False): try: attempt_run_log = self._context.run_log_store.get_run_log_by_id(run_id=self._context.run_id, full=False) - logger.warning(f"The run log by id: {self._context.run_id} already exists") + logger.warning(f"The run log by id: {self._context.run_id} already exists, is this designed?") raise exceptions.RunLogExistsError( f"The run log by id: {self._context.run_id} already exists and is {attempt_run_log.status}" ) @@ -115,24 +87,20 @@ def _set_up_run_log(self, exists_ok=False): raise # Consolidate and get the parameters - parameters = self._get_parameters() - - if self._context.use_cached: - self._set_up_for_re_run(parameters=parameters) + params = self._get_parameters() self._context.run_log_store.create_run_log( run_id=self._context.run_id, tag=self._context.tag, status=defaults.PROCESSING, dag_hash=self._context.dag_hash, - use_cached=self._context.use_cached, - original_run_id=self._context.original_run_id, ) # Any interaction with run log store attributes should happen via API if available. - self._context.run_log_store.set_parameters(run_id=self._context.run_id, parameters=parameters) + self._context.run_log_store.set_parameters(run_id=self._context.run_id, parameters=params) # Update run_config run_config = utils.get_run_config() + logger.debug(f"run_config as seen by executor: {run_config}") self._context.run_log_store.set_run_config(run_id=self._context.run_id, run_config=run_config) def prepare_for_graph_execution(self): @@ -155,9 +123,6 @@ def prepare_for_graph_execution(self): integration.validate(self, self._context.secrets_handler) integration.configure_for_traversal(self, self._context.secrets_handler) - integration.validate(self, self._context.experiment_tracker) - integration.configure_for_traversal(self, self._context.experiment_tracker) - self._set_up_run_log() def prepare_for_node_execution(self): @@ -177,10 +142,7 @@ def prepare_for_node_execution(self): integration.validate(self, self._context.secrets_handler) integration.configure_for_execution(self, self._context.secrets_handler) - integration.validate(self, self._context.experiment_tracker) - integration.configure_for_execution(self, self._context.experiment_tracker) - - def _sync_catalog(self, step_log: StepLog, stage: str, synced_catalogs=None) -> Optional[List[DataCatalog]]: + def _sync_catalog(self, stage: str, synced_catalogs=None) -> Optional[List[DataCatalog]]: """ 1). Identify the catalog settings by over-riding node settings with the global settings. 2). For stage = get: @@ -205,6 +167,7 @@ def _sync_catalog(self, step_log: StepLog, stage: str, synced_catalogs=None) -> "Catalog service only accepts get/put possible actions as part of node execution." 
f"Sync catalog of the executor: {self.service_name} asks for {stage} which is not accepted" ) + logger.exception(msg) raise Exception(msg) try: @@ -222,23 +185,25 @@ def _sync_catalog(self, step_log: StepLog, stage: str, synced_catalogs=None) -> data_catalogs = [] for name_pattern in node_catalog_settings.get(stage) or []: if stage == "get": + get_catalog_progress = self._context.progress.add_task(f"Getting from catalog {name_pattern}", total=1) data_catalog = self._context.catalog_handler.get( name=name_pattern, run_id=self._context.run_id, compute_data_folder=compute_data_folder ) + self._context.progress.update(get_catalog_progress, completed=True, visible=False, refresh=True) + elif stage == "put": + put_catalog_progress = self._context.progress.add_task(f"Putting in catalog {name_pattern}", total=1) data_catalog = self._context.catalog_handler.put( name=name_pattern, run_id=self._context.run_id, compute_data_folder=compute_data_folder, synced_catalogs=synced_catalogs, ) - else: - raise Exception(f"Invalid stage: {stage}") - logger.info(f"Added data catalog: {data_catalog} to step log") - data_catalogs.extend(data_catalog) - if data_catalogs: - step_log.add_data_catalogs(data_catalogs) + self._context.progress.update(put_catalog_progress, completed=True, visible=False) + + logger.debug(f"Added data catalog: {data_catalog} to step log") + data_catalogs.extend(data_catalog) return data_catalogs @@ -274,7 +239,7 @@ def step_attempt_number(self) -> int: """ return int(os.environ.get(defaults.ATTEMPT_NUMBER, 1)) - def _execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs): + def _execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, mock: bool = False, **kwargs): """ This is the entry point when we do the actual execution of the function. DO NOT Over-ride this function. @@ -294,60 +259,32 @@ def _execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, ** map_variable (dict, optional): If the node is of a map state, map_variable is the value of the iterable. Defaults to None. """ - step_log = self._context.run_log_store.get_step_log(node._get_step_log_name(map_variable), self._context.run_id) - """ - By now, all the parameters are part of the run log as a dictionary. - We set them as environment variables, serialized as json strings. - """ - params = self._context.run_log_store.get_parameters(run_id=self._context.run_id) - parameters.set_user_defined_params_as_environment_variables(params) - - attempt = self.step_attempt_number - logger.info(f"Trying to execute node: {node.internal_name}, attempt : {attempt}") + logger.info(f"Trying to execute node: {node.internal_name}, attempt : {self.step_attempt_number}") - attempt_log = self._context.run_log_store.create_attempt_log() - self._context_step_log = step_log self._context_node = node - data_catalogs_get: Optional[List[DataCatalog]] = self._sync_catalog(step_log, stage="get") - try: - attempt_log = node.execute(executor=self, mock=step_log.mock, map_variable=map_variable, **kwargs) - except Exception as e: - # Any exception here is a magnus exception as node suppresses exceptions. 
- msg = "This is clearly magnus fault, please report a bug and the logs" - logger.exception(msg) - raise Exception(msg) from e - finally: - attempt_log.attempt_number = attempt - attempt_log.parameters = params.copy() - step_log.attempts.append(attempt_log) - - tracked_data = get_tracked_data() - - self._context.experiment_tracker.publish_data(tracked_data) - # By this point, the updated parameters are deserialized as json strings. - parameters_out = parameters.get_user_set_parameters(remove=True) - - if attempt_log.status == defaults.FAIL: - logger.exception(f"Node: {node} failed") - step_log.status = defaults.FAIL - else: - # Mock is always set to False, bad design?? - # TODO: Stub nodes should not sync back data - # TODO: Errors in catalog syncing should point to Fail step - # TODO: Even for a failed execution, the catalog can happen - step_log.status = defaults.SUCCESS - self._sync_catalog(step_log, stage="put", synced_catalogs=data_catalogs_get) - step_log.user_defined_metrics = tracked_data - diff_parameters = utils.diff_dict(params, parameters_out) - self._context.run_log_store.set_parameters(self._context.run_id, diff_parameters) - - # Remove the step context - self._context_step_log = None - self._context_node = None # type: ignore - self._context_metrics = {} # type: ignore - - self._context.run_log_store.add_step_log(step_log, self._context.run_id) + data_catalogs_get: Optional[List[DataCatalog]] = self._sync_catalog(stage="get") + logger.debug(f"data_catalogs_get: {data_catalogs_get}") + + step_log = node.execute( + map_variable=map_variable, + attempt_number=self.step_attempt_number, + mock=mock, + **kwargs, + ) + + data_catalogs_put: Optional[List[DataCatalog]] = self._sync_catalog(stage="put") + logger.debug(f"data_catalogs_put: {data_catalogs_put}") + + step_log.add_data_catalogs(data_catalogs_get or []) + step_log.add_data_catalogs(data_catalogs_put or []) + + console.print(f"Summary of the step: {step_log.internal_name}") + console.print(step_log.get_summary(), style=defaults.info_style) + + self._context_node = None # type: ignore + + self._context.run_log_store.add_step_log(step_log, self._context.run_id) def add_code_identities(self, node: BaseNode, step_log: StepLog, **kwargs): """ @@ -393,31 +330,23 @@ def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = Non step_log.step_type = node.node_type step_log.status = defaults.PROCESSING + self._context.run_log_store.add_step_log(step_log, self._context.run_id) + + logger.info(f"Executing node: {node.get_summary()}") + # Add the step log to the database as per the situation. # If its a terminal node, complete it now if node.node_type in ["success", "fail"]: - self._context.run_log_store.add_step_log(step_log, self._context.run_id) self._execute_node(node, map_variable=map_variable, **kwargs) return - # In single step - if (self._single_step and not node.name == self._single_step) or not self._is_step_eligible_for_rerun( - node, map_variable=map_variable - ): - # If the node name does not match, we move on to the next node. 
-            # If previous run was successful, move on to the next step
-            step_log.mock = True
-            step_log.status = defaults.SUCCESS
-            self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-            return
 
         # We call an internal function to iterate the sub graphs and execute them
         if node.is_composite:
-            self._context.run_log_store.add_step_log(step_log, self._context.run_id)
             node.execute_as_graph(map_variable=map_variable, **kwargs)
             return
 
-        # Executor specific way to trigger a job
-        self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+        task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
+        console.print(f":runner: Executing the node {task_name} ... ", style="bold color(208)")
         self.trigger_job(node=node, map_variable=map_variable, **kwargs)
 
     def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
@@ -493,31 +422,70 @@ def execute_graph(self, dag: Graph, map_variable: TypeMapVariable = None, **kwar
         previous_node = None
         logger.info(f"Running the execution with {current_node}")
 
+        branch_execution_task = None
+        branch_task_name: str = ""
+        if dag.internal_branch_name:
+            branch_task_name = BaseNode._resolve_map_placeholders(
+                dag.internal_branch_name or "Graph",
+                map_variable,
+            )
+            branch_execution_task = self._context.progress.add_task(
+                f"[dark_orange]Executing {branch_task_name}",
+                total=1,
+            )
+
         while True:
             working_on = dag.get_node_by_name(current_node)
+            task_name = working_on._resolve_map_placeholders(working_on.internal_name, map_variable)
 
             if previous_node == current_node:
                 raise Exception("Potentially running in a infinite loop")
 
             previous_node = current_node
 
-            logger.info(f"Creating execution log for {working_on}")
-            self.execute_from_graph(working_on, map_variable=map_variable, **kwargs)
+            logger.debug(f"Creating execution log for {working_on}")
 
-            status, next_node_name = self._get_status_and_next_node_name(
-                current_node=working_on, dag=dag, map_variable=map_variable
-            )
+            depth = " " * ((task_name.count(".") or 1) - 1)  # parenthesized: "or" binds looser than "-"
 
-            if status == defaults.TRIGGERED:
-                # Some nodes go into triggered state and self traverse
-                logger.info(f"Triggered the job to execute the node {current_node}")
-                break
+            task_execution = self._context.progress.add_task(f"{depth}Executing {task_name}", total=1)
+
+            try:
+                self.execute_from_graph(working_on, map_variable=map_variable, **kwargs)
+                status, next_node_name = self._get_status_and_next_node_name(
+                    current_node=working_on, dag=dag, map_variable=map_variable
+                )
+
+                if status == defaults.SUCCESS:
+                    self._context.progress.update(
+                        task_execution,
+                        description=f"{depth}[green] {task_name} Completed",
+                        completed=True,
+                        overflow="fold",
+                    )
+                else:
+                    self._context.progress.update(
+                        task_execution, description=f"{depth}[red] {task_name} Failed", completed=True
+                    )  # type: ignore
+            except Exception as e:  # noqa: E722
+                self._context.progress.update(
+                    task_execution,
+                    description=f"{depth}[red] {task_name} Errored",
+                    completed=True,
+                )
+                console.print(e, style=defaults.error_style)
+                logger.exception(e)
+                raise
 
             if working_on.node_type in ["success", "fail"]:
                 break
 
             current_node = next_node_name
 
+        if branch_execution_task:
+            self._context.progress.update(
+                branch_execution_task, description=f"[green3] {branch_task_name} completed", completed=True
+            )
+
         run_log = self._context.run_log_store.get_branch_log(
             working_on._get_branch_log_name(map_variable), self._context.run_id
         )
@@ -528,50 +496,10 @@ 
logger.info(f"Finished execution of the {branch} with status {run_log.status}") - # get the final run log - if branch == "graph": - run_log = self._context.run_log_store.get_run_log_by_id(run_id=self._context.run_id, full=True) - print(json.dumps(run_log.model_dump(), indent=4)) - - def _is_step_eligible_for_rerun(self, node: BaseNode, map_variable: TypeMapVariable = None): - """ - In case of a re-run, this method checks to see if the previous run step status to determine if a re-run is - necessary. - * True: If its not a re-run. - * True: If its a re-run and we failed in the last run or the corresponding logs do not exist. - * False: If its a re-run and we succeeded in the last run. - - Most cases, this logic need not be touched - - Args: - node (Node): The node to check against re-run - map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable.. - Defaults to None. - - Returns: - bool: Eligibility for re-run. True means re-run, False means skip to the next step. - """ - if self._context.use_cached: - node_step_log_name = node._get_step_log_name(map_variable=map_variable) - logger.info(f"Scanning previous run logs for node logs of: {node_step_log_name}") - - try: - previous_node_log = self._context.run_log_store.get_step_log( - internal_name=node_step_log_name, run_id=self._context.original_run_id - ) - except exceptions.StepLogNotFoundError: - logger.warning(f"Did not find the node {node.name} in previous run log") - return True # We should re-run the node. - - logger.info(f"The original step status: {previous_node_log.status}") - - if previous_node_log.status == defaults.SUCCESS: - return False # We need not run the node - - logger.info(f"The new execution should start executing graph from this node {node.name}") - return True - - return True + if dag == self._context.dag: + run_log = cast(RunLog, run_log) + console.print("Completed Execution, Summary:", style="bold color(208)") + console.print(run_log.get_summary(), style=defaults.info_style) def send_return_code(self, stage="traversal"): """ diff --git a/magnus/extensions/executor/argo/__init__.py b/runnable/extensions/executor/argo/__init__.py similarity index 100% rename from magnus/extensions/executor/argo/__init__.py rename to runnable/extensions/executor/argo/__init__.py diff --git a/magnus/extensions/executor/argo/implementation.py b/runnable/extensions/executor/argo/implementation.py similarity index 97% rename from magnus/extensions/executor/argo/implementation.py rename to runnable/extensions/executor/argo/implementation.py index 2065381e..135f3e6b 100644 --- a/magnus/extensions/executor/argo/implementation.py +++ b/runnable/extensions/executor/argo/implementation.py @@ -7,18 +7,25 @@ from collections import OrderedDict from typing import Any, Dict, List, Optional, Union, cast -from pydantic import BaseModel, ConfigDict, Field, computed_field, field_serializer, field_validator +from pydantic import ( + BaseModel, + ConfigDict, + Field, + computed_field, + field_serializer, + field_validator, +) from pydantic.functional_serializers import PlainSerializer from ruamel.yaml import YAML from typing_extensions import Annotated -from magnus import defaults, exceptions, integration, parameters, utils -from magnus.defaults import TypeMapVariable -from magnus.extensions.executor import GenericExecutor -from magnus.extensions.nodes import DagNode, MapNode, ParallelNode -from magnus.graph import Graph, create_node, search_node_by_internal_name -from magnus.integration import BaseIntegration 
-from magnus.nodes import BaseNode +from runnable import defaults, exceptions, integration, parameters, utils +from runnable.defaults import TypeMapVariable +from runnable.extensions.executor import GenericExecutor +from runnable.extensions.nodes import DagNode, MapNode, ParallelNode +from runnable.graph import Graph, create_node, search_node_by_internal_name +from runnable.integration import BaseIntegration +from runnable.nodes import BaseNode logger = logging.getLogger(defaults.NAME) @@ -292,7 +299,7 @@ def reshape_inputs(self, inputs: List[Parameter]) -> Dict[str, List[Parameter]]: class DagTemplate(BaseModel): # These are used for parallel, map nodes dag definition - name: str = "magnus-dag" + name: str = "runnable-dag" tasks: List[DagTaskTemplate] = Field(default=[], exclude=True) inputs: Optional[List[Parameter]] = Field(default=None, serialization_alias="inputs") parallelism: Optional[int] = None @@ -561,7 +568,7 @@ def get_renderer(node): class MetaData(BaseModel): - generate_name: str = Field(default="magnus-dag-", serialization_alias="generateName") + generate_name: str = Field(default="runnable-dag-", serialization_alias="generateName") annotations: Optional[Dict[str, str]] = Field(default_factory=dict) labels: Optional[Dict[str, str]] = Field(default_factory=dict) namespace: Optional[str] = Field(default=None) @@ -569,7 +576,7 @@ class MetaData(BaseModel): class Spec(BaseModel): active_deadline_seconds: int = Field(serialization_alias="activeDeadlineSeconds") - entrypoint: str = Field(default="magnus-dag") + entrypoint: str = Field(default="runnable-dag") node_selector: Optional[Dict[str, str]] = Field(default_factory=dict, serialization_alias="nodeSelector") tolerations: Optional[List[Toleration]] = Field(default=None, serialization_alias="tolerations") parallelism: Optional[int] = Field(default=None, serialization_alias="parallelism") @@ -665,6 +672,7 @@ def validate_parallelism(cls, parallelism: Optional[int]) -> Optional[int]: class ArgoExecutor(GenericExecutor): service_name: str = "argo" + _local: bool = False model_config = ConfigDict(extra="forbid") @@ -674,7 +682,7 @@ class ArgoExecutor(GenericExecutor): output_file: str = "argo-pipeline.yaml" # Metadata related fields - name: str = Field(default="magnus-dag-", description="Used as an identifier for the workflow") + name: str = Field(default="runnable-dag-", description="Used as an identifier for the workflow") annotations: Dict[str, str] = Field(default_factory=dict) labels: Dict[str, str] = Field(default_factory=dict) @@ -772,9 +780,6 @@ def prepare_for_graph_execution(self): integration.validate(self, self._context.secrets_handler) integration.configure_for_traversal(self, self._context.secrets_handler) - integration.validate(self, self._context.experiment_tracker) - integration.configure_for_traversal(self, self._context.experiment_tracker) - def prepare_for_node_execution(self): """ Perform any modifications to the services prior to execution of the node. 
@@ -994,7 +999,7 @@ def _create_fan_in_template(self, composite_node, list_of_iter_values: Optional[ return DagTaskTemplate(name=f"{clean_name}-fan-in", template=f"{clean_name}-fan-in") def _gather_task_templates_of_dag( - self, dag: Graph, dag_name="magnus-dag", list_of_iter_values: Optional[List] = None + self, dag: Graph, dag_name="runnable-dag", list_of_iter_values: Optional[List] = None ): current_node = dag.start_at previous_node = None diff --git a/magnus/extensions/executor/argo/specification.yaml b/runnable/extensions/executor/argo/specification.yaml similarity index 97% rename from magnus/extensions/executor/argo/specification.yaml rename to runnable/extensions/executor/argo/specification.yaml index 9c126361..d87efddc 100644 --- a/magnus/extensions/executor/argo/specification.yaml +++ b/runnable/extensions/executor/argo/specification.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: magnus-dag + generateName: runnable-dag spec: activeDeadlineSeconds: int # max run time of the workflow entrypoint: str diff --git a/magnus/extensions/executor/k8s_job/__init__.py b/runnable/extensions/executor/k8s_job/__init__.py similarity index 100% rename from magnus/extensions/executor/k8s_job/__init__.py rename to runnable/extensions/executor/k8s_job/__init__.py diff --git a/magnus/extensions/executor/k8s_job/implementation_FF.py b/runnable/extensions/executor/k8s_job/implementation_FF.py similarity index 98% rename from magnus/extensions/executor/k8s_job/implementation_FF.py rename to runnable/extensions/executor/k8s_job/implementation_FF.py index 40b69612..604246ff 100644 --- a/magnus/extensions/executor/k8s_job/implementation_FF.py +++ b/runnable/extensions/executor/k8s_job/implementation_FF.py @@ -4,10 +4,10 @@ # from pydantic import BaseModel -# from magnus import defaults, integration, utils -# from magnus.executor import BaseExecutor -# from magnus.graph import Graph -# from magnus.nodes import BaseNode +# from runnable import defaults, integration, utils +# from runnable.executor import BaseExecutor +# from runnable.graph import Graph +# from runnable.nodes import BaseNode # logger = logging.getLogger(defaults.NAME) diff --git a/magnus/extensions/executor/k8s_job/integration_FF.py b/runnable/extensions/executor/k8s_job/integration_FF.py similarity index 96% rename from magnus/extensions/executor/k8s_job/integration_FF.py rename to runnable/extensions/executor/k8s_job/integration_FF.py index d8e21f2a..47a27825 100644 --- a/magnus/extensions/executor/k8s_job/integration_FF.py +++ b/runnable/extensions/executor/k8s_job/integration_FF.py @@ -1,7 +1,7 @@ import logging -from magnus import defaults -from magnus.integration import BaseIntegration +from runnable import defaults +from runnable.integration import BaseIntegration logger = logging.getLogger(defaults.NAME) diff --git a/magnus/extensions/executor/local/__init__.py b/runnable/extensions/executor/local/__init__.py similarity index 100% rename from magnus/extensions/executor/local/__init__.py rename to runnable/extensions/executor/local/__init__.py diff --git a/magnus/extensions/executor/local/implementation.py b/runnable/extensions/executor/local/implementation.py similarity index 90% rename from magnus/extensions/executor/local/implementation.py rename to runnable/extensions/executor/local/implementation.py index 49de2863..c13ffec3 100644 --- a/magnus/extensions/executor/local/implementation.py +++ b/runnable/extensions/executor/local/implementation.py @@ -1,10 +1,10 @@ import logging -from 
magnus import defaults -from magnus.defaults import TypeMapVariable -from magnus.extensions.executor import GenericExecutor -from magnus.extensions.nodes import TaskNode -from magnus.nodes import BaseNode +from runnable import defaults +from runnable.defaults import TypeMapVariable +from runnable.extensions.executor import GenericExecutor +from runnable.extensions.nodes import TaskNode +from runnable.nodes import BaseNode logger = logging.getLogger(defaults.LOGGER_NAME) @@ -25,6 +25,7 @@ class LocalExecutor(GenericExecutor): """ service_name: str = "local" + _local: bool = True def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs): """ @@ -34,6 +35,7 @@ def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kw node (BaseNode): [description] map_variable (str, optional): [description]. Defaults to ''. """ + self.prepare_for_node_execution() self.execute_node(node=node, map_variable=map_variable, **kwargs) diff --git a/magnus/extensions/executor/local_container/__init__.py b/runnable/extensions/executor/local_container/__init__.py similarity index 100% rename from magnus/extensions/executor/local_container/__init__.py rename to runnable/extensions/executor/local_container/__init__.py diff --git a/magnus/extensions/executor/local_container/implementation.py b/runnable/extensions/executor/local_container/implementation.py similarity index 90% rename from magnus/extensions/executor/local_container/implementation.py rename to runnable/extensions/executor/local_container/implementation.py index 9e38e27a..f1e794f1 100644 --- a/magnus/extensions/executor/local_container/implementation.py +++ b/runnable/extensions/executor/local_container/implementation.py @@ -5,13 +5,13 @@ from pydantic import Field from rich import print -from magnus import defaults, integration, utils -from magnus.datastore import StepLog -from magnus.defaults import TypeMapVariable -from magnus.extensions.executor import GenericExecutor -from magnus.extensions.nodes import TaskNode -from magnus.integration import BaseIntegration -from magnus.nodes import BaseNode +from runnable import defaults, integration, utils +from runnable.datastore import StepLog +from runnable.defaults import TypeMapVariable +from runnable.extensions.executor import GenericExecutor +from runnable.extensions.nodes import TaskNode +from runnable.integration import BaseIntegration +from runnable.nodes import BaseNode logger = logging.getLogger(defaults.LOGGER_NAME) @@ -55,6 +55,8 @@ class LocalContainerExecutor(GenericExecutor): run_in_local: bool = False environment: Dict[str, str] = Field(default_factory=dict) + _local: bool = False + _container_log_location = "/tmp/run_logs/" _container_catalog_location = "/tmp/catalog/" _container_secrets_location = "/tmp/dotenv" @@ -131,7 +133,7 @@ def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kw If the config has "run_in_local: True", we compute it on local system instead of container. - In local container execution, we just spin the container to execute magnus execute_single_node. + In local container execution, we just spin up the container to execute runnable execute_single_node.
Args: node (BaseNode): The node we are currently executing @@ -198,6 +200,7 @@ def _spin_container( try: logger.info(f"Running the command {command}") + print(command) #  Overrides global config with local executor_config = self._resolve_executor_config(node) @@ -256,17 +259,8 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration): service_type = "run_log_store" # One of secret, catalog, datastore service_provider = "file-system" # The actual implementation of the service - def validate(self, **kwargs): - if self.executor._is_parallel_execution(): # pragma: no branch - msg = ( - "Run log generated by file-system run log store are not thread safe. " - "Inconsistent results are possible because of race conditions to write to the same file.\n" - "Consider using partitioned run log store like database for consistent results." - ) - logger.warning(msg) - def configure_for_traversal(self, **kwargs): - from magnus.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore + from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore self.executor = cast(LocalContainerExecutor, self.executor) self.service = cast(FileSystemRunLogstore, self.service) @@ -278,7 +272,7 @@ def configure_for_traversal(self, **kwargs): } def configure_for_execution(self, **kwargs): - from magnus.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore + from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore self.executor = cast(LocalContainerExecutor, self.executor) self.service = cast(FileSystemRunLogstore, self.service) @@ -296,7 +290,7 @@ class LocalContainerComputeFileSystemCatalog(BaseIntegration): service_provider = "file-system" # The actual implementation of the service def configure_for_traversal(self, **kwargs): - from magnus.extensions.catalog.file_system.implementation import FileSystemCatalog + from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog self.executor = cast(LocalContainerExecutor, self.executor) self.service = cast(FileSystemCatalog, self.service) @@ -308,7 +302,7 @@ def configure_for_traversal(self, **kwargs): } def configure_for_execution(self, **kwargs): - from magnus.extensions.catalog.file_system.implementation import FileSystemCatalog + from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog self.executor = cast(LocalContainerExecutor, self.executor) self.service = cast(FileSystemCatalog, self.service) @@ -329,7 +323,7 @@ def validate(self, **kwargs): logger.warning("Using dot env for non local deployments is not ideal, consider options") def configure_for_traversal(self, **kwargs): - from magnus.extensions.secrets.dotenv.implementation import DotEnvSecrets + from runnable.extensions.secrets.dotenv.implementation import DotEnvSecrets self.executor = cast(LocalContainerExecutor, self.executor) self.service = cast(DotEnvSecrets, self.service) @@ -341,7 +335,7 @@ def configure_for_traversal(self, **kwargs): } def configure_for_execution(self, **kwargs): - from magnus.extensions.secrets.dotenv.implementation import DotEnvSecrets + from runnable.extensions.secrets.dotenv.implementation import DotEnvSecrets self.executor = cast(LocalContainerExecutor, self.executor) self.service = cast(DotEnvSecrets, self.service) diff --git a/magnus/extensions/executor/mocked/__init__.py b/runnable/extensions/executor/mocked/__init__.py similarity index 100% rename from magnus/extensions/executor/mocked/__init__.py rename 
to runnable/extensions/executor/mocked/__init__.py diff --git a/magnus/extensions/executor/mocked/implementation.py b/runnable/extensions/executor/mocked/implementation.py similarity index 52% rename from magnus/extensions/executor/mocked/implementation.py rename to runnable/extensions/executor/mocked/implementation.py index b7e9d4f6..ab6c6dd3 100644 --- a/magnus/extensions/executor/mocked/implementation.py +++ b/runnable/extensions/executor/mocked/implementation.py @@ -4,13 +4,12 @@ from pydantic import ConfigDict, Field -from magnus import context, defaults -from magnus.defaults import TypeMapVariable -from magnus.extensions.executor import GenericExecutor -from magnus.extensions.nodes import TaskNode -from magnus.integration import BaseIntegration -from magnus.nodes import BaseNode -from magnus.tasks import BaseTaskType +from runnable import context, defaults +from runnable.defaults import TypeMapVariable +from runnable.extensions.executor import GenericExecutor +from runnable.extensions.nodes import TaskNode +from runnable.nodes import BaseNode +from runnable.tasks import BaseTaskType logger = logging.getLogger(defaults.LOGGER_NAME) @@ -25,8 +24,7 @@ class EasyModel(model): # type: ignore class MockedExecutor(GenericExecutor): service_name: str = "mocked" - - enable_parallel: bool = defaults.ENABLE_PARALLEL + _local_executor: bool = True patches: Dict[str, Any] = Field(default_factory=dict) @@ -34,9 +32,6 @@ def _context(self): return context.run_context - def _set_up_for_re_run(self, parameters: Dict[str, Any]) -> None: - raise Exception("MockedExecutor does not support re-run") - def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs): """ This is the entry point from the graph execution. @@ -82,13 +77,13 @@ def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = Non node.execute_as_graph(map_variable=map_variable, **kwargs) return - node_to_send: TaskNode = cast(TaskNode, node).model_copy(deep=True) if node.name not in self.patches: # node is not patched, so mock it - step_log.mock = True + self._execute_node(node, map_variable=map_variable, mock=True, **kwargs) else: - # node is mocked, change the executable to python with the + # node is patched, change the executable to use the # command as the patch value + node_to_send: TaskNode = cast(TaskNode, node).model_copy(deep=True) executable_type = node_to_send.executable.__class__ executable = create_executable( self.patches[node.name], @@ -96,48 +91,7 @@ def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = Non node_name=node.name, ) node_to_send.executable = executable - pass - - # Executor specific way to trigger a job - self._context.run_log_store.add_step_log(step_log, self._context.run_id) - self.trigger_job(node=node_to_send, map_variable=map_variable, **kwargs) - - def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs): - """ - Call this method only if we are responsible for traversing the graph via - execute_from_graph(). - - We are not prepared to execute node as of now. - - Args: - node (BaseNode): The node to execute - map_variable (str, optional): If the node if of a map state, this corresponds to the value of iterable. - Defaults to ''.
- - NOTE: We do not raise an exception as this method is not required by many extensions - """ - self.prepare_for_node_execution() - self.execute_node(node=node, map_variable=map_variable, **kwargs) - - def _is_step_eligible_for_rerun(self, node: BaseNode, map_variable: TypeMapVariable = None): - """ - In case of a re-run, this method checks to see if the previous run step status to determine if a re-run is - necessary. - * True: If its not a re-run. - * True: If its a re-run and we failed in the last run or the corresponding logs do not exist. - * False: If its a re-run and we succeeded in the last run. - - Most cases, this logic need not be touched - - Args: - node (Node): The node to check against re-run - map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable.. - Defaults to None. - - Returns: - bool: Eligibility for re-run. True means re-run, False means skip to the next step. - """ - return True + self._execute_node(node_to_send, map_variable=map_variable, mock=False, **kwargs) def _resolve_executor_config(self, node: BaseNode): """ @@ -178,43 +132,3 @@ def _resolve_executor_config(self, node: BaseNode): def execute_job(self, node: TaskNode): pass - - def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs): - """ - For local execution, we just execute the node. - - Args: - node (BaseNode): _description_ - map_variable (dict[str, str], optional): _description_. Defaults to None. - """ - self._execute_node(node=node, map_variable=map_variable, **kwargs) - - -class LocalContainerComputeFileSystemRunLogstore(BaseIntegration): - """ - Integration between local container and file system run log store - """ - - executor_type = "local-container" - service_type = "run_log_store" # One of secret, catalog, datastore - service_provider = "file-system" # The actual implementation of the service - - def validate(self, **kwargs): - if self.executor._is_parallel_execution(): # pragma: no branch - msg = "Mocked executor does not support parallel execution. " - logger.warning(msg) - - -class LocalContainerComputeChunkedFSRunLogstore(BaseIntegration): - """ - Integration between local container and file system run log store - """ - - executor_type = "local-container" - service_type = "run_log_store" # One of secret, catalog, datastore - service_provider = "chunked-fs" # The actual implementation of the service - - def validate(self, **kwargs): - if self.executor._is_parallel_execution(): # pragma: no branch - msg = "Mocked executor does not support parallel execution. 
" - logger.warning(msg) diff --git a/magnus/extensions/experiment_tracker/__init__.py b/runnable/extensions/executor/retry/__init__.py similarity index 100% rename from magnus/extensions/experiment_tracker/__init__.py rename to runnable/extensions/executor/retry/__init__.py diff --git a/runnable/extensions/executor/retry/implementation.py b/runnable/extensions/executor/retry/implementation.py new file mode 100644 index 00000000..09256dd1 --- /dev/null +++ b/runnable/extensions/executor/retry/implementation.py @@ -0,0 +1,158 @@ +import logging +from functools import cached_property +from typing import Any, Dict, Optional + +from runnable import context, defaults, exceptions +from runnable.datastore import RunLog +from runnable.defaults import TypeMapVariable +from runnable.extensions.executor import GenericExecutor +from runnable.nodes import BaseNode + +logger = logging.getLogger(defaults.LOGGER_NAME) + + +class RetryExecutor(GenericExecutor): + """ + The skeleton of an executor class. + Any implementation of an executor should inherit this class and over-ride accordingly. + + This is a loaded base class which has a lot of methods already implemented for "typical" executions. + Look at the function docs to understand how to use them appropriately. + + For any implementation: + 1). Who/when should the run log be set up? + 2). Who/When should the step log be set up? + + """ + + service_name: str = "retry" + service_type: str = "executor" + run_id: str + + _local: bool = True + _original_run_log: Optional[RunLog] = None + + @property + def _context(self): + return context.run_context + + @cached_property + def original_run_log(self): + self.original_run_log = self._context.run_log_store.get_run_log_by_id( + run_id=self.run_id, + full=True, + ) + + def _set_up_for_re_run(self, params: Dict[str, Any]) -> None: + # Sync the previous run log catalog to this one. + self._context.catalog_handler.sync_between_runs(previous_run_id=self.run_id, run_id=self._context.run_id) + + params.update(self.original_run_log.parameters) + + def _set_up_run_log(self, exists_ok=False): + """ + Create a run log and put that in the run log store + + If exists_ok, we allow the run log to be already present in the run log store. + """ + super()._set_up_run_log(exists_ok=exists_ok) + + # Should the parameters be copied from previous execution + # self._set_up_for_re_run(params=params) + + def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs): + """ + This is the entry point to from the graph execution. + + While the self.execute_graph is responsible for traversing the graph, this function is responsible for + actual execution of the node. + + If the node type is: + * task : We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run + * success: We can delegate to _execute_node + * fail: We can delegate to _execute_node + + For nodes that are internally graphs: + * parallel: Delegate the responsibility of execution to the node.execute_as_graph() + * dag: Delegate the responsibility of execution to the node.execute_as_graph() + * map: Delegate the responsibility of execution to the node.execute_as_graph() + + Transpilers will NEVER use this method and will NEVER call ths method. + This method should only be used by interactive executors. + + Args: + node (Node): The node to execute + map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable. + Defaults to None. 
+ """ + step_log = self._context.run_log_store.create_step_log(node.name, node._get_step_log_name(map_variable)) + + self.add_code_identities(node=node, step_log=step_log) + + step_log.step_type = node.node_type + step_log.status = defaults.PROCESSING + + # Add the step log to the database as per the situation. + # If its a terminal node, complete it now + if node.node_type in ["success", "fail"]: + self._context.run_log_store.add_step_log(step_log, self._context.run_id) + self._execute_node(node, map_variable=map_variable, **kwargs) + return + + # In retry step + if not self._is_step_eligible_for_rerun(node, map_variable=map_variable): + # If the node name does not match, we move on to the next node. + # If previous run was successful, move on to the next step + step_log.mock = True + step_log.status = defaults.SUCCESS + self._context.run_log_store.add_step_log(step_log, self._context.run_id) + return + + # We call an internal function to iterate the sub graphs and execute them + if node.is_composite: + self._context.run_log_store.add_step_log(step_log, self._context.run_id) + node.execute_as_graph(map_variable=map_variable, **kwargs) + return + + # Executor specific way to trigger a job + self._context.run_log_store.add_step_log(step_log, self._context.run_id) + self.execute_node(node=node, map_variable=map_variable, **kwargs) + + def _is_step_eligible_for_rerun(self, node: BaseNode, map_variable: TypeMapVariable = None): + """ + In case of a re-run, this method checks to see if the previous run step status to determine if a re-run is + necessary. + * True: If its not a re-run. + * True: If its a re-run and we failed in the last run or the corresponding logs do not exist. + * False: If its a re-run and we succeeded in the last run. + + Most cases, this logic need not be touched + + Args: + node (Node): The node to check against re-run + map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable.. + Defaults to None. + + Returns: + bool: Eligibility for re-run. True means re-run, False means skip to the next step. + """ + + node_step_log_name = node._get_step_log_name(map_variable=map_variable) + logger.info(f"Scanning previous run logs for node logs of: {node_step_log_name}") + + try: + previous_attempt_log, _ = self.original_run_log.search_step_by_internal_name(node_step_log_name) + except exceptions.StepLogNotFoundError: + logger.warning(f"Did not find the node {node.name} in previous run log") + return True # We should re-run the node. 
+ + logger.info(f"The original step status: {previous_attempt_log.status}") + + if previous_attempt_log.status == defaults.SUCCESS: + return False # We need not run the node + + logger.info(f"The new execution should start executing graph from this node {node.name}") + return True + + def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs): + self._execute_node(node, map_variable=map_variable, **kwargs) diff --git a/magnus/extensions/nodes.py b/runnable/extensions/nodes.py similarity index 66% rename from magnus/extensions/nodes.py rename to runnable/extensions/nodes.py index 17f0a981..577cf449 100644 --- a/magnus/extensions/nodes.py +++ b/runnable/extensions/nodes.py @@ -1,20 +1,27 @@ -import json +import importlib import logging -import multiprocessing +import os +import sys from collections import OrderedDict from copy import deepcopy from datetime import datetime -from typing import Any, Dict, cast - -from pydantic import ConfigDict, Field, ValidationInfo, field_serializer, field_validator +from typing import Any, Dict, List, Optional, Tuple, Union, cast + +from pydantic import ( + ConfigDict, + Field, + ValidationInfo, + field_serializer, + field_validator, +) from typing_extensions import Annotated -from magnus import defaults, utils -from magnus.datastore import StepAttempt -from magnus.defaults import TypeMapVariable -from magnus.graph import Graph, create_graph -from magnus.nodes import CompositeNode, ExecutableNode, TerminalNode -from magnus.tasks import BaseTaskType, create_task +from runnable import datastore, defaults, utils +from runnable.datastore import JsonParameter, MetricParameter, ObjectParameter, StepLog +from runnable.defaults import TypeMapVariable +from runnable.graph import Graph, create_graph +from runnable.nodes import CompositeNode, ExecutableNode, TerminalNode +from runnable.tasks import BaseTaskType, create_task logger = logging.getLogger(defaults.LOGGER_NAME) @@ -44,9 +51,25 @@ def parse_from_config(cls, config: Dict[str, Any]) -> "TaskNode": executable = create_task(task_config) return cls(executable=executable, **node_config, **task_config) - def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt: + def get_summary(self) -> Dict[str, Any]: + summary = { + "name": self.name, + "type": self.node_type, + "executable": self.executable.get_summary(), + "catalog": self._get_catalog_settings(), + } + + return summary + + def execute( + self, + mock=False, + map_variable: TypeMapVariable = None, + attempt_number: int = 1, + **kwargs, + ) -> StepLog: """ - All that we do in magnus is to come to this point where we actually execute the command. + All that we do in runnable is to come to this point where we actually execute the command. 
Args: executor (_type_): The executor class @@ -56,25 +79,28 @@ def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> Returns: StepAttempt: The attempt object """ - print("Executing task:", self._context.executor._context_node) - # Here is where the juice is - attempt_log = self._context.run_log_store.create_attempt_log() - try: - attempt_log.start_time = str(datetime.now()) - attempt_log.status = defaults.SUCCESS - if not mock: - # Do not run if we are mocking the execution, could be useful for caching and dry runs - self.executable.execute_command(map_variable=map_variable) - except Exception as _e: # pylint: disable=W0703 - logger.exception("Task failed") - attempt_log.status = defaults.FAIL - attempt_log.message = str(_e) - finally: - attempt_log.end_time = str(datetime.now()) - attempt_log.duration = utils.get_duration_between_datetime_strings( - attempt_log.start_time, attempt_log.end_time + step_log = self._context.run_log_store.get_step_log(self._get_step_log_name(map_variable), self._context.run_id) + + if not mock: + # Do not run if we are mocking the execution, could be useful for caching and dry runs + attempt_log = self.executable.execute_command(map_variable=map_variable) + attempt_log.attempt_number = attempt_number + else: + attempt_log = datastore.StepAttempt( + status=defaults.SUCCESS, + start_time=str(datetime.now()), + end_time=str(datetime.now()), + attempt_number=attempt_number, ) - return attempt_log + + logger.debug(f"attempt_log: {attempt_log}") + logger.info(f"Step {self.name} completed with status: {attempt_log.status}") + + step_log.status = attempt_log.status + + step_log.attempts.append(attempt_log) + + return step_log class FailNode(TerminalNode): @@ -88,7 +114,21 @@ class FailNode(TerminalNode): def parse_from_config(cls, config: Dict[str, Any]) -> "FailNode": return cast("FailNode", super().parse_from_config(config)) - def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt: + def get_summary(self) -> Dict[str, Any]: + summary = { + "name": self.name, + "type": self.node_type, + } + + return summary + + def execute( + self, + mock=False, + map_variable: TypeMapVariable = None, + attempt_number: int = 1, + **kwargs, + ) -> StepLog: """ Execute the failure node. Set the run or branch log status to failure. 
@@ -101,25 +141,26 @@ def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> Returns: StepAttempt: The step attempt object """ - attempt_log = self._context.run_log_store.create_attempt_log() - try: - attempt_log.start_time = str(datetime.now()) - attempt_log.status = defaults.SUCCESS - #  could be a branch or run log - run_or_branch_log = self._context.run_log_store.get_branch_log( - self._get_branch_log_name(map_variable), self._context.run_id - ) - run_or_branch_log.status = defaults.FAIL - self._context.run_log_store.add_branch_log(run_or_branch_log, self._context.run_id) - except BaseException: # pylint: disable=W0703 - logger.exception("Fail node execution failed") - finally: - attempt_log.status = defaults.SUCCESS # This is a dummy node, so we ignore errors and mark SUCCESS - attempt_log.end_time = str(datetime.now()) - attempt_log.duration = utils.get_duration_between_datetime_strings( - attempt_log.start_time, attempt_log.end_time - ) - return attempt_log + step_log = self._context.run_log_store.get_step_log(self._get_step_log_name(map_variable), self._context.run_id) + + attempt_log = datastore.StepAttempt( + status=defaults.SUCCESS, + start_time=str(datetime.now()), + end_time=str(datetime.now()), + attempt_number=attempt_number, + ) + + run_or_branch_log = self._context.run_log_store.get_branch_log( + self._get_branch_log_name(map_variable), self._context.run_id + ) + run_or_branch_log.status = defaults.FAIL + self._context.run_log_store.add_branch_log(run_or_branch_log, self._context.run_id) + + step_log.status = attempt_log.status + + step_log.attempts.append(attempt_log) + + return step_log class SuccessNode(TerminalNode): @@ -133,7 +174,21 @@ class SuccessNode(TerminalNode): def parse_from_config(cls, config: Dict[str, Any]) -> "SuccessNode": return cast("SuccessNode", super().parse_from_config(config)) - def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt: + def get_summary(self) -> Dict[str, Any]: + summary = { + "name": self.name, + "type": self.node_type, + } + + return summary + + def execute( + self, + mock=False, + map_variable: TypeMapVariable = None, + attempt_number: int = 1, + **kwargs, + ) -> StepLog: """ Execute the success node. Set the run or branch log status to success. 
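Note: FailNode and SuccessNode now share the same execute() shape: fetch the step log, record a single successful StepAttempt, and write the pipeline outcome onto the run/branch log; only that final status differs. A condensed sketch of the shared pattern, using only the calls visible in this diff (the helper name is illustrative, not part of the change):

    from datetime import datetime

    from runnable import datastore, defaults

    def terminal_attempt(attempt_number: int = 1) -> datastore.StepAttempt:
        # Terminal nodes are dummy steps: the attempt itself is always SUCCESS;
        # the pipeline outcome (SUCCESS or FAIL) is recorded on the run/branch log.
        return datastore.StepAttempt(
            status=defaults.SUCCESS,
            start_time=str(datetime.now()),
            end_time=str(datetime.now()),
            attempt_number=attempt_number,
        )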
@@ -146,25 +201,26 @@ def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> Returns: StepAttempt: The step attempt object """ - attempt_log = self._context.run_log_store.create_attempt_log() - try: - attempt_log.start_time = str(datetime.now()) - attempt_log.status = defaults.SUCCESS - #  could be a branch or run log - run_or_branch_log = self._context.run_log_store.get_branch_log( - self._get_branch_log_name(map_variable), self._context.run_id - ) - run_or_branch_log.status = defaults.SUCCESS - self._context.run_log_store.add_branch_log(run_or_branch_log, self._context.run_id) - except BaseException: # pylint: disable=W0703 - logger.exception("Success node execution failed") - finally: - attempt_log.status = defaults.SUCCESS # This is a dummy node and we make sure we mark it as success - attempt_log.end_time = str(datetime.now()) - attempt_log.duration = utils.get_duration_between_datetime_strings( - attempt_log.start_time, attempt_log.end_time - ) - return attempt_log + step_log = self._context.run_log_store.get_step_log(self._get_step_log_name(map_variable), self._context.run_id) + + attempt_log = datastore.StepAttempt( + status=defaults.SUCCESS, + start_time=str(datetime.now()), + end_time=str(datetime.now()), + attempt_number=attempt_number, + ) + + run_or_branch_log = self._context.run_log_store.get_branch_log( + self._get_branch_log_name(map_variable), self._context.run_id + ) + run_or_branch_log.status = defaults.SUCCESS + self._context.run_log_store.add_branch_log(run_or_branch_log, self._context.run_id) + + step_log.status = attempt_log.status + + step_log.attempts.append(attempt_log) + + return step_log class ParallelNode(CompositeNode): @@ -185,6 +241,15 @@ class ParallelNode(CompositeNode): branches: Dict[str, Graph] is_composite: bool = Field(default=True, exclude=True) + def get_summary(self) -> Dict[str, Any]: + summary = { + "name": self.name, + "type": self.node_type, + "branches": [branch.get_summary() for branch in self.branches.values()], + } + + return summary + @field_serializer("branches") def ser_branches(self, branches: Dict[str, Graph]) -> Dict[str, Graph]: ret: Dict[str, Graph] = {} @@ -257,35 +322,10 @@ def execute_as_graph(self, map_variable: TypeMapVariable = None, **kwargs): executor (Executor): The Executor as per the use config **kwargs: Optional kwargs passed around """ - from magnus import entrypoints - self.fan_out(map_variable=map_variable, **kwargs) - jobs = [] - # Given that we can have nesting and complex graphs, controlling the number of processes is hard. 
- # A better way is to actually submit the job to some process scheduler which does resource management - for internal_branch_name, branch in self.branches.items(): - if self._context.executor._is_parallel_execution(): - # Trigger parallel jobs - action = entrypoints.execute_single_brach - kwargs = { - "configuration_file": self._context.configuration_file, - "pipeline_file": self._context.pipeline_file, - "branch_name": internal_branch_name.replace(" ", defaults.COMMAND_FRIENDLY_CHARACTER), - "run_id": self._context.run_id, - "map_variable": json.dumps(map_variable), - "tag": self._context.tag, - } - process = multiprocessing.Process(target=action, kwargs=kwargs) - jobs.append(process) - process.start() - - else: - # If parallel is not enabled, execute them sequentially - self._context.executor.execute_graph(branch, map_variable=map_variable, **kwargs) - - for job in jobs: - job.join() # Find status of the branches + for _, branch in self.branches.items(): + self._context.executor.execute_graph(branch, map_variable=map_variable, **kwargs) self.fan_in(map_variable=map_variable, **kwargs) @@ -299,6 +339,7 @@ def fan_in(self, map_variable: TypeMapVariable = None, **kwargs): executor (BaseExecutor): The executor class as defined by the config map_variable (dict, optional): If the node is part of a map. Defaults to None. """ + effective_internal_name = self._resolve_map_placeholders(self.internal_name, map_variable=map_variable) step_success_bool = True for internal_branch_name, _ in self.branches.items(): effective_branch_name = self._resolve_map_placeholders(internal_branch_name, map_variable=map_variable) @@ -307,7 +348,7 @@ def fan_in(self, map_variable: TypeMapVariable = None, **kwargs): step_success_bool = False # Collate all the results and update the status of the step - effective_internal_name = self._resolve_map_placeholders(self.internal_name, map_variable=map_variable) + step_log = self._context.run_log_store.get_step_log(effective_internal_name, self._context.run_id) if step_success_bool: #  If none failed @@ -333,12 +374,49 @@ class MapNode(CompositeNode): The internal naming convention creates branches dynamically based on the iteration value """ + # TODO: Should it be one function or a dict of functions indexed by the return name + node_type: str = Field(default="map", serialization_alias="type") iterate_on: str iterate_as: str + iterate_index: bool = Field(default=False) # TODO: Need to design this + reducer: Optional[str] = Field(default=None) branch: Graph is_composite: bool = True + def get_summary(self) -> Dict[str, Any]: + summary = { + "name": self.name, + "type": self.node_type, + "branch": self.branch.get_summary(), + "iterate_on": self.iterate_on, + "iterate_as": self.iterate_as, + "iterate_index": self.iterate_index, + "reducer": self.reducer, + } + + return summary + + def get_reducer_function(self): + if not self.reducer: + return lambda *x: list(x) # returns a list of the args + + # try a lambda function + try: + f = eval(self.reducer) + if callable(f): + return f + except SyntaxError: + logger.info(f"{self.reducer} is not a lambda function") + + # Load the reducer function from dotted path + mod, func = utils.get_module_and_attr_names(self.reducer) + sys.path.insert(0, os.getcwd()) # Need to add the current directory to path + imported_module = importlib.import_module(mod) + f = getattr(imported_module, func) + + return f + @classmethod def parse_from_config(cls, config: Dict[str, Any]) -> "MapNode": internal_name = cast(str, config.get("internal_name")) @@ 
-353,6 +431,34 @@ def parse_from_config(cls, config: Dict[str, Any]) -> "MapNode": ) return cls(branch=branch, **config) + @property + def branch_returns(self): + branch_returns: List[Tuple[str, Union[ObjectParameter, MetricParameter, JsonParameter]]] = [] + for _, node in self.branch.nodes.items(): + if isinstance(node, TaskNode): + for task_return in node.executable.returns: + if task_return.kind == "json": + branch_returns.append((task_return.name, JsonParameter(kind="json", value="", reduced=False))) + elif task_return.kind == "object": + branch_returns.append( + ( + task_return.name, + ObjectParameter( + kind="object", + value="Will be reduced", + reduced=False, + ), + ) + ) + elif task_return.kind == "metric": + branch_returns.append( + (task_return.name, MetricParameter(kind="metric", value="", reduced=False)) + ) + else: + raise Exception("kind should be one of json, object or metric") + + return branch_returns + def _get_branch_by_name(self, branch_name: str) -> Graph: """ Retrieve a branch by name. @@ -381,7 +487,7 @@ def fan_out(self, map_variable: TypeMapVariable = None, **kwargs): executor (BaseExecutor): The executor class as defined by the config map_variable (dict, optional): If the node is part of map. Defaults to None. """ - iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on] + iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on].get_value() # Prepare the branch logs for iter_variable in iterate_on: @@ -392,6 +498,21 @@ def fan_out(self, map_variable: TypeMapVariable = None, **kwargs): branch_log.status = defaults.PROCESSING self._context.run_log_store.add_branch_log(branch_log, self._context.run_id) + # Gather all the returns of the task nodes and create parameters in reduced=False state. + raw_parameters = {} + if map_variable: + # If we are in a map state already, the param should have an index of the map variable. + for _, v in map_variable.items(): + for branch_return in self.branch_returns: + param_name, param_type = branch_return + raw_parameters[f"{param_name}_{v}"] = param_type.copy() + else: + for branch_return in self.branch_returns: + param_name, param_type = branch_return + raw_parameters[f"{param_name}"] = param_type.copy() + + self._context.run_log_store.set_parameters(parameters=raw_parameters, run_id=self._context.run_id) + def execute_as_graph(self, map_variable: TypeMapVariable = None, **kwargs): """ This function does the actual execution of the branch of the map node. @@ -418,11 +539,10 @@ def execute_as_graph(self, map_variable: TypeMapVariable = None, **kwargs): map_variable (dict): The map variables the graph belongs to **kwargs: Optional kwargs passed around """ - from magnus import entrypoints iterate_on = None try: - iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on] + iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on].get_value() except KeyError: raise Exception( f"Expected parameter {self.iterate_on} not present in Run Log parameters, was it ever set before?" @@ -433,34 +553,11 @@ def execute_as_graph(self, map_variable: TypeMapVariable = None, **kwargs): self.fan_out(map_variable=map_variable, **kwargs) - jobs = [] - # Given that we can have nesting and complex graphs, controlling the number of processess is hard.
- # A better way is to actually submit the job to some process scheduler which does resource management for iter_variable in iterate_on: effective_map_variable = map_variable or OrderedDict() effective_map_variable[self.iterate_as] = iter_variable - if self._context.executor._is_parallel_execution(): - # Trigger parallel jobs - action = entrypoints.execute_single_brach - kwargs = { - "configuration_file": self._context.configuration_file, - "pipeline_file": self._context.pipeline_file, - "branch_name": self.branch.internal_branch_name.replace(" ", defaults.COMMAND_FRIENDLY_CHARACTER), - "run_id": self._context.run_id, - "map_variable": json.dumps(effective_map_variable), - "tag": self._context.tag, - } - process = multiprocessing.Process(target=action, kwargs=kwargs) - jobs.append(process) - process.start() - - else: - # If parallel is not enabled, execute them sequentially - self._context.executor.execute_graph(self.branch, map_variable=effective_map_variable, **kwargs) - - for job in jobs: - job.join() + self._context.executor.execute_graph(self.branch, map_variable=effective_map_variable, **kwargs) self.fan_in(map_variable=map_variable, **kwargs) @@ -474,9 +571,11 @@ def fan_in(self, map_variable: TypeMapVariable = None, **kwargs): executor (BaseExecutor): The executor class as defined by the config map_variable (dict, optional): If the node is part of map node. Defaults to None. """ - iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on] + params = self._context.run_log_store.get_parameters(self._context.run_id) + iterate_on = params[self.iterate_on].get_value() # # Find status of the branches step_success_bool = True + effective_internal_name = self._resolve_map_placeholders(self.internal_name, map_variable=map_variable) for iter_variable in iterate_on: effective_branch_name = self._resolve_map_placeholders( @@ -487,7 +586,6 @@ def fan_in(self, map_variable: TypeMapVariable = None, **kwargs): step_success_bool = False # Collate all the results and update the status of the step - effective_internal_name = self._resolve_map_placeholders(self.internal_name, map_variable=map_variable) step_log = self._context.run_log_store.get_step_log(effective_internal_name, self._context.run_id) if step_success_bool: #  If none failed and nothing is waiting @@ -497,6 +595,35 @@ def fan_in(self, map_variable: TypeMapVariable = None, **kwargs): self._context.run_log_store.add_step_log(step_log, self._context.run_id) + # Apply the reduce function and reduce the returns of the task nodes. + # The final value of the parameter is the result of the reduce function. + reducer_f = self.get_reducer_function() + + if map_variable: + # If we are in a map state already, the param should have an index of the map variable. 
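+ # Sketch: with iterate_on=["a", "b"] and a branch return "x", the un-reduced params "x_a" and "x_b" are gathered into to_reduce and reducer_f folds them into the step's final value.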
+ for _, v in map_variable.items(): + for branch_return in self.branch_returns: + param_name, _ = branch_return + to_reduce = [] + for iter_variable in iterate_on: + to_reduce.append(params[f"{param_name}_{iter_variable}"].get_value()) + + param_name = f"{param_name}_{v}" + params[param_name].value = reducer_f(*to_reduce) + params[param_name].reduced = True + else: + for branch_return in self.branch_returns: + param_name, _ = branch_return + + to_reduce = [] + for iter_variable in iterate_on: + to_reduce.append(params[f"{param_name}_{iter_variable}"].get_value()) + + params[param_name].value = reducer_f(*to_reduce) + params[param_name].reduced = True + + self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id) + class DagNode(CompositeNode): """ @@ -515,6 +642,13 @@ class DagNode(CompositeNode): is_composite: bool = True internal_branch_name: Annotated[str, Field(validate_default=True)] = "" + def get_summary(self) -> Dict[str, Any]: + summary = { + "name": self.name, + "type": self.node_type, + } + return summary + @field_validator("internal_branch_name") @classmethod def validate_internal_branch_name(cls, internal_branch_name: str, info: ValidationInfo): @@ -646,13 +780,27 @@ class StubNode(ExecutableNode): """ node_type: str = Field(default="stub", serialization_alias="type") - model_config = ConfigDict(extra="allow") + model_config = ConfigDict(extra="ignore") + + def get_summary(self) -> Dict[str, Any]: + summary = { + "name": self.name, + "type": self.node_type, + } + + return summary @classmethod def parse_from_config(cls, config: Dict[str, Any]) -> "StubNode": return cls(**config) - def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt: + def execute( + self, + mock=False, + map_variable: TypeMapVariable = None, + attempt_number: int = 1, + **kwargs, + ) -> StepLog: """ Do Nothing node.
We just send a success attempt log back to the caller @@ -665,11 +813,17 @@ def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> Returns: [type]: [description] """ - attempt_log = self._context.run_log_store.create_attempt_log() + step_log = self._context.run_log_store.get_step_log(self._get_step_log_name(map_variable), self._context.run_id) + + attempt_log = datastore.StepAttempt( + status=defaults.SUCCESS, + start_time=str(datetime.now()), + end_time=str(datetime.now()), + attempt_number=attempt_number, + ) + + step_log.status = attempt_log.status - attempt_log.start_time = str(datetime.now()) - attempt_log.status = defaults.SUCCESS # This is a dummy node and always will be success + step_log.attempts.append(attempt_log) - attempt_log.end_time = str(datetime.now()) - attempt_log.duration = utils.get_duration_between_datetime_strings(attempt_log.start_time, attempt_log.end_time) - return attempt_log + return step_log diff --git a/magnus/extensions/experiment_tracker/mlflow/__init__.py b/runnable/extensions/run_log_store/__init__.py similarity index 100% rename from magnus/extensions/experiment_tracker/mlflow/__init__.py rename to runnable/extensions/run_log_store/__init__.py diff --git a/magnus/extensions/run_log_store/__init__.py b/runnable/extensions/run_log_store/chunked_file_system/__init__.py similarity index 100% rename from magnus/extensions/run_log_store/__init__.py rename to runnable/extensions/run_log_store/chunked_file_system/__init__.py diff --git a/magnus/extensions/run_log_store/chunked_file_system/implementation.py b/runnable/extensions/run_log_store/chunked_file_system/implementation.py similarity index 90% rename from magnus/extensions/run_log_store/chunked_file_system/implementation.py rename to runnable/extensions/run_log_store/chunked_file_system/implementation.py index 425b42e4..39cca3a4 100644 --- a/magnus/extensions/run_log_store/chunked_file_system/implementation.py +++ b/runnable/extensions/run_log_store/chunked_file_system/implementation.py @@ -2,10 +2,10 @@ import logging from pathlib import Path from string import Template -from typing import Optional, Sequence, Union +from typing import Any, Dict, Optional, Sequence, Union -from magnus import defaults, utils -from magnus.extensions.run_log_store.generic_chunked import ChunkedRunLogStore +from runnable import defaults, utils +from runnable.extensions.run_log_store.generic_chunked import ChunkedRunLogStore logger = logging.getLogger(defaults.LOGGER_NAME) @@ -21,6 +21,11 @@ class ChunkedFileSystemRunLogStore(ChunkedRunLogStore): service_name: str = "chunked-fs" log_folder: str = defaults.LOG_LOCATION_FOLDER + def get_summary(self) -> Dict[str, Any]: + summary = {"Type": self.service_name, "Location": self.log_folder} + + return summary + def get_matches(self, run_id: str, name: str, multiple_allowed: bool = False) -> Optional[Union[Sequence[T], T]]: """ Get contents of files matching the pattern name* diff --git a/magnus/extensions/run_log_store/chunked_file_system/__init__.py b/runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py similarity index 100% rename from magnus/extensions/run_log_store/chunked_file_system/__init__.py rename to runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py diff --git a/magnus/extensions/run_log_store/chunked_k8s_pvc/implementation.py b/runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py similarity index 75% rename from magnus/extensions/run_log_store/chunked_k8s_pvc/implementation.py rename to
runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py index 1de5cdb2..a6876732 100644 --- a/magnus/extensions/run_log_store/chunked_k8s_pvc/implementation.py +++ b/runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py @@ -1,8 +1,8 @@ import logging from pathlib import Path -from magnus import defaults -from magnus.extensions.run_log_store.chunked_file_system.implementation import ChunkedFileSystemRunLogStore +from runnable import defaults +from runnable.extensions.run_log_store.chunked_file_system.implementation import ChunkedFileSystemRunLogStore logger = logging.getLogger(defaults.NAME) diff --git a/magnus/extensions/run_log_store/chunked_k8s_pvc/integration.py b/runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py similarity index 83% rename from magnus/extensions/run_log_store/chunked_k8s_pvc/integration.py rename to runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py index 233e3c26..f3ab736b 100644 --- a/magnus/extensions/run_log_store/chunked_k8s_pvc/integration.py +++ b/runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py @@ -1,8 +1,8 @@ import logging from typing import cast -from magnus import defaults -from magnus.integration import BaseIntegration +from runnable import defaults +from runnable.integration import BaseIntegration logger = logging.getLogger(defaults.NAME) @@ -45,8 +45,10 @@ class ArgoCompute(BaseIntegration): service_provider = "chunked-k8s-pvc" # The actual implementation of the service def configure_for_traversal(self, **kwargs): - from magnus.extensions.executor.argo.implementation import ArgoExecutor, UserVolumeMounts - from magnus.extensions.run_log_store.chunked_k8s_pvc.implementation import ChunkedK8PersistentVolumeRunLogstore + from runnable.extensions.executor.argo.implementation import ArgoExecutor, UserVolumeMounts + from runnable.extensions.run_log_store.chunked_k8s_pvc.implementation import ( + ChunkedK8PersistentVolumeRunLogstore, + ) self.executor = cast(ArgoExecutor, self.executor) self.service = cast(ChunkedK8PersistentVolumeRunLogstore, self.service) diff --git a/magnus/extensions/run_log_store/db/implementation_FF.py b/runnable/extensions/run_log_store/db/implementation_FF.py similarity index 97% rename from magnus/extensions/run_log_store/db/implementation_FF.py rename to runnable/extensions/run_log_store/db/implementation_FF.py index 33e6acea..a7788284 100644 --- a/magnus/extensions/run_log_store/db/implementation_FF.py +++ b/runnable/extensions/run_log_store/db/implementation_FF.py @@ -5,8 +5,8 @@ from string import Template from typing import Any, Dict, List, Optional, Union, cast -from magnus import defaults, utils -from magnus.extensions.run_log_store.generic_chunked import ChunkedRunLogStore +from runnable import defaults, utils +from runnable.extensions.run_log_store.generic_chunked import ChunkedRunLogStore logger = logging.getLogger(defaults.LOGGER_NAME) diff --git a/magnus/extensions/run_log_store/db/integration_FF.py b/runnable/extensions/run_log_store/db/integration_FF.py similarity index 100% rename from magnus/extensions/run_log_store/db/integration_FF.py rename to runnable/extensions/run_log_store/db/integration_FF.py diff --git a/magnus/extensions/run_log_store/chunked_k8s_pvc/__init__.py b/runnable/extensions/run_log_store/file_system/__init__.py similarity index 100% rename from magnus/extensions/run_log_store/chunked_k8s_pvc/__init__.py rename to runnable/extensions/run_log_store/file_system/__init__.py diff --git 
a/magnus/extensions/run_log_store/file_system/implementation.py b/runnable/extensions/run_log_store/file_system/implementation.py similarity index 93% rename from magnus/extensions/run_log_store/file_system/implementation.py rename to runnable/extensions/run_log_store/file_system/implementation.py index 00211ef3..9f1ec07d 100644 --- a/magnus/extensions/run_log_store/file_system/implementation.py +++ b/runnable/extensions/run_log_store/file_system/implementation.py @@ -1,9 +1,10 @@ import json import logging from pathlib import Path +from typing import Any, Dict -from magnus import defaults, exceptions, utils -from magnus.datastore import BaseRunLogStore, RunLog +from runnable import defaults, exceptions, utils +from runnable.datastore import BaseRunLogStore, RunLog logger = logging.getLogger(defaults.LOGGER_NAME) @@ -37,6 +38,11 @@ class FileSystemRunLogstore(BaseRunLogStore): def log_folder_name(self): return self.log_folder + def get_summary(self) -> Dict[str, Any]: + summary = {"Type": self.service_name, "Location": self.log_folder} + + return summary + def write_to_folder(self, run_log: RunLog): """ Write the run log to the folder @@ -108,9 +114,7 @@ def create_run_log( run_log = RunLog( run_id=run_id, dag_hash=dag_hash, - use_cached=use_cached, tag=tag, - original_run_id=original_run_id, status=status, ) self.write_to_folder(run_log) diff --git a/magnus/extensions/run_log_store/generic_chunked.py b/runnable/extensions/run_log_store/generic_chunked.py similarity index 99% rename from magnus/extensions/run_log_store/generic_chunked.py rename to runnable/extensions/run_log_store/generic_chunked.py index 2156dcf0..a5f93688 100644 --- a/magnus/extensions/run_log_store/generic_chunked.py +++ b/runnable/extensions/run_log_store/generic_chunked.py @@ -6,8 +6,8 @@ from string import Template from typing import Any, Dict, Optional, Sequence, Union -from magnus import defaults, exceptions -from magnus.datastore import BaseRunLogStore, BranchLog, RunLog, StepLog +from runnable import defaults, exceptions +from runnable.datastore import BaseRunLogStore, BranchLog, RunLog, StepLog logger = logging.getLogger(defaults.LOGGER_NAME) @@ -305,9 +305,7 @@ def create_run_log( run_log = RunLog( run_id=run_id, dag_hash=dag_hash, - use_cached=use_cached, tag=tag, - original_run_id=original_run_id, status=status, ) diff --git a/magnus/extensions/run_log_store/file_system/__init__.py b/runnable/extensions/run_log_store/k8s_pvc/__init__.py similarity index 100% rename from magnus/extensions/run_log_store/file_system/__init__.py rename to runnable/extensions/run_log_store/k8s_pvc/__init__.py diff --git a/magnus/extensions/run_log_store/k8s_pvc/implementation.py b/runnable/extensions/run_log_store/k8s_pvc/implementation.py similarity index 76% rename from magnus/extensions/run_log_store/k8s_pvc/implementation.py rename to runnable/extensions/run_log_store/k8s_pvc/implementation.py index 943489b4..c3e544bb 100644 --- a/magnus/extensions/run_log_store/k8s_pvc/implementation.py +++ b/runnable/extensions/run_log_store/k8s_pvc/implementation.py @@ -1,8 +1,8 @@ import logging from pathlib import Path -from magnus import defaults -from magnus.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore +from runnable import defaults +from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore logger = logging.getLogger(defaults.NAME) diff --git a/magnus/extensions/run_log_store/k8s_pvc/integration.py b/runnable/extensions/run_log_store/k8s_pvc/integration.py 
similarity index 84% rename from magnus/extensions/run_log_store/k8s_pvc/integration.py rename to runnable/extensions/run_log_store/k8s_pvc/integration.py index 4fa4f63a..5153e843 100644 --- a/magnus/extensions/run_log_store/k8s_pvc/integration.py +++ b/runnable/extensions/run_log_store/k8s_pvc/integration.py @@ -1,8 +1,8 @@ import logging from typing import cast -from magnus import defaults -from magnus.integration import BaseIntegration +from runnable import defaults +from runnable.integration import BaseIntegration logger = logging.getLogger(defaults.NAME) @@ -45,8 +45,8 @@ class ArgoCompute(BaseIntegration): service_provider = "k8s-pvc" # The actual implementation of the service def configure_for_traversal(self, **kwargs): - from magnus.extensions.executor.argo.implementation import ArgoExecutor, UserVolumeMounts - from magnus.extensions.run_log_store.k8s_pvc.implementation import K8PersistentVolumeRunLogstore + from runnable.extensions.executor.argo.implementation import ArgoExecutor, UserVolumeMounts + from runnable.extensions.run_log_store.k8s_pvc.implementation import K8PersistentVolumeRunLogstore self.executor = cast(ArgoExecutor, self.executor) self.service = cast(K8PersistentVolumeRunLogstore, self.service) diff --git a/magnus/extensions/run_log_store/k8s_pvc/__init__.py b/runnable/extensions/secrets/__init__.py similarity index 100% rename from magnus/extensions/run_log_store/k8s_pvc/__init__.py rename to runnable/extensions/secrets/__init__.py diff --git a/magnus/extensions/secrets/__init__.py b/runnable/extensions/secrets/dotenv/__init__.py similarity index 100% rename from magnus/extensions/secrets/__init__.py rename to runnable/extensions/secrets/dotenv/__init__.py diff --git a/magnus/extensions/secrets/dotenv/implementation.py b/runnable/extensions/secrets/dotenv/implementation.py similarity index 97% rename from magnus/extensions/secrets/dotenv/implementation.py rename to runnable/extensions/secrets/dotenv/implementation.py index d1d8a637..acce276f 100644 --- a/magnus/extensions/secrets/dotenv/implementation.py +++ b/runnable/extensions/secrets/dotenv/implementation.py @@ -1,8 +1,8 @@ import logging import os -from magnus import defaults, exceptions, utils -from magnus.secrets import BaseSecrets +from runnable import defaults, exceptions, utils +from runnable.secrets import BaseSecrets logger = logging.getLogger(defaults.LOGGER_NAME) diff --git a/magnus/extensions/secrets/dotenv/__init__.py b/runnable/extensions/secrets/env_secrets/__init__.py similarity index 100% rename from magnus/extensions/secrets/dotenv/__init__.py rename to runnable/extensions/secrets/env_secrets/__init__.py diff --git a/magnus/extensions/secrets/env_secrets/implementation.py b/runnable/extensions/secrets/env_secrets/implementation.py similarity index 93% rename from magnus/extensions/secrets/env_secrets/implementation.py rename to runnable/extensions/secrets/env_secrets/implementation.py index d0f2c319..5b88d1bd 100644 --- a/magnus/extensions/secrets/env_secrets/implementation.py +++ b/runnable/extensions/secrets/env_secrets/implementation.py @@ -1,8 +1,8 @@ import logging import os -from magnus import defaults, exceptions -from magnus.secrets import BaseSecrets +from runnable import defaults, exceptions +from runnable.secrets import BaseSecrets logger = logging.getLogger(defaults.LOGGER_NAME) diff --git a/magnus/graph.py b/runnable/graph.py similarity index 97% rename from magnus/graph.py rename to runnable/graph.py index 2bf46cd7..f9a54172 100644 --- a/magnus/graph.py +++ b/runnable/graph.py @@ 
-6,7 +6,7 @@ from pydantic import BaseModel, Field, SerializeAsAny from stevedore import driver -from magnus import defaults, exceptions +from runnable import defaults, exceptions logger = logging.getLogger(defaults.LOGGER_NAME) logging.getLogger("stevedore").setLevel(logging.CRITICAL) @@ -26,6 +26,17 @@ class Graph(BaseModel): internal_branch_name: str = Field(default="", exclude=True) nodes: SerializeAsAny[Dict[str, "BaseNode"]] = Field(default_factory=dict, serialization_alias="steps") + def get_summary(self) -> Dict[str, Any]: + """ + Return a summary of the graph + """ + return { + "name": self.name, + "description": self.description, + "start_at": self.start_at, + "nodes": [node.get_summary() for node in list(self.nodes.values())], + } + def get_node_by_name(self, name: str) -> "BaseNode": """ Return the Node object by the name @@ -292,7 +303,7 @@ def add_terminal_nodes( self.add_node(fail_node) -from magnus.nodes import BaseNode # noqa: E402 +from runnable.nodes import BaseNode # noqa: E402 Graph.model_rebuild() diff --git a/magnus/integration.py b/runnable/integration.py similarity index 89% rename from magnus/integration.py rename to runnable/integration.py index 1ae9a436..d9794fef 100644 --- a/magnus/integration.py +++ b/runnable/integration.py @@ -2,8 +2,8 @@ from stevedore import extension -from magnus import defaults -from magnus.executor import BaseExecutor +from runnable import defaults +from runnable.executor import BaseExecutor logger = logging.getLogger(defaults.LOGGER_NAME) logging.getLogger("stevedore").setLevel(logging.CRITICAL) @@ -84,7 +84,7 @@ def get_integration_handler(executor: "BaseExecutor", service: object) -> BaseIn logger.info(f"Identified an integration pattern {kls.obj}") integrations.append(kls.obj) - # Get all the implementations defined by the magnus package + # Get all the implementations defined by the runnable package for kls in BaseIntegration.__subclasses__(): # Match the exact service type if kls.service_type == service_type and kls.service_provider == service_name: @@ -95,14 +95,14 @@ def get_integration_handler(executor: "BaseExecutor", service: object) -> BaseIn if len(integrations) > 1: msg = ( f"Multiple integrations between {executor.service_name} and {service_name} of type {service_type} found. " - "If you defined an integration pattern, please ensure it is specific and does not conflict with magnus " + "If you defined an integration pattern, please ensure it is specific and does not conflict with runnable " " implementations." ) logger.exception(msg) raise Exception(msg) if not integrations: - logger.warning( + logger.info( f"Could not find an integration pattern for {executor.service_name} and {service_name} for {service_type}." " This implies that there is no need to change the configurations." ) @@ -163,7 +163,7 @@ def validate(self, **kwargs): "Run log generated by buffered run log store are not persisted. " "Re-running this run, in case of a failure, is not possible" ) - logger.warning(msg) + logger.info(msg) class DoNothingCatalog(BaseIntegration): @@ -176,7 +176,7 @@ class DoNothingCatalog(BaseIntegration): def validate(self, **kwargs): msg = "A do-nothing catalog does not hold any data and therefore cannot pass data between nodes." - logger.warning(msg) + logger.info(msg) class DoNothingSecrets(BaseIntegration): @@ -189,17 +189,4 @@ class DoNothingSecrets(BaseIntegration): def validate(self, **kwargs): msg = "A do-nothing secrets does not hold any secrets and therefore cannot return you any secrets." 
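The handler resolution above matches `BaseIntegration` subclasses on `service_type` and `service_provider`. A minimal sketch of a custom integration under that contract — the class name, provider name, and the assumption that raising from `validate` vetoes the pairing are mine, not the library's:

```python
from runnable.integration import BaseIntegration


class MyCatalogIntegration(BaseIntegration):
    """Hypothetical integration between an executor and a custom catalog."""

    service_type = "catalog"         # One of secret, catalog, datastore
    service_provider = "my-catalog"  # The actual implementation of the service

    def validate(self, **kwargs):
        # Assumption: raising here aborts the run before execution starts.
        raise Exception("my-catalog cannot be used with this executor")
```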
- logger.warning(msg) - - -class DoNothingExperimentTracker(BaseIntegration): - """ - Integration between any executor and do nothing experiment tracker - """ - - service_type = "experiment_tracker" # One of secret, catalog, datastore - service_provider = "do-nothing" # The actual implementation of the service - - def validate(self, **kwargs): - msg = "A do-nothing experiment tracker does nothing and therefore cannot track anything." - logger.warning(msg) + logger.info(msg) diff --git a/runnable/names.py b/runnable/names.py new file mode 100644 index 00000000..d1bc5ffc --- /dev/null +++ b/runnable/names.py @@ -0,0 +1,546 @@ +import random + +left = [ + "descent", + "citric", + "grating", + "glossy", + "undecidable", + "senile", + "dark", + "rude", + "tart", + "rubbery", + "internal", + "primordial", + "timid", + "trite", + "inverted", + "rancid", + "finite", + "hidden", + "grouchy", + "local", + "affable", + "vintage", + "online", + "grilled", + "absolute", + "chocolaty", + "fermented", + "obnoxious", + "sensitive", + "exponential", + "cheerful", + "persistent", + "spicy", + "humble", + "mean", + "ternary", + "acidic", + "novel", + "inverse", + "shortest", + "grizzled", + "honest", + "symmetric", + "jumbo", + "naive", + "recursive", + "serious", + "rowdy", + "offline", + "quick", + "parallel", + "stern", + "decidable", + "pureed", + "nutty", + "brute", + "damaged", + "gentle", + "deterministic", + "ripe", + "piquant", + "poached", + "unsolvable", + "abstract", + "sticky", + "gritty", + "dynamic", + "candied", + "loyal", + "flavorful", + "visible", + "zesty", + "approximate", + "icy", + "immature", + "bitter", + "stable", + "threaded", + "juicy", + "baked", + "hushed", + "frozen", + "selfish", + "planar", + "clever", + "shabby", + "extended", + "concave", + "pleasant", + "perfect", + "sugary", + "patient", + "unsorted", + "odious", + "piercing", + "careful", + "tattered", + "ferocious", + "crunchy", + "toasted", + "forgiving", + "heartless", + "sleek", + "undirected", + "optimal", + "tractable", + "sharp", + "eager", + "plain", + "shrewd", + "maximum", + "pounded", + "dull", + "zingy", + "counting", + "sophisticated", + "contemporary", + "proud", + "yummy", + "radiant", + "religious", + "glowing", + "messy", + "external", + "balanced", + "new", + "prepared", + "refined", + "grim", + "syrupy", + "graceful", + "annoying", + "tender", + "blazing", + "noisy", + "delicious", + "matte", + "witty", + "polite", + "nearest", + "helpful", + "thundering", + "adventurous", + "milky", + "mute", + "ordered", + "blended", + "pallid", + "ascent", + "roaring", + "brave", + "curious", + "devout", + "energetic", + "burning", + "merciless", + "orthogonal", + "juvenile", + "accepting", + "topped", + "lean", + "greasy", + "deafening", + "reduced", + "obliging", + "null", + "rank", + "shiny", + "forward", + "boolean", + "partial", + "current", + "brilliant", + "bland", + "violent", + "amiable", + "loud", + "savage", + "bright", + "threadbare", + "minimum", + "creamy", + "doughy", + "mild", + "wise", + "urbane", + "greedy", + "genteel", + "sweet", + "fresh", + "sunny", + "linear", + "uniform", + "cheesy", + "cold", + "cyclic", + "obsolete", + "calm", + "augmenting", + "asymptotic", + "tough", + "proper", + "quiet", + "bounded", + "rich", + "complete", + "archaic", + "seasoned", + "intractable", + "light", + "funny", + "muffled", + "silly", + "clean", + "edible", + "vicious", + "dyadic", + "simple", + "smoky", + "caramelized", + "fixed", + "excited", + "recent", + "cautious", + "nervous", + "muted", + "trusting", + 
"mode", + "oriented", + "savory", + "active", + "young", + "amortized", + "ambitious", + "meek", + "ragged", + "terminal", + "blaring", + "factorial", + "relaxed", + "mashed", + "weighted", + "rectilinear", + "warm", + "cruel", + "organic", + "faded", + "generous", + "lazy", + "worn", + "fried", + "isomorphic", + "booming", + "nutritious", + "median", + "colorful", + "tossed", + "briny", + "lower", + "exact", + "oscillating", + "friendly", + "nondeterministic", + "humane", + "sour", + "happy", + "creative", + "marinated", + "dry", + "simmered", + "strong", + "connected", + "free", + "amicable", + "exhaustive", + "vibrant", + "indulgent", + "sparse", + "swarm", + "quadratic", + "jellied", + "courtly", + "independent", + "salty", + "faint", + "adaptive", + "antique", + "polynomial", + "saucy", + "randomized", + "binary", + "average", + "cloying", + "frayed", + "objective", + "blocking", + "steamed", + "feasible", + "random", + "stale", + "braised", + "gourmet", + "chalky", + "moist", + "formal", + "brute force", + "ancient", + "wan", + "square", + "matching", + "smoked", + "unary", + "covering", + "kind", + "modern", + "tense", + "old", + "associative", + "daring", + "spatial", + "constant", + "stringy", + "concurrent", + "inventive", + "brutal", + "bipartite", + "charitable", + "mature", + "universal", + "stubborn", + "best", + "callous", + "commutative", + "advanced", + "vain", + "neat", + "soft", + "flat", + "golden", + "oily", + "merry", + "intense", + "succulent", + "pale", + "drab", + "impulsive", + "crispy", + "lenient", + "raw", + "tangy", + "inclusive", + "minty", + "acyclic", + "smart", + "chewy", + "bold", + "aged", + "vivid", + "silent", + "weathered", + "respectful", + "buttery", + "hoary", + "elegant", +] + +right = [ + "albattani", + "allen", + "almeida", + "agnesi", + "archimedes", + "ardinghelli", + "aryabhata", + "austin", + "babbage", + "banach", + "bardeen", + "bartik", + "bassi", + "beaver", + "bell", + "benz", + "bhabha", + "bhaskara", + "blackwell", + "bohr", + "booth", + "borg", + "bose", + "boyd", + "brahmagupta", + "brattain", + "brown", + "carson", + "chandrasekhar", + "shannon", + "clarke", + "colden", + "cori", + "cray", + "curran", + "curie", + "darwin", + "davinci", + "dijkstra", + "dubinsky", + "easley", + "edison", + "einstein", + "elion", + "engelbart", + "euclid", + "euler", + "fermat", + "fermi", + "feynman", + "franklin", + "galileo", + "gates", + "goldberg", + "goldstine", + "goldwasser", + "golick", + "goodall", + "haibt", + "hamilton", + "hawking", + "heisenberg", + "hermann", + "heyrovsky", + "hodgkin", + "hoover", + "hopper", + "hugle", + "hypatia", + "jackson", + "jang", + "jennings", + "jepsen", + "johnson", + "joliot", + "jones", + "kalam", + "kare", + "keller", + "kepler", + "khorana", + "kilby", + "kirch", + "knuth", + "kowalevski", + "lalande", + "lamarr", + "lamport", + "leakey", + "leavitt", + "lewin", + "lichterman", + "liskov", + "lovelace", + "lumiere", + "mahavira", + "mayer", + "mccarthy", + "mcclintock", + "mclean", + "mcnulty", + "meitner", + "meninsky", + "mestorf", + "minsky", + "mirzakhani", + "morse", + "murdock", + "neumann", + "newton", + "nightingale", + "nobel", + "noether", + "northcutt", + "noyce", + "panini", + "pare", + "pasteur", + "payne", + "perlman", + "pike", + "poincare", + "poitras", + "ptolemy", + "raman", + "ramanujan", + "ride", + "montalcini", + "ritchie", + "roentgen", + "rosalind", + "saha", + "sammet", + "shaw", + "shirley", + "shockley", + "sinoussi", + "snyder", + "spence", + "stallman", + "stonebraker", + 
"swanson", + "swartz", + "swirles", + "tesla", + "thompson", + "torvalds", + "turing", + "varahamihira", + "visvesvaraya", + "volhard", + "wescoff", + "wiles", + "williams", + "wilson", + "wing", + "wozniak", + "wright", + "yalow", + "yonath", +] + + +def get_random_name(sep="-") -> str: + """ + Returns a random name in the format of docker container names. + + Args: + sep (str, optional): The seperator to use between the names. Defaults to '-'. + + Returns: + str: The random name. + """ + r = random.SystemRandom() + name = "%s%s%s" % (r.choice(left), sep, r.choice(right)) + return name diff --git a/magnus/nodes.py b/runnable/nodes.py similarity index 93% rename from magnus/nodes.py rename to runnable/nodes.py index e28b06fd..b6787887 100644 --- a/magnus/nodes.py +++ b/runnable/nodes.py @@ -4,10 +4,10 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator -import magnus.context as context -from magnus import defaults, exceptions -from magnus.datastore import StepAttempt -from magnus.defaults import TypeMapVariable +import runnable.context as context +from runnable import defaults, exceptions +from runnable.datastore import StepLog +from runnable.defaults import TypeMapVariable logger = logging.getLogger(defaults.LOGGER_NAME) @@ -64,7 +64,7 @@ def _command_friendly_name(self, replace_with=defaults.COMMAND_FRIENDLY_CHARACTE @classmethod def _get_internal_name_from_command_name(cls, command_name: str) -> str: """ - Replace Magnus specific character (%) with whitespace. + Replace runnable specific character (%) with whitespace. The opposite of _command_friendly_name. Args: @@ -274,7 +274,13 @@ def _get_max_attempts(self) -> int: ... @abstractmethod - def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt: + def execute( + self, + mock=False, + map_variable: TypeMapVariable = None, + attempt_number: int = 1, + **kwargs, + ) -> StepLog: """ The actual function that does the execution of the command in the config. @@ -282,7 +288,7 @@ def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> composite nodes. Args: - executor (magnus.executor.BaseExecutor): The executor class + executor (runnable.executor.BaseExecutor): The executor class mock (bool, optional): Don't run, just pretend. Defaults to False. map_variable (str, optional): The value of the map iteration variable, if part of a map node. Defaults to ''. @@ -301,7 +307,7 @@ def execute_as_graph(self, map_variable: TypeMapVariable = None, **kwargs): Function should only be implemented for composite nodes like dag, map, parallel. Args: - executor (magnus.executor.BaseExecutor): The executor. + executor (runnable.executor.BaseExecutor): The executor. Raises: NotImplementedError: Base class, hence not implemented. @@ -317,7 +323,7 @@ def fan_out(self, map_variable: TypeMapVariable = None, **kwargs): Function should only be implemented for composite nodes like dag, map, parallel. Args: - executor (magnus.executor.BaseExecutor): The executor. + executor (runnable.executor.BaseExecutor): The executor. map_variable (str, optional): The value of the map iteration variable, if part of a map node. Raises: @@ -334,7 +340,7 @@ def fan_in(self, map_variable: TypeMapVariable = None, **kwargs): Function should only be implemented for composite nodes like dag, map, parallel. Args: - executor (magnus.executor.BaseExecutor): The executor. + executor (runnable.executor.BaseExecutor): The executor. 
map_variable (str, optional): The value of the map iteration variable, if part of a map node. Raises: @@ -356,6 +362,15 @@ def parse_from_config(cls, config: Dict[str, Any]) -> "BaseNode": """ ... + @abstractmethod + def get_summary(self) -> Dict[str, Any]: + """ + Return the summary of the node + + Returns: + Dict[str, Any]: The summary of the node + """ + # --8<-- [end:docs] class TraversalNode(BaseNode): @@ -449,13 +464,19 @@ def _get_catalog_settings(self) -> Dict[str, Any]: def _get_max_attempts(self) -> int: raise Exception("This is a composite node and does not have a max_attempts") - def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt: + def execute( + self, + mock=False, + map_variable: TypeMapVariable = None, + attempt_number: int = 1, + **kwargs, + ) -> StepLog: raise Exception("This is a composite node and does not have an execute function") class TerminalNode(BaseNode): def _get_on_failure_node(self) -> str: - raise exceptions.TerminalNodeError() + return "" def _get_next_node(self) -> str: raise exceptions.TerminalNodeError() diff --git a/magnus/parameters.py b/runnable/parameters.py similarity index 51% rename from magnus/parameters.py rename to runnable/parameters.py index 59953dca..7cc59680 100644 --- a/magnus/parameters.py +++ b/runnable/parameters.py @@ -2,21 +2,23 @@ import json import logging import os -from typing import Any, Dict, Optional, Type, Union +from typing import Any, Dict, Type +import pydantic from pydantic import BaseModel, ConfigDict from typing_extensions import Callable -from magnus import defaults -from magnus.defaults import TypeMapVariable -from magnus.utils import remove_prefix +from runnable import defaults +from runnable.datastore import JsonParameter +from runnable.defaults import TypeMapVariable +from runnable.utils import remove_prefix logger = logging.getLogger(defaults.LOGGER_NAME) -def get_user_set_parameters(remove: bool = False) -> Dict[str, Any]: +def get_user_set_parameters(remove: bool = False) -> Dict[str, JsonParameter]: """ - Scans the environment variables for any user returned parameters that have a prefix MAGNUS_PRM_. + Scans the environment variables for any user returned parameters that have a prefix RUNNABLE_PRM_. This function does not deal with any type conversion of the parameters. It just deserializes the parameters and returns them as a dictionary. @@ -27,93 +29,21 @@ def get_user_set_parameters(remove: bool = False) -> Dict[str, Any]: Returns: dict: The dictionary of found user returned parameters """ - parameters = {} + parameters: Dict[str, JsonParameter] = {} for env_var, value in os.environ.items(): if env_var.startswith(defaults.PARAMETER_PREFIX): key = remove_prefix(env_var, defaults.PARAMETER_PREFIX) try: - parameters[key.lower()] = json.loads(value) + parameters[key.lower()] = JsonParameter(kind="json", value=json.loads(value)) except json.decoder.JSONDecodeError: logger.error(f"Parameter {key} could not be JSON decoded, adding the literal value") - parameters[key.lower()] = value + parameters[key.lower()] = JsonParameter(kind="json", value=value) if remove: del os.environ[env_var] return parameters -def set_user_defined_params_as_environment_variables(params: Dict[str, Any]): - """ - Sets the user set parameters as environment variables. - - At this point in time, the params are already in Dict or some kind of literal - - Args: - parameters (Dict[str, Any]): The parameters to set as environment variables - update (bool, optional): Flag to update the environment variables.
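A small sketch of the round trip that `get_user_set_parameters` implements, assuming `defaults.PARAMETER_PREFIX` is `RUNNABLE_PRM_`:

```python
import json
import os

from runnable.parameters import get_user_set_parameters

# Simulate parameters returned by an upstream task.
os.environ["RUNNABLE_PRM_learning_rate"] = json.dumps(0.01)
os.environ["RUNNABLE_PRM_notes"] = "not json"  # kept as the literal string

params = get_user_set_parameters(remove=True)
assert params["learning_rate"].get_value() == 0.01
assert params["notes"].get_value() == "not json"
```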
Defaults to True. - - """ - for key, value in params.items(): - logger.info(f"Storing parameter {key} with value: {value}") - environ_key = defaults.PARAMETER_PREFIX + key - - os.environ[environ_key] = serialize_parameter_as_str(value) - - -def cast_parameters_as_type(value: Any, newT: Optional[Type] = None) -> Union[Any, BaseModel, Dict[str, Any]]: - """ - Casts the environment variable to the given type. - - Note: Only pydantic models special, everything else just goes through. - - Args: - value (Any): The value to cast - newT (T): The type to cast to - - Returns: - T: The casted value - - Examples: - >>> class MyBaseModel(BaseModel): - ... a: int - ... b: str - >>> - >>> class MyDict(dict): - ... pass - >>> - >>> cast_parameters_as_type({"a": 1, "b": "2"}, MyBaseModel) - MyBaseModel(a=1, b="2") - >>> cast_parameters_as_type({"a": 1, "b": "2"}, MyDict) - MyDict({'a': 1, 'b': '2'}) - >>> cast_parameters_as_type(MyBaseModel(a=1, b="2"), MyBaseModel) - MyBaseModel(a=1, b="2") - >>> cast_parameters_as_type(MyDict({"a": 1, "b": "2"}), MyBaseModel) - MyBaseModel(a=1, b="2") - >>> cast_parameters_as_type({"a": 1, "b": "2"}, MyDict[str, int]) - MyDict({'a': 1, 'b': '2'}) - >>> cast_parameters_as_type({"a": 1, "b": "2"}, Dict[str, int]) - MyDict({'a': 1, 'b': '2'}) - >>> with pytest.warns(UserWarning): - ... cast_parameters_as_type(1, MyBaseModel) - MyBaseModel(a=1, b=None) - >>> with pytest.raises(TypeError): - ... cast_parameters_as_type(1, MyDict) - """ - if not newT: - return value - - if issubclass(newT, BaseModel): - return newT(**value) - - if issubclass(newT, Dict): - return dict(value) - - if type(value) != newT: - logger.warning(f"Casting {value} of {type(value)} to {newT} seems wrong!!") - - return newT(value) - - def serialize_parameter_as_str(value: Any) -> str: if isinstance(value, BaseModel): return json.dumps(value.model_dump()) @@ -139,11 +69,29 @@ def filter_arguments_for_func( function_args = inspect.signature(func).parameters # Update parameters with the map variables - params.update(map_variable or {}) + for key, v in (map_variable or {}).items(): + params[key] = JsonParameter(kind="json", value=v) - unassigned_params = set(params.keys()) bound_args = {} + unassigned_params = set(params.keys()) + # If **kwargs (VAR_KEYWORD) is present in the function signature, we send back everything for name, value in function_args.items(): + if value.kind != inspect.Parameter.VAR_KEYWORD: + continue + # Found VAR_KEYWORD, we send back everything as found + for key, param in params.items(): + bound_args[key] = param.get_value() + + return bound_args + + # Otherwise, return only the parameters that the function asks for + for name, value in function_args.items(): + # Ignore any *args + if value.kind == inspect.Parameter.VAR_POSITIONAL: + logger.warning(f"Ignoring parameter {name} as it is VAR_POSITIONAL") + continue + if name not in params: # No parameter of this name was provided if value.default == inspect.Parameter.empty: @@ -152,9 +100,9 @@ def filter_arguments_for_func( # default value is given in the function signature, nothing further to do. continue - if issubclass(value.annotation, BaseModel): - # We try to cast it as a pydantic model.
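A sketch of how the binding rules above resolve a task function's signature; `Hyperparams` and `train` are hypothetical user code, and `bind_args_for_pydantic_model` is assumed to build the model from the stored dict:

```python
from pydantic import BaseModel

from runnable.datastore import JsonParameter
from runnable.parameters import filter_arguments_for_func


class Hyperparams(BaseModel):  # hypothetical user model
    lr: float
    epochs: int


def train(hp: Hyperparams, seed: int, debug: bool = False): ...


params = {
    "hp": JsonParameter(kind="json", value={"lr": 0.01, "epochs": 2}),
    "seed": JsonParameter(kind="json", value="42"),  # cast to int via the annotation
}

bound = filter_arguments_for_func(train, params, None)
# Expected under the logic above:
# bound == {"hp": Hyperparams(lr=0.01, epochs=2), "seed": 42};
# "debug" keeps its default since no parameter of that name is stored.
```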
- named_param = params[name] + if type(value.annotation) in [BaseModel, pydantic._internal._model_construction.ModelMetaclass]: + # We try to cast it as a pydantic model if asked + named_param = params[name].get_value() if not isinstance(named_param, dict): # A case where the parameter is a one attribute model @@ -163,9 +111,12 @@ def filter_arguments_for_func( bound_model = bind_args_for_pydantic_model(named_param, value.annotation) bound_args[name] = bound_model unassigned_params = unassigned_params.difference(bound_model.model_fields.keys()) + + elif value.annotation in [str, int, float, bool]: + # Cast it if its a primitive type. Ensure the type matches the annotation. + bound_args[name] = value.annotation(params[name].get_value()) else: - # simple python data type. - bound_args[name] = cast_parameters_as_type(params[name], value.annotation) # type: ignore + bound_args[name] = params[name].get_value() unassigned_params.remove(name) diff --git a/magnus/pickler.py b/runnable/pickler.py similarity index 96% rename from magnus/pickler.py rename to runnable/pickler.py index 66166031..7d2306b0 100644 --- a/magnus/pickler.py +++ b/runnable/pickler.py @@ -1,10 +1,10 @@ -import pickle from abc import ABC, abstractmethod from typing import Any +import dill as pickle from pydantic import BaseModel, ConfigDict -import magnus.context as context +import runnable.context as context class BasePickler(ABC, BaseModel): @@ -64,7 +64,7 @@ class NativePickler(BasePickler): Uses native python pickle to load and dump files """ - extension: str = ".pickle" + extension: str = ".dill" service_name: str = "pickle" def dump(self, data: Any, path: str): diff --git a/magnus/sdk.py b/runnable/sdk.py similarity index 52% rename from magnus/sdk.py rename to runnable/sdk.py index 30f6704b..3a707a0e 100644 --- a/magnus/sdk.py +++ b/runnable/sdk.py @@ -3,24 +3,46 @@ import logging import os from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Union - -from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, computed_field, field_validator, model_validator +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Union + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + PrivateAttr, + computed_field, + field_validator, + model_validator, +) from rich import print -from ruamel.yaml import YAML +from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn +from rich.table import Column from typing_extensions import Self -from magnus import defaults, entrypoints, graph, utils -from magnus.extensions.nodes import FailNode, MapNode, ParallelNode, StubNode, SuccessNode, TaskNode -from magnus.nodes import TraversalNode +from runnable import console, defaults, entrypoints, graph, utils +from runnable.extensions.nodes import ( + FailNode, + MapNode, + ParallelNode, + StubNode, + SuccessNode, + TaskNode, +) +from runnable.nodes import TraversalNode +from runnable.tasks import TaskReturns logger = logging.getLogger(defaults.LOGGER_NAME) -StepType = Union["Stub", "Task", "Success", "Fail", "Parallel", "Map"] -TraversalTypes = Union["Stub", "Task", "Parallel", "Map"] +StepType = Union["Stub", "PythonTask", "NotebookTask", "ShellTask", "Parallel", "Map"] + + +def pickled(name: str) -> TaskReturns: + return TaskReturns(name=name, kind="object") -ALLOWED_COMMAND_TYPES = ["shell", "python", "notebook"] +def metric(name: str) -> TaskReturns: + return TaskReturns(name=name, kind="metric") class Catalog(BaseModel): @@ -33,7 +55,7 @@ class 
Catalog(BaseModel): put (List[str]): List of glob patterns to put into central catalog from the compute data folder. Examples: - >>> from magnus import Catalog, Task + >>> from runnable import Catalog, Task >>> catalog = Catalog(compute_data_folder="/path/to/data", get=["*.csv"], put=["*.csv"]) >>> task = Task(name="task", catalog=catalog, command="echo 'hello'") @@ -107,7 +129,7 @@ def create_node(self) -> TraversalNode: ... -class Task(BaseTraversal): +class BaseTask(BaseTraversal): """ An execution node of the pipeline. Please refer to [concepts](concepts/task.md) for more information. @@ -133,10 +155,10 @@ class Task(BaseTraversal): executor: type: local-container config: - docker_image: "magnus/magnus:latest" + docker_image: "runnable/runnable:latest" overrides: custom_docker_image: - docker_image: "magnus/magnus:custom" + docker_image: "runnable/runnable:custom" ``` ### Task specific configuration ```python @@ -148,50 +170,188 @@ class Task(BaseTraversal): optional_ploomber_args (Optional[Dict[str, Any]]): Any optional ploomber args. Only used when command_type is 'notebook', defaults to {} output_cell_tag (Optional[str]): The tag of the output cell. - Only used when command_type is 'notebook', defaults to "magnus_output" + Only used when command_type is 'notebook', defaults to "runnable_output" terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node. terminate_with_success (bool): Whether to terminate the pipeline with a success after this node. on_failure (str): The name of the node to execute if the step fails. """ - command: str = Field(alias="command") - command_type: str = Field(default="python") catalog: Optional[Catalog] = Field(default=None, alias="catalog") overrides: Dict[str, Any] = Field(default_factory=dict, alias="overrides") + returns: List[Union[str, TaskReturns]] = Field(default_factory=list, alias="returns") - notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path") - optional_ploomber_args: Optional[Dict[str, Any]] = Field(default=None, alias="optional_ploomber_args") - output_cell_tag: Optional[str] = Field(default=None, alias="output_cell_tag") - - @field_validator("command_type", mode="before") + @field_validator("returns", mode="before") @classmethod - def validate_command_type(cls, value: str) -> str: - if value not in ALLOWED_COMMAND_TYPES: - raise ValueError(f"Invalid command_type: {value}") - return value + def serialize_returns(cls, returns: List[Union[str, TaskReturns]]) -> List[TaskReturns]: + task_returns = [] - @model_validator(mode="after") - def check_notebook_args(self) -> "Task": - if self.command_type != "notebook": - assert ( - self.notebook_output_path is None - ), "Only command_types of 'notebook' can be used with notebook_output_path" + for x in returns: + if isinstance(x, str): + task_returns.append(TaskReturns(name=x, kind="json")) + continue - assert ( - self.optional_ploomber_args is None - ), "Only command_types of 'notebook' can be used with optional_ploomber_args" + # Its already task returns + task_returns.append(x) - assert self.output_cell_tag is None, "Only command_types of 'notebook' can be used with output_cell_tag" - return self + return task_returns def create_node(self) -> TaskNode: if not self.next_node: if not (self.terminate_with_failure or self.terminate_with_success): raise AssertionError("A node not being terminated must have a user defined next node") + return TaskNode.parse_from_config(self.model_dump(exclude_none=True)) +class PythonTask(BaseTask): 
+ """ + An execution node of the pipeline of python functions. + Please refer to [concepts](concepts/task.md) for more information. + + Attributes: + name (str): The name of the node. + function (callable): The function to execute. + catalog (Optional[Catalog]): The catalog to sync data from/to. + Please see Catalog about the structure of the catalog. + overrides (Dict[str, Any]): Any overrides to the command. + Individual tasks can override the global configuration config by referring to the + specific override. + + For example, + ### Global configuration + ```yaml + executor: + type: local-container + config: + docker_image: "runnable/runnable:latest" + overrides: + custom_docker_image: + docker_image: "runnable/runnable:custom" + ``` + ### Task specific configuration + ```python + task = PythonTask(name="task", function="function'", + overrides={'local-container': custom_docker_image}) + ``` + + terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node. + terminate_with_success (bool): Whether to terminate the pipeline with a success after this node. + on_failure (str): The name of the node to execute if the step fails. + + """ + + function: Callable = Field(exclude=True) + + @computed_field + def command_type(self) -> str: + return "python" + + @computed_field + def command(self) -> str: + module = self.function.__module__ + name = self.function.__name__ + + return f"{module}.{name}" + + +class NotebookTask(BaseTask): + """ + An execution node of the pipeline of type notebook. + Please refer to [concepts](concepts/task.md) for more information. + + Attributes: + name (str): The name of the node. + notebook: The path to the notebook + catalog (Optional[Catalog]): The catalog to sync data from/to. + Please see Catalog about the structure of the catalog. + returns: A list of the names of variables to return from the notebook. + overrides (Dict[str, Any]): Any overrides to the command. + Individual tasks can override the global configuration config by referring to the + specific override. + + For example, + ### Global configuration + ```yaml + executor: + type: local-container + config: + docker_image: "runnable/runnable:latest" + overrides: + custom_docker_image: + docker_image: "runnable/runnable:custom" + ``` + ### Task specific configuration + ```python + task = NotebookTask(name="task", notebook="evaluation.ipynb", + overrides={'local-container': custom_docker_image}) + ``` + notebook_output_path (Optional[str]): The path to save the notebook output. + Only used when command_type is 'notebook', defaults to command+_out.ipynb + optional_ploomber_args (Optional[Dict[str, Any]]): Any optional ploomber args. + Only used when command_type is 'notebook', defaults to {} + + terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node. + terminate_with_success (bool): Whether to terminate the pipeline with a success after this node. + on_failure (str): The name of the node to execute if the step fails. + + """ + + notebook: str = Field(alias="command") + + notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path") + optional_ploomber_args: Optional[Dict[str, Any]] = Field(default=None, alias="optional_ploomber_args") + + @computed_field + def command_type(self) -> str: + return "notebook" + + +class ShellTask(BaseTask): + """ + An execution node of the pipeline of type shell. + Please refer to [concepts](concepts/task.md) for more information. 
+ + Attributes: + name (str): The name of the node. + command: The shell command to execute. + catalog (Optional[Catalog]): The catalog to sync data from/to. + Please see Catalog about the structure of the catalog. + returns: A list of the names of variables to capture from environment variables of shell. + overrides (Dict[str, Any]): Any overrides to the command. + Individual tasks can override the global configuration config by referring to the + specific override. + + For example, + ### Global configuration + ```yaml + executor: + type: local-container + config: + docker_image: "runnable/runnable:latest" + overrides: + custom_docker_image: + docker_image: "runnable/runnable:custom" + ``` + ### Task specific configuration + ```python + task = ShellTask(name="task", command="exit 0", + overrides={'local-container': custom_docker_image}) + ``` + + terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node. + terminate_with_success (bool): Whether to terminate the pipeline with a success after this node. + on_failure (str): The name of the node to execute if the step fails. + + """ + + command: str = Field(alias="command") + + @computed_field + def command_type(self) -> str: + return "shell" + + class Stub(BaseTraversal): """ A node that does nothing. @@ -206,7 +366,7 @@ class Stub(BaseTraversal): """ - model_config = ConfigDict(extra="allow") + model_config = ConfigDict(extra="ignore") catalog: Optional[Catalog] = Field(default=None, alias="catalog") def create_node(self) -> StubNode: @@ -267,6 +427,7 @@ class Map(BaseTraversal): branch: "Pipeline" iterate_on: str iterate_as: str + reducer: Optional[str] = Field(default=None, alias="reducer") overrides: Dict[str, Any] = Field(default_factory=dict) @computed_field # type: ignore @@ -287,6 +448,7 @@ def create_node(self) -> MapNode: iterate_on=self.iterate_on, iterate_as=self.iterate_as, overrides=self.overrides, + reducer=self.reducer, ) return node @@ -341,7 +503,8 @@ class Pipeline(BaseModel): A Pipeline is a directed acyclic graph of Steps that define a workflow. Attributes: - steps (List[Stub | Task | Parallel | Map | Success | Fail]): A list of Steps that make up the Pipeline. + steps (List[Stub | PythonTask | NotebookTask | ShellTask | Parallel | Map | Success | Fail]): + A list of Steps that make up the Pipeline. start_at (Stub | Task | Parallel | Map): The name of the first Step in the Pipeline. name (str, optional): The name of the Pipeline. Defaults to "". description (str, optional): A description of the Pipeline. Defaults to "". 
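Pulling the task classes and `Pipeline` together, a minimal sketch of this SDK surface; `my_module.prepare` and `my_module.train` are hypothetical user functions:

```python
from runnable.sdk import NotebookTask, Pipeline, PythonTask, ShellTask, metric, pickled

from my_module import prepare, train  # hypothetical user functions

step1 = PythonTask(name="prepare", function=prepare)  # command becomes "my_module.prepare"
step2 = PythonTask(
    name="train",
    function=train,
    returns=[pickled("model"), metric("auc")],  # bare strings default to kind="json"
)
step3 = NotebookTask(name="evaluate", notebook="evaluation.ipynb")
step4 = ShellTask(name="publish", command="echo done", terminate_with_success=True)

# Steps listed in order are chained; exactly one step must terminate with success.
pipeline = Pipeline(steps=[step1, step2, step3, step4], description="illustrative only")
pipeline.execute()
```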
@@ -353,8 +516,7 @@ class Pipeline(BaseModel): """ - steps: List[StepType] - start_at: TraversalTypes + steps: List[Union[StepType, List[StepType]]] name: str = "" description: str = "" add_terminal_nodes: bool = True # Adds "success" and "fail" nodes @@ -364,20 +526,87 @@ class Pipeline(BaseModel): _dag: graph.Graph = PrivateAttr() model_config = ConfigDict(extra="forbid") + def _validate_path(self, path: List[StepType]) -> None: + # Check if one and only one step terminates with success + # Check no more than one step terminates with failure + + reached_success = False + reached_failure = False + + for step in path: + if step.terminate_with_success: + if reached_success: + raise Exception("A pipeline cannot have more than one step that terminates with success") + reached_success = True + continue + if step.terminate_with_failure: + if reached_failure: + raise Exception("A pipeline cannot have more than one step that terminates with failure") + reached_failure = True + + if not reached_success: + raise Exception("A pipeline must have at least one step that terminates with success") + + def _construct_path(self, path: List[StepType]) -> None: + prev_step = path[0] + + for step in path: + if step == prev_step: + continue + + if prev_step.terminate_with_success or prev_step.terminate_with_failure: + raise Exception(f"A step that terminates with success/failure cannot have a next step: {prev_step}") + + if prev_step.next_node and prev_step.next_node not in ["success", "fail"]: + raise Exception(f"Step already has a next node: {prev_step} ") + + prev_step.next_node = step.name + prev_step = step + def model_post_init(self, __context: Any) -> None: - self.steps = [model.model_copy(deep=True) for model in self.steps] + """ + The sequence of steps can either be: + [step1, step2,..., stepN, [step11, step12,..., step1N], [step21, step22,...,]] + indicates: + - step1 > step2 > ... > stepN + - We expect terminate with success or fail to be explicitly stated on a step. + - If it is stated, the step cannot have a next step defined apart from "success" and "fail". + + The inner list of steps is only to accommodate on-failure behaviors. + - For sake of simplicity, lets assume that it has the same behavior as the happy pipeline. + - A task which was already seen should not be part of this. + - There should be at least one step which terminates with success + + Any definition of pipeline should have one node that terminates with success. 
+ """ + + success_path: List[StepType] = [] + on_failure_paths: List[List[StepType]] = [] + + for step in self.steps: + if isinstance(step, (Stub, PythonTask, NotebookTask, ShellTask, Parallel, Map)): + success_path.append(step) + continue + on_failure_paths.append(step) + + if not success_path: + raise Exception("There should be some success path") + + # Check all paths are valid and construct the path + paths = [success_path] + on_failure_paths + for path in paths: + self._validate_path(path) + self._construct_path(path) + + all_steps: List[StepType] = [step for step in success_path + on_failure_paths] # type: ignore self._dag = graph.Graph( - start_at=self.start_at.name, + start_at=all_steps[0].name, description=self.description, internal_branch_name=self.internal_branch_name, ) - for step in self.steps: - if step.name == self.start_at.name: - if isinstance(step, Success) or isinstance(step, Fail): - raise Exception("A success or fail node cannot be the start_at of the graph") - assert step.next_node + for step in all_steps: self._dag.add_node(step.create_node()) if self.add_terminal_nodes: @@ -385,15 +614,16 @@ def model_post_init(self, __context: Any) -> None: self._dag.check_graph() + def return_dag(self) -> graph.Graph: + return self._dag + def execute( self, configuration_file: str = "", run_id: str = "", tag: str = "", parameters_file: str = "", - use_cached: str = "", log_level: str = defaults.LOG_LEVEL, - output_pipeline_definition: str = "magnus-pipeline.yaml", ): """ *Execute* the Pipeline. @@ -408,7 +638,7 @@ def execute( Args: configuration_file (str, optional): The path to the configuration file. Defaults to "". - The configuration file can be overridden by the environment variable MAGNUS_CONFIGURATION_FILE. + The configuration file can be overridden by the environment variable runnable_CONFIGURATION_FILE. run_id (str, optional): The ID of the run. Defaults to "". tag (str, optional): The tag of the run. Defaults to "". @@ -419,28 +649,27 @@ def execute( Provide the run_id of the older execution to recover. log_level (str, optional): The log level. Defaults to defaults.LOG_LEVEL. - output_pipeline_definition (str, optional): The path to the output pipeline definition file. - Defaults to "magnus-pipeline.yaml". - - Only applicable for the execution via SDK for non ```local``` executors. """ - from magnus.extensions.executor.local.implementation import LocalExecutor - from magnus.extensions.executor.mocked.implementation import MockedExecutor + + # py_to_yaml is used by non local executors to generate the yaml representation of the pipeline. 
+ py_to_yaml = os.environ.get("RUNNABLE_PY_TO_YAML", "false") + + if py_to_yaml == "true": + return {} logger.setLevel(log_level) run_id = utils.generate_run_id(run_id=run_id) - configuration_file = os.environ.get("MAGNUS_CONFIGURATION_FILE", configuration_file) + configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file) run_context = entrypoints.prepare_configurations( configuration_file=configuration_file, run_id=run_id, tag=tag, parameters_file=parameters_file, - use_cached=use_cached, ) run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value - utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) + utils.set_runnable_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag) dag_definition = self._dag.model_dump(by_alias=True, exclude_none=True) @@ -449,22 +678,41 @@ def execute( print("Working with context:") print(run_context) - if not (isinstance(run_context.executor, LocalExecutor) or isinstance(run_context.executor, MockedExecutor)): - logger.debug(run_context.dag.model_dump(by_alias=True)) - yaml = YAML() + if not run_context.executor._local: + # We are working with a non-local executor + import inspect - with open(output_pipeline_definition, "w", encoding="utf-8") as f: - yaml.dump( - {"dag": run_context.dag.model_dump(by_alias=True, exclude_none=True)}, - f, - ) + caller_stack = inspect.stack()[1] + relative_to_root = str(Path(caller_stack.filename).relative_to(Path.cwd())) - return + module_to_call = f"{relative_to_root.replace('/', '.').replace('.py', '')}.{caller_stack.function}" + + run_context.pipeline_file = f"{module_to_call}.py" # Prepare for graph execution run_context.executor.prepare_for_graph_execution() - logger.info("Executing the graph") - run_context.executor.execute_graph(dag=run_context.dag) - - return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id) + with Progress( + TextColumn("[progress.description]{task.description}", table_column=Column(ratio=2)), + BarColumn(table_column=Column(ratio=1), style="dark_orange"), + TimeElapsedColumn(table_column=Column(ratio=1)), + console=console, + expand=True, + ) as progress: + try: + run_context.progress = progress + pipeline_execution_task = progress.add_task("[dark_orange] Starting execution .. 
", total=1) + run_context.executor.execute_graph(dag=run_context.dag) + + run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False) + + if run_log.status == defaults.SUCCESS: + progress.update(pipeline_execution_task, description="[green] Success", completed=True) + else: + progress.update(pipeline_execution_task, description="[red] Failed", completed=True) + except Exception as e: # noqa: E722 + console.print(e, style=defaults.error_style) + progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True) + + if run_context.executor._local: + return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id) diff --git a/magnus/secrets.py b/runnable/secrets.py similarity index 96% rename from magnus/secrets.py rename to runnable/secrets.py index a0a49716..a8cd5ec1 100644 --- a/magnus/secrets.py +++ b/runnable/secrets.py @@ -4,8 +4,8 @@ from pydantic import BaseModel, ConfigDict -import magnus.context as context -from magnus import defaults, exceptions +import runnable.context as context +from runnable import defaults, exceptions logger = logging.getLogger(defaults.LOGGER_NAME) diff --git a/runnable/tasks.py b/runnable/tasks.py new file mode 100644 index 00000000..1c46c8e2 --- /dev/null +++ b/runnable/tasks.py @@ -0,0 +1,508 @@ +import contextlib +import importlib +import io +import json +import logging +import os +import subprocess +import sys +from datetime import datetime +from pickle import PicklingError +from string import Template +from typing import Any, Dict, List, Literal, Tuple + +from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator +from stevedore import driver + +import runnable.context as context +from runnable import console, defaults, exceptions, parameters, utils +from runnable.datastore import ( + JsonParameter, + MetricParameter, + ObjectParameter, + Parameter, + StepAttempt, +) +from runnable.defaults import TypeMapVariable + +logger = logging.getLogger(defaults.LOGGER_NAME) +logging.getLogger("stevedore").setLevel(logging.CRITICAL) + + +# TODO: Can we add memory peak, cpu usage, etc. to the metrics? + + +class TaskReturns(BaseModel): + name: str + kind: Literal["json", "object", "metric"] = Field(default="json") + + +class BaseTaskType(BaseModel): + """A base task class which does the execution of command defined by the user.""" + + task_type: str = Field(serialization_alias="command_type") + node_name: str = Field(exclude=True) + secrets: Dict[str, str] = Field(default_factory=dict) + returns: List[TaskReturns] = Field(default_factory=list, alias="returns") + + model_config = ConfigDict(extra="forbid") + + def get_summary(self) -> Dict[str, Any]: + return self.model_dump(by_alias=True, exclude_none=True) + + @property + def _context(self): + return context.run_context + + def get_cli_options(self) -> Tuple[str, dict]: + """ + Key is the name of the cli option and value is the value of the cli option. + This should always be in sync with the cli options defined in execute_*. + + Returns: + str: The name of the cli option. + dict: The dict of cli options for the task. 
+ + Raises: + NotImplementedError: Base class, not implemented + """ + raise NotImplementedError() + + def set_secrets_as_env_variables(self): + for key, value in self.secrets.items(): + secret_value = context.run_context.secrets_handler.get(key) + os.environ[value] = secret_value + + def delete_secrets_from_env_variables(self): + for _, value in self.secrets.items(): + if value in os.environ: + del os.environ[value] + + def execute_command( + self, + map_variable: TypeMapVariable = None, + **kwargs, + ) -> StepAttempt: + """The function to execute the command. + + And map_variable is sent in as an argument into the function. + + Args: + map_variable (dict, optional): If the command is part of map node, the value of map. Defaults to None. + + Raises: + NotImplementedError: Base class, not implemented + """ + raise NotImplementedError() + + @contextlib.contextmanager + def expose_secrets(self): + """Context manager to expose secrets to the execution as environment variables.""" + self.set_secrets_as_env_variables() + try: + yield + except Exception as e: # pylint: disable=broad-except + logger.exception(e) + finally: + self.delete_secrets_from_env_variables() + + @contextlib.contextmanager + def execution_context(self, map_variable: TypeMapVariable = None, allow_complex: bool = True): + params = self._context.run_log_store.get_parameters(run_id=self._context.run_id).copy() + logger.info(f"Parameters available for the execution: {params}") + + for param_name, param in params.items(): + # Any access to unreduced param should be replaced. + # The replacement is the context param + # It is possible that the unreduced param is not created as no upstream step + # has created it yet. 
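The secrets hooks above treat `secrets` as a mapping of stored secret name to the environment variable it should surface as. A sketch of the intended use, assuming a configured run context and secrets handler:

```python
from runnable.tasks import BaseTaskType

# Hypothetical wiring: expose the stored secret "db_password" as DB_PASSWORD.
task = BaseTaskType(
    task_type="demo",  # concrete subclasses set this for you
    node_name="load",
    secrets={"db_password": "DB_PASSWORD"},
)

with task.expose_secrets():
    ...  # os.environ["DB_PASSWORD"] holds the secret here; it is removed after
```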
+ if param.reduced is False: + context_param = param_name + for _, v in map_variable.items(): # type: ignore + context_param = f"{context_param}_{v}" + + if context_param in params: + params[param_name].value = params[context_param].value + + logger.debug(f"Resolved parameters: {params}") + + if not allow_complex: + params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)} + + log_file_name = self.node_name.replace(" ", "_") + ".execution.log" + if map_variable: + for _, value in map_variable.items(): + log_file_name += "_" + str(value) + + log_file = open(log_file_name, "w") + + f = io.StringIO() + try: + with contextlib.redirect_stdout(f): + yield params + except Exception as e: # pylint: disable=broad-except + logger.exception(e) + finally: + print(f.getvalue()) # print to console + log_file.write(f.getvalue()) # Print to file + + f.close() + log_file.close() + + # Put the log file in the catalog + # self._context.catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id) + os.remove(log_file.name) + + # Update parameters + self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id) + + +def task_return_to_parameter(task_return: TaskReturns, value: Any) -> Parameter: + # implicit support for pydantic models + if isinstance(value, BaseModel) and task_return.kind == "json": + try: + return JsonParameter(kind="json", value=value.model_dump(by_alias=True)) + except PicklingError: + logging.warning("Pydantic model is not serializable") + + if task_return.kind == "json": + return JsonParameter(kind="json", value=value) + + if task_return.kind == "metric": + return MetricParameter(kind="metric", value=value) + + if task_return.kind == "object": + obj = ObjectParameter(value=task_return.name, kind="object") + obj.put_object(data=value) + return obj + + raise Exception(f"Unknown return type: {task_return.kind}") + + +class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods + """The task class for python command.""" + + task_type: str = Field(default="python", serialization_alias="command_type") + command: str + + def get_cli_options(self) -> Tuple[str, dict]: + """Return the cli options for the task. 
+ + Returns: + dict: The cli options for the task + """ + return "function", {"command": self.command} + + def execute_command( + self, + map_variable: TypeMapVariable = None, + **kwargs, + ) -> StepAttempt: + """Execute the python function as defined by the command.""" + attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now())) + + with self.execution_context(map_variable=map_variable) as params, self.expose_secrets() as _: + module, func = utils.get_module_and_attr_names(self.command) + sys.path.insert(0, os.getcwd()) # Need to add the current directory to path + imported_module = importlib.import_module(module) + f = getattr(imported_module, func) + + try: + try: + filtered_parameters = parameters.filter_arguments_for_func(f, params.copy(), map_variable) + logger.info(f"Calling {func} from {module} with {filtered_parameters}") + user_set_parameters = f(**filtered_parameters) # This is a tuple or single value + except Exception as e: + logger.exception(e) + console.print(e, style=defaults.error_style) + raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e + + attempt_log.input_parameters = params.copy() + + if map_variable: + attempt_log.input_parameters.update( + {k: JsonParameter(value=v, kind="json") for k, v in map_variable.items()} + ) + + if self.returns: + if not isinstance(user_set_parameters, tuple): # make it a tuple + user_set_parameters = (user_set_parameters,) + + if len(user_set_parameters) != len(self.returns): + raise ValueError("Returns task signature does not match the function returns") + + output_parameters: Dict[str, Parameter] = {} + metrics: Dict[str, Parameter] = {} + + for i, task_return in enumerate(self.returns): + output_parameter = task_return_to_parameter( + task_return=task_return, + value=user_set_parameters[i], + ) + + if task_return.kind == "metric": + metrics[task_return.name] = output_parameter + + param_name = task_return.name + if map_variable: + for _, v in map_variable.items(): + param_name = f"{param_name}_{v}" + + output_parameters[param_name] = output_parameter + + attempt_log.output_parameters = output_parameters + attempt_log.user_defined_metrics = metrics + params.update(output_parameters) + + attempt_log.status = defaults.SUCCESS + except Exception as _e: + msg = f"Call to the function {self.command} did not succeed.\n" + logger.exception(_e) + attempt_log.message = msg + console.print(_e, style=defaults.error_style) + + attempt_log.end_time = str(datetime.now()) + + return attempt_log + + +class NotebookTaskType(BaseTaskType): + """The task class for Notebook based execution.""" + + task_type: str = Field(default="notebook", serialization_alias="command_type") + command: str + notebook_output_path: str = Field(default="", validate_default=True) + optional_ploomber_args: dict = {} + + @field_validator("command") + @classmethod + def notebook_should_end_with_ipynb(cls, command: str): + if not command.endswith(".ipynb"): + raise Exception("Notebook task should point to an ipynb file") + + return command + + @field_validator("notebook_output_path") + @classmethod + def correct_notebook_output_path(cls, notebook_output_path: str, info: ValidationInfo): + if notebook_output_path: + return notebook_output_path + + command = info.data["command"] + return ".".join(command.split(".")[:-1]) + "_out.ipynb" + + def get_cli_options(self) -> Tuple[str, dict]: + return "notebook", {"command": self.command, "notebook-output-path": self.notebook_output_path} + + def execute_command( + self, + 
map_variable: TypeMapVariable = None, + **kwargs, + ) -> StepAttempt: + """Execute the python notebook as defined by the command. + + Args: + map_variable (dict, optional): If the node is part of internal branch. Defaults to None. + + Raises: + ImportError: If necessary dependencies are not installed + Exception: If anything else fails + """ + attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now())) + try: + import ploomber_engine as pm + from ploomber_engine.ipython import PloomberClient + + notebook_output_path = self.notebook_output_path + + with self.execution_context( + map_variable=map_variable, allow_complex=False + ) as params, self.expose_secrets() as _: + if map_variable: + for key, value in map_variable.items(): + notebook_output_path += "_" + str(value) + params[key] = value + + notebook_params = {k: v.get_value() for k, v in params.items()} + + ploomber_optional_args = self.optional_ploomber_args + + kwds = { + "input_path": self.command, + "output_path": notebook_output_path, + "parameters": notebook_params, + "log_output": True, + "progress_bar": False, + } + kwds.update(ploomber_optional_args) + + pm.execute_notebook(**kwds) + context.run_context.catalog_handler.put(name=notebook_output_path, run_id=context.run_context.run_id) + + client = PloomberClient.from_path(path=notebook_output_path) + namespace = client.get_namespace() + + output_parameters: Dict[str, Parameter] = {} + try: + for task_return in self.returns: + param_name = Template(task_return.name).safe_substitute(map_variable) # type: ignore + output_parameters[param_name] = task_return_to_parameter( + task_return=task_return, + value=namespace[task_return.name], + ) + except PicklingError as e: + logger.exception("Notebooks cannot return objects") + logger.exception(e) + raise + + if output_parameters: + attempt_log.output_parameters = output_parameters + params.update(output_parameters) + + attempt_log.status = defaults.SUCCESS + + except (ImportError, Exception) as e: + msg = ( + f"Call to the notebook command {self.command} did not succeed.\n" + "Ensure that you have installed runnable with notebook extras" + ) + logger.exception(msg) + logger.exception(e) + attempt_log.status = defaults.FAIL + + attempt_log.end_time = str(datetime.now()) + + return attempt_log + + +class ShellTaskType(BaseTaskType): + """ + The task class for shell based commands. + """ + + task_type: str = Field(default="shell", serialization_alias="command_type") + command: str + + @field_validator("returns") + @classmethod + def returns_should_be_json(cls, returns: List[TaskReturns]): + for task_return in returns: + if task_return.kind == "object" or task_return.kind == "pydantic": + raise ValueError("Pydantic models or Objects are not allowed in returns") + + return returns + + def execute_command( + self, + map_variable: TypeMapVariable = None, + **kwargs, + ) -> StepAttempt: + # Using shell=True as we want to have chained commands to be executed in the same shell. + """Execute the shell command as defined by the command. + + Args: + map_variable (dict, optional): If the node is part of an internal branch. Defaults to None. + """ + attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now())) + subprocess_env = {} + + # Expose RUNNABLE environment variables to be passed to the subprocess. 
+ for key, value in os.environ.items(): + if key.startswith("RUNNABLE_"): + subprocess_env[key] = value + + # Expose map variable as environment variables + if map_variable: + for key, value in map_variable.items(): # type: ignore + subprocess_env[key] = str(value) + + # Expose secrets as environment variables + if self.secrets: + for key, value in self.secrets.items(): + secret_value = context.run_context.secrets_handler.get(key) + subprocess_env[value] = secret_value + + with self.execution_context(map_variable=map_variable, allow_complex=False) as params: + subprocess_env.update({k: v.get_value() for k, v in params.items()}) + + # Json dumps all runnable environment variables + for key, value in subprocess_env.items(): + subprocess_env[key] = json.dumps(value) + + collect_delimiter = "=== COLLECT ===" + + command = self.command.strip() + f" && echo '{collect_delimiter}' && env" + logger.info(f"Executing shell command: {command}") + + capture = False + return_keys = [x.name for x in self.returns] + + with subprocess.Popen( + command, + shell=True, + env=subprocess_env, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) as proc: + for line in proc.stdout: # type: ignore + logger.info(line) + print(line) + + if line.strip() == collect_delimiter: + # The lines from now on should be captured + capture = True + continue + + if capture: + key, value = line.strip().split("=", 1) + if key in (return_keys or []): + param_name = Template(key).safe_substitute(map_variable) # type: ignore + try: + params[param_name] = JsonParameter(kind="json", value=json.loads(value)) + except json.JSONDecodeError: + params[param_name] = JsonParameter(kind="json", value=value) + + proc.wait() + if proc.returncode == 0: + attempt_log.status = defaults.SUCCESS + + attempt_log.end_time = str(datetime.now()) + return attempt_log + + +def create_task(kwargs_for_init) -> BaseTaskType: + """ + Creates a task object from the command configuration. + + Args: + A dictionary of keyword arguments that are sent by the user to the task. + Check against the model class for the validity of it. + + Returns: + tasks.BaseTaskType: The command object + """ + # The dictionary cannot be modified + kwargs = kwargs_for_init.copy() + command_type = kwargs.pop("command_type", defaults.COMMAND_TYPE) + + try: + task_mgr = driver.DriverManager( + namespace="tasks", + name=command_type, + invoke_on_load=True, + invoke_kwds=kwargs, + ) + return task_mgr.driver + except Exception as _e: + msg = ( + f"Could not find the task type {command_type}. Please ensure you have installed " + "the extension that provides the node type." 
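The delimiter trick above is what lets shell tasks participate in `returns`: the task appends an `env` dump after the collect marker and harvests any exported variables named in `returns`. A sketch at the SDK level:

```python
from runnable.sdk import ShellTask

# Hypothetical step: "n_rows" is read back from the env dump after
# "=== COLLECT ===" and stored as the JSON parameter n_rows=100.
count = ShellTask(
    name="count",
    command="export n_rows=100",
    returns=["n_rows"],
    terminate_with_success=True,
)
```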
+        )
+        raise Exception(msg) from _e
diff --git a/magnus/utils.py b/runnable/utils.py
similarity index 91%
rename from magnus/utils.py
rename to runnable/utils.py
index b00c212e..d45e6ac4 100644
--- a/magnus/utils.py
+++ b/runnable/utils.py
@@ -14,13 +14,13 @@
 from ruamel.yaml import YAML
 from stevedore import driver
 
-import magnus.context as context
-from magnus import defaults, names
-from magnus.defaults import TypeMapVariable
+import runnable.context as context
+from runnable import defaults, names
+from runnable.defaults import TypeMapVariable
 
 if TYPE_CHECKING:  # pragma: no cover
-    from magnus.extensions.nodes import TaskNode
-    from magnus.nodes import BaseNode
+    from runnable.extensions.nodes import TaskNode
+    from runnable.nodes import BaseNode
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -281,10 +281,10 @@ def get_git_code_identity():
     """Returns a code identity object for version controlled code.
 
     Args:
-        run_log_store (magnus.datastore.BaseRunLogStore): The run log store used in this process
+        run_log_store (runnable.datastore.BaseRunLogStore): The run log store used in this process
 
     Returns:
-        magnus.datastore.CodeIdentity: The code identity used by the run log store.
+        runnable.datastore.CodeIdentity: The code identity used by the run log store.
     """
     code_identity = context.run_context.run_log_store.create_code_identity()
     try:
@@ -316,7 +316,7 @@ def remove_prefix(text: str, prefix: str) -> str:
 
 def get_tracked_data() -> Dict[str, str]:
-    """Scans the environment variables to find any user tracked variables that have a prefix MAGNUS_TRACK_
+    """Scans the environment variables to find any user tracked variables that have a prefix RUNNABLE_TRACK_
     Removes the environment variable to prevent any clashes in the future steps.
 
     Returns:
@@ -412,7 +412,7 @@ def get_node_execution_command(
 
     log_level = logging.getLevelName(logger.getEffectiveLevel())
 
-    action = f"magnus execute_single_node {run_id} " f"{node._command_friendly_name()}" f" --log-level {log_level}"
+    action = f"runnable execute_single_node {run_id} " f"{node._command_friendly_name()}" f" --log-level {log_level}"
 
     if context.run_context.pipeline_file:
         action = action + f" --file {context.run_context.pipeline_file}"
@@ -453,7 +453,7 @@ def get_fan_command(
     """
     log_level = logging.getLevelName(logger.getEffectiveLevel())
     action = (
-        f"magnus fan {run_id} "
+        f"runnable fan {run_id} "
         f"{node._command_friendly_name()} "
         f"--mode {mode} "
         f"--file {context.run_context.pipeline_file} "
@@ -497,7 +497,7 @@ def get_job_execution_command(node: TaskNode, over_write_run_id: str = "") -> str:
 
     cli_command, cli_options = node.executable.get_cli_options()
 
-    action = f"magnus execute_{cli_command} {run_id} " f" --log-level {log_level}"
+    action = f"runnable execute_{cli_command} {run_id} " f" --log-level {log_level}"
 
     action = action + f" --entrypoint {defaults.ENTRYPOINT.SYSTEM.value}"
@@ -538,7 +538,7 @@ def get_provider_by_name_and_type(service_type: str, service_details: defaults.ServiceConfig):
     if "config" in service_details:
         service_config = service_details.get("config", {})
 
-    logger.info(f"Trying to get a service of {service_type} of the name {service_name} with config: {service_config}")
+    logger.debug(f"Trying to get a service of {service_type} of the name {service_name} with config: {service_config}")
     try:
         mgr = driver.DriverManager(
             namespace=namespace,
@@ -548,24 +548,10 @@ def get_provider_by_name_and_type(service_type: str, service_details: defaults.ServiceConfig):
         )
        return mgr.driver
     except Exception as _e:
+        logger.exception(f"Could not find the service of type: {service_type} with config: {service_details}")
         raise Exception(f"Could not find the service of type: {service_type} with config: {service_details}") from _e
 
 
-def get_duration_between_datetime_strings(start_time: str, end_time: str) -> str:
-    """Given two datetime strings, compute the duration between them.
-
-    Args:
-        start_time (str): ISO format datetime string
-        end_time (str): ISO format datetime string
-    Returns:
-        The duration between the time in string format
-    """
-    start = datetime.fromisoformat(start_time)
-    end = datetime.fromisoformat(end_time)
-
-    return str(end - start)
-
-
 def get_run_config() -> dict:
     """Given an executor with assigned services, return the run_config.
@@ -595,8 +581,8 @@ def json_to_ordered_dict(json_str: str) -> TypeMapVariable:
 
     return OrderedDict()
 
 
-def set_magnus_environment_variables(run_id: str = "", configuration_file: str = "", tag: str = "") -> None:
-    """Set the environment variables used by magnus. This function should be called during the prepare configurations
+def set_runnable_environment_variables(run_id: str = "", configuration_file: str = "", tag: str = "") -> None:
+    """Set the environment variables used by runnable. This function should be called during configuration preparation
     by all executors.
 
     Args:
@@ -608,14 +594,14 @@ def set_runnable_environment_variables(run_id: str = "", configuration_file: str = "", tag: str = "") -> None:
     os.environ[defaults.ENV_RUN_ID] = run_id
 
     if configuration_file:
-        os.environ[defaults.MAGNUS_CONFIG_FILE] = configuration_file
+        os.environ[defaults.RUNNABLE_CONFIG_FILE] = configuration_file
 
     if tag:
-        os.environ[defaults.MAGNUS_RUN_TAG] = tag
+        os.environ[defaults.RUNNABLE_RUN_TAG] = tag
 
 
 def gather_variables() -> dict:
-    """Gather all the environment variables used by magnus. All the variables start with MAGNUS_VAR_.
+    """Gather all the environment variables used by runnable. All the variables start with RUNNABLE_VAR_.
 
     Returns:
         dict: All the environment variables present in the environment.
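
Note on the ShellTaskType change above: return values are handed back from the subprocess by appending a sentinel echo and an `env` dump to the user's command, then parsing every KEY=VALUE line after the sentinel. A minimal, self-contained sketch of that protocol, independent of runnable (the `answer` key and all names here are illustrative only, not part of the library):

    import json
    import subprocess

    COLLECT = "=== COLLECT ==="

    # Chain the user command with a sentinel and an env dump; shell=True keeps
    # the exported variable visible to `env` in the same shell.
    proc = subprocess.run(
        f"export answer=42 && echo '{COLLECT}' && env",
        shell=True,
        capture_output=True,
        text=True,
    )

    capture = False
    collected = {}
    for line in proc.stdout.splitlines():
        if line.strip() == COLLECT:
            capture = True  # everything after the sentinel is `env` output
            continue
        if capture and "=" in line:
            key, value = line.split("=", 1)
            # runnable additionally filters to the declared return names here.
            try:
                collected[key] = json.loads(value)  # prefer JSON-typed values
            except json.JSONDecodeError:
                collected[key] = value  # fall back to the raw string

    assert collected["answer"] == 42

As in the diff, JSON decoding is attempted first so that numbers and booleans survive the shell round-trip; anything that is not valid JSON is kept as a plain string.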
diff --git a/scripts/update_version.py b/scripts/update_version.py index a67a007f..10a1b3d8 100644 --- a/scripts/update_version.py +++ b/scripts/update_version.py @@ -3,9 +3,7 @@ from tomlkit import dump, load if len(sys.argv) < 2: - msg = ( - "Usage: python upload_version " - ) + msg = "Usage: python upload_version " new_version = sys.argv[1] output_file = "pyproject.toml" @@ -13,6 +11,6 @@ output_file = sys.argv[2] toml_file = "pyproject.toml" -contents = load(open(toml_file, 'rb')) +contents = load(open(toml_file, "rb")) contents["tool"]["poetry"]["version"] = str(new_version) -dump(contents, open(output_file, 'w')) +dump(contents, open(output_file, "w")) diff --git a/tests/conftest.py b/tests/conftest.py index 3b9fd1cd..5d4e4fa3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,10 +2,10 @@ @fixture(scope="session", autouse=True) -def magnus_log(): +def runnable_log(): import logging - logger = logging.getLogger("magnus") + logger = logging.getLogger("runnable") logger.setLevel(logging.WARNING) logger.propagate = True yield logger diff --git a/tests/magnus/extensions/experiment_tracker/test_mlflow.py b/tests/magnus/extensions/experiment_tracker/test_mlflow.py deleted file mode 100644 index f972af4e..00000000 --- a/tests/magnus/extensions/experiment_tracker/test_mlflow.py +++ /dev/null @@ -1 +0,0 @@ -from magnus.extensions.experiment_tracker.mlflow import implementation diff --git a/tests/magnus/extensions/run_log_store/__init__.py b/tests/magnus/extensions/run_log_store/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/magnus/extensions/run_log_store/test_generic_chunked.py b/tests/magnus/extensions/run_log_store/test_generic_chunked.py deleted file mode 100644 index 3f4d4335..00000000 --- a/tests/magnus/extensions/run_log_store/test_generic_chunked.py +++ /dev/null @@ -1 +0,0 @@ -from magnus.extensions.run_log_store import generic_chunked diff --git a/tests/magnus/test_experiment_tracker.py b/tests/magnus/test_experiment_tracker.py deleted file mode 100644 index acdfdd2e..00000000 --- a/tests/magnus/test_experiment_tracker.py +++ /dev/null @@ -1,36 +0,0 @@ -import pytest -import contextlib - -from magnus import experiment_tracker - - -@pytest.fixture(autouse=True) -def instantiable_base_class(monkeypatch): - monkeypatch.setattr(experiment_tracker.BaseExperimentTracker, "__abstractmethods__", set()) - yield - - -def test_base_run_log_store_context_property(mocker, monkeypatch, instantiable_base_class): - mock_run_context = mocker.Mock() - - monkeypatch.setattr(experiment_tracker.context, "run_context", mock_run_context) - - assert experiment_tracker.BaseExperimentTracker()._context == mock_run_context - - -def test_client_connection_is_null_context(): - ep = experiment_tracker.BaseExperimentTracker() - - assert isinstance(ep.client_context, contextlib.nullcontext) - - -def test_do_nothing_experiment_tracker_log_metric_does_nothing(): - ep = experiment_tracker.DoNothingTracker() - - ep.log_metric(key="foo", value=3.41) - - -def test_do_nothing_experiment_tracker_log_parmeter_does_nothing(): - ep = experiment_tracker.DoNothingTracker() - - ep.log_parameter(key="foo", value="bar") diff --git a/tests/magnus/test_interaction.py b/tests/magnus/test_interaction.py deleted file mode 100644 index 6b54f6d7..00000000 --- a/tests/magnus/test_interaction.py +++ /dev/null @@ -1,272 +0,0 @@ -import os -import json -import logging - -import pytest - -from magnus import ( - defaults, # pylint: disable=import-error - exceptions, # pylint: disable=import-error - 
interaction, # pylint: disable=import-error -) - - -@pytest.fixture(autouse=True) -def mock_context(monkeypatch, mocker, request): - if "noautofixt" in request.keywords: - yield None - return - mc = mocker.MagicMock() - monkeypatch.setattr(interaction, "context", mc) - yield - - -def test_track_this_adds_values_to_environ(): - interaction.track_this(a="b") - assert defaults.TRACK_PREFIX + "a" + f"{defaults.STEP_INDICATOR}0" in os.environ - del os.environ[defaults.TRACK_PREFIX + "a" + f"{defaults.STEP_INDICATOR}0"] - - -def test_track_this_adds_multiple_values_to_environ(): - interaction.track_this(a="b", b="a") - assert defaults.TRACK_PREFIX + "a" + f"{defaults.STEP_INDICATOR}0" in os.environ - assert defaults.TRACK_PREFIX + "b" + f"{defaults.STEP_INDICATOR}0" in os.environ - del os.environ[defaults.TRACK_PREFIX + "a" + f"{defaults.STEP_INDICATOR}0"] - del os.environ[defaults.TRACK_PREFIX + "b" + f"{defaults.STEP_INDICATOR}0"] - - -def test_track_this_adds_step_if_non_zero(): - interaction.track_this(a="b", b="a", step=1) - assert defaults.TRACK_PREFIX + "a" f"{defaults.STEP_INDICATOR}1" in os.environ - assert defaults.TRACK_PREFIX + "b" + f"{defaults.STEP_INDICATOR}1" in os.environ - del os.environ[defaults.TRACK_PREFIX + "a" + f"{defaults.STEP_INDICATOR}1"] - del os.environ[defaults.TRACK_PREFIX + "b" + f"{defaults.STEP_INDICATOR}1"] - - -def test_store_paramenter_adds_values_to_environ(): - interaction.set_parameter(a="b") - assert defaults.PARAMETER_PREFIX + "a" in os.environ - del os.environ[defaults.PARAMETER_PREFIX + "a"] - - -def test_store_parameter_adds_multiple_values_to_environ(): - interaction.set_parameter(a="b", b="a") - assert defaults.PARAMETER_PREFIX + "a" in os.environ - assert defaults.PARAMETER_PREFIX + "b" in os.environ - del os.environ[defaults.PARAMETER_PREFIX + "a"] - del os.environ[defaults.PARAMETER_PREFIX + "b"] - - -def test_store_parameter_updates_if_present_and_asked(): - os.environ[defaults.PARAMETER_PREFIX + "a"] = "b" - os.environ[defaults.PARAMETER_PREFIX + "b"] = "a" - interaction.set_parameter(a="c", b="d") - assert json.loads(os.environ[defaults.PARAMETER_PREFIX + "a"]) == "c" - assert json.loads(os.environ[defaults.PARAMETER_PREFIX + "b"]) == "d" - - del os.environ[defaults.PARAMETER_PREFIX + "a"] - del os.environ[defaults.PARAMETER_PREFIX + "b"] - - -def test_get_parameter_returns_all_parameters_if_no_key_provided(monkeypatch, mocker): - monkeypatch.setattr(interaction.parameters, "get_user_set_parameters", mocker.MagicMock(return_value="this")) - - assert interaction.get_parameter() == "this" - - -def test_get_parameter_returns_parameters_if_provided(mocker, monkeypatch): - monkeypatch.setattr( - interaction.parameters, "get_user_set_parameters", mocker.MagicMock(return_value={"this": "that"}) - ) - - assert interaction.get_parameter("this") == "that" - - -def test_get_parameter_returns_parameters_raises_exception_if_key_not_found(mocker, monkeypatch): - monkeypatch.setattr( - interaction.parameters, "get_user_set_parameters", mocker.MagicMock(return_value={"this": "that"}) - ) - - with pytest.raises(Exception): - interaction.get_parameter("this1") - - -def test_get_secret_delegates_to_secrets_handler_get(mocker, monkeypatch): - mock_context = mocker.MagicMock() - mock_secrets_handler = mocker.MagicMock() - - mock_context.run_context.secrets_handler = mock_secrets_handler - - monkeypatch.setattr(interaction, "context", mock_context) - - mock_secrets_handler.get.return_value = "test" - - assert interaction.get_secret("secret") == "test" - - -def 
test_get_secret_raises_exception_if_secrets_handler_raises(mocker, monkeypatch): - mock_context = mocker.MagicMock() - mock_secrets_handler = mocker.MagicMock() - - mock_context.run_context.secrets_handler = mock_secrets_handler - - monkeypatch.setattr(interaction, "context", mock_context) - - mock_secrets_handler.get.side_effect = exceptions.SecretNotFoundError("test", "test") - with pytest.raises(exceptions.SecretNotFoundError): - assert interaction.get_secret("secret") - - -def test_get_from_catalog_delegates_to_catalog_handler(mocker, monkeypatch): - mock_context = mocker.MagicMock() - mock_catalog_handler = mocker.MagicMock() - - mock_context.run_context.catalog_handler = mock_catalog_handler - - mock_catalog_handler_get = mocker.MagicMock() - mock_catalog_handler.get = mock_catalog_handler_get - mock_context.run_context.run_id = "RUN_ID" - - mock_catalog_handler.compute_data_folder = "compute_folder" - monkeypatch.setattr(interaction, "context", mock_context) - - interaction.get_from_catalog("this") - - mock_catalog_handler_get.assert_called_once_with("this", run_id="RUN_ID") - - -def test_get_from_catalog_uses_destination_folder(mocker, monkeypatch): - mock_context = mocker.MagicMock() - mock_catalog_handler = mocker.MagicMock() - - mock_context.run_context.catalog_handler = mock_catalog_handler - - mock_catalog_handler_get = mocker.MagicMock() - mock_catalog_handler.get = mock_catalog_handler_get - mock_context.run_context.run_id = "RUN_ID" - - mock_catalog_handler.compute_data_folder = "compute_folder" - monkeypatch.setattr(interaction, "context", mock_context) - - interaction.get_from_catalog("this", destination_folder="use_this_folder") - - mock_catalog_handler_get.assert_called_once_with("this", run_id="RUN_ID") - - -def test_get_from_catalog_raises_warning_if_no_context_step_log(mocker, monkeypatch, caplog): - mock_context = mocker.MagicMock() - mock_catalog_handler = mocker.MagicMock() - - mock_context.run_context.catalog_handler = mock_catalog_handler - mock_context.run_context.executor._context_step_log = None - - mock_catalog_handler_get = mocker.MagicMock() - mock_catalog_handler.get = mock_catalog_handler_get - mock_context.run_context.run_id = "RUN_ID" - - mock_catalog_handler.compute_data_folder = "compute_folder" - monkeypatch.setattr(interaction, "context", mock_context) - - with caplog.at_level(logging.WARNING, logger="magnus"): - interaction.get_from_catalog("this") - - assert "Step log context was not found during interaction" in caplog.text - - mock_catalog_handler_get.assert_called_once_with("this", run_id="RUN_ID") - - -@pytest.mark.noautofixt -def test_get_run_id_returns_from_context(monkeypatch, mocker): - mock_context = mocker.MagicMock() - mock_context.run_context.run_id = "1234" - monkeypatch.setattr(interaction, "context", mock_context) - - assert interaction.get_run_id() == "1234" - - -@pytest.mark.noautofixt -def test_get_tag_raises_exception_if_no_executor(monkeypatch, mocker): - mock_context = mocker.MagicMock() - mock_context.run_context.executor = None - monkeypatch.setattr(interaction, "context", mock_context) - - with pytest.raises(Exception, match="Please raise a bug report"): - assert interaction.get_tag() == "1234" - - -@pytest.mark.noautofixt -def test_get_tag_gets_tag_from_context(monkeypatch, mocker): - mock_context = mocker.MagicMock() - mock_context.run_context.tag = "1234" - monkeypatch.setattr(interaction, "context", mock_context) - - assert interaction.get_tag() == "1234" - - -@pytest.mark.noautofixt -def 
test_get_experiment_context_raises_exception_if_no_executor(monkeypatch, mocker): - mock_context = mocker.MagicMock() - mock_context.run_context.executor = None - monkeypatch.setattr(interaction, "context", mock_context) - - with pytest.raises(Exception, match="Please raise a bug report"): - interaction.get_experiment_tracker_context() - - -@pytest.mark.noautofixt -def test_get_experiment_context_returns_client_context(monkeypatch, mocker): - mock_context = mocker.MagicMock() - mock_experiment_tracker = mocker.MagicMock() - mock_client_context = mocker.MagicMock() - - mock_experiment_tracker.client_context = mock_client_context - - mock_context.run_context.experiment_tracker = mock_experiment_tracker - monkeypatch.setattr(interaction, "context", mock_context) - - assert interaction.get_experiment_tracker_context() == mock_client_context - - -def test_put_object_calls_put_in_catalog(monkeypatch, mocker): - mock_dump = mocker.MagicMock() - mock_put_in_catalog = mocker.MagicMock() - mock_os_remove = mocker.MagicMock() - - monkeypatch.setattr(interaction, "put_in_catalog", mock_put_in_catalog) - monkeypatch.setattr(interaction.pickler.NativePickler, "dump", mock_dump) - monkeypatch.setattr(interaction.os, "remove", mock_os_remove) - - interaction.put_object("imdata", "iamsam") - - mock_dump.assert_called_once_with(data="imdata", path="iamsam") - mock_put_in_catalog.assert_called_once_with(f"iamsam.pickle") - mock_os_remove.assert_called_once_with(f"iamsam.pickle") - - -def test_get_object_calls_get_from_catalog(monkeypatch, mocker): - mock_load = mocker.MagicMock() - mock_get_from_catalog = mocker.MagicMock() - mock_os_remove = mocker.MagicMock() - - monkeypatch.setattr(interaction, "get_from_catalog", mock_get_from_catalog) - monkeypatch.setattr(interaction.pickler.NativePickler, "load", mock_load) - monkeypatch.setattr(interaction.os, "remove", mock_os_remove) - - interaction.get_object("iamsam") - - mock_load.assert_called_once_with("iamsam") - mock_get_from_catalog.assert_called_once_with(name="iamsam.pickle", destination_folder=".") - mock_os_remove.assert_called_once_with("iamsam.pickle") - - -def test_get_object_raises_exception_if_file_not_found(monkeypatch, mocker): - mock_load = mocker.MagicMock(side_effect=FileNotFoundError()) - mock_get_from_catalog = mocker.MagicMock() - mock_os_remove = mocker.MagicMock() - - monkeypatch.setattr(interaction, "get_from_catalog", mock_get_from_catalog) - monkeypatch.setattr(interaction.pickler.NativePickler, "load", mock_load) - monkeypatch.setattr(interaction.os, "remove", mock_os_remove) - - with pytest.raises(FileNotFoundError): - interaction.get_object("iamsam") diff --git a/tests/magnus/test_parmeters.py b/tests/magnus/test_parmeters.py deleted file mode 100644 index 718443b4..00000000 --- a/tests/magnus/test_parmeters.py +++ /dev/null @@ -1,196 +0,0 @@ -import os -import logging - -import pytest - -from pydantic import BaseModel, ValidationError - -from magnus import defaults -from magnus.parameters import ( - get_user_set_parameters, - cast_parameters_as_type, - bind_args_for_pydantic_model, - filter_arguments_for_func, -) - - -def test_get_user_set_parameters_does_nothing_if_prefix_does_not_match(monkeypatch): - monkeypatch.setenv("random", "value") - - assert get_user_set_parameters() == {} - - -def test_get_user_set_parameters_returns_the_parameter_if_prefix_match_int(monkeypatch): - monkeypatch.setenv(defaults.PARAMETER_PREFIX + "key", "1") - - assert get_user_set_parameters() == {"key": 1} - - -def 
test_get_user_set_parameters_returns_the_parameter_if_prefix_match_string(monkeypatch): - monkeypatch.setenv(defaults.PARAMETER_PREFIX + "key", '"value"') - - assert get_user_set_parameters() == {"key": "value"} - - -def test_get_user_set_parameters_removes_the_parameter_if_prefix_match_remove(monkeypatch): - monkeypatch.setenv(defaults.PARAMETER_PREFIX + "key", "1") - - assert defaults.PARAMETER_PREFIX + "key" in os.environ - - get_user_set_parameters(remove=True) - - assert defaults.PARAMETER_PREFIX + "key" not in os.environ - - -def test_cast_parameters_as_type_with_pydantic_model(): - class MyModel(BaseModel): - a: int - b: str - - value = {"a": 1, "b": "test"} - cast_value = cast_parameters_as_type(value, MyModel) - - assert isinstance(cast_value, MyModel) - assert cast_value.a == 1 - assert cast_value.b == "test" - - -def test_cast_parameters_as_type_with_dict(): - value = {"a": 1, "b": "test"} - cast_value = cast_parameters_as_type(value, dict) - - assert isinstance(cast_value, dict) - assert cast_value == value - - -def test_cast_parameters_as_type_with_non_special_type(): - value = "1" - cast_value = cast_parameters_as_type(value, int) - - assert isinstance(cast_value, int) - assert cast_value == 1 - - -def test_cast_parameters_as_type_with_none(): - value = None - cast_value = cast_parameters_as_type(value, None) - - assert cast_value is None - - -def test_cast_parameters_as_type_with_invalid_value(): - class MyModel(BaseModel): - a: int - - value = {"a": "test"} - with pytest.raises(ValidationError): - cast_parameters_as_type(value, MyModel) - - -def test_cast_parameters_as_type_with_invalid_type(caplog): - value = "test" - with caplog.at_level(logging.WARNING, logger="magnus"): - cast_parameters_as_type(value, list) - - assert f"Casting {value} of {type(value)} to {list} seems wrong!!" 
in caplog.text - - -def test_bind_args_for_pydantic_model_with_correct_params(): - class MyModel(BaseModel): - a: int - b: str - - params = {"a": 1, "b": "test"} - bound_model = bind_args_for_pydantic_model(params, MyModel) - - assert isinstance(bound_model, MyModel) - assert bound_model.a == 1 - assert bound_model.b == "test" - - -def test_bind_args_for_pydantic_model_with_extra_params(): - class MyModel(BaseModel): - a: int - b: str - - params = {"a": 1, "b": "test", "c": 2} - bound_model = bind_args_for_pydantic_model(params, MyModel) - - assert isinstance(bound_model, MyModel) - assert bound_model.a == 1 - assert bound_model.b == "test" - - -def test_bind_args_for_pydantic_model_with_missing_params(): - class MyModel(BaseModel): - a: int - b: str - - params = {"a": 1} - with pytest.raises(ValidationError): - bind_args_for_pydantic_model(params, MyModel) - - -def test_filter_arguments_for_func_with_simple_arguments(): - def func(a: int, b: str): - pass - - params = {"a": 1, "b": "test"} - bound_args = filter_arguments_for_func(func, params) - - assert bound_args == {"a": 1, "b": "test"} - - -def test_filter_arguments_for_func_with_pydantic_model_arguments(): - class MyModel(BaseModel): - a: int - b: str - - def func(inner: MyModel, c: str): - pass - - params = {"inner": {"a": 1, "b": "test"}, "c": "test"} - bound_args = filter_arguments_for_func(func, params) - - assert bound_args == {"inner": MyModel(a=1, b="test"), "c": "test"} - - -def test_filter_arguments_for_func_with_missing_arguments_but_defaults_present(): - class MyModel(BaseModel): - a: int - b: str - - def func(inner: MyModel, c: str = "test"): - pass - - params = {"inner": {"a": 1, "b": "test"}} - bound_args = filter_arguments_for_func(func, params) - - assert bound_args == {"inner": MyModel(a=1, b="test")} - - -def test_filter_arguments_for_func_with_missing_arguments_and_no_defaults(): - class MyModel(BaseModel): - a: int - b: str - - def func(inner: MyModel, c: str): - pass - - params = {"inner": {"a": 1, "b": "test"}} - with pytest.raises(ValueError, match=r"Parameter c is required for func but not provided"): - _ = filter_arguments_for_func(func, params) - - -def test_filter_arguments_for_func_with_map_variable_sent_in(): - class MyModel(BaseModel): - a: int - b: str - - params = {"inner": {"a": 1, "b": "test"}} - - def func(inner: MyModel, first: int, second: str): - pass - - bound_args = filter_arguments_for_func(func, params, map_variable={"first": 1, "second": "test"}) - assert bound_args == {"inner": MyModel(a=1, b="test"), "first": 1, "second": "test"} diff --git a/tests/magnus/test_tasks.py b/tests/magnus/test_tasks.py deleted file mode 100644 index fb33c9fc..00000000 --- a/tests/magnus/test_tasks.py +++ /dev/null @@ -1,226 +0,0 @@ -import contextlib -import os - -import pytest -from pydantic import BaseModel - -from magnus import defaults, tasks - - -@pytest.fixture -def configuration(): - return {"node_name": "dummy", "task_type": "dummy"} - - -def test_base_task_execute_command_raises_not_implemented_error(configuration): - base_execution_type = tasks.BaseTaskType(**configuration) - - with pytest.raises(NotImplementedError): - base_execution_type.execute_command() - - -def test_base_task_get_parameters_gets_from_utils(mocker, monkeypatch, configuration): - mock_get_user_set_parameters = mocker.MagicMock(configuration) - - monkeypatch.setattr(tasks.parameters, "get_user_set_parameters", mock_get_user_set_parameters) - - base_execution_type = tasks.BaseTaskType(**configuration) - - 
base_execution_type._get_parameters() - mock_get_user_set_parameters.assert_called_once_with(remove=False) - - -def test_base_task_set_parameters_does_nothing_if_no_parameters_sent(configuration): - base_execution_type = tasks.BaseTaskType(**configuration) - base_execution_type._set_parameters(params={}) - - -def test_base_task_set_parameters_sets_environ_vars_if_sent( - mocker, - monkeypatch, - configuration, -): - mock_os_environ = {} - - monkeypatch.setattr(tasks.os, "environ", mock_os_environ) - - base_execution_type = tasks.BaseTaskType(**configuration) - - class Parameter(BaseModel): - x: int = 10 - - base_execution_type._set_parameters(Parameter()) - - assert mock_os_environ[defaults.PARAMETER_PREFIX + "x"] == "10" - - -def test_python_task_command_raises_exception_if_function_fails(mocker, monkeypatch, configuration): - dummy_func = mocker.MagicMock(side_effect=Exception()) - - class DummyModule: - def __init__(self): - self.func = dummy_func - - monkeypatch.setattr(tasks.utils, "get_module_and_attr_names", mocker.MagicMock(return_value=("idk", "func"))) - monkeypatch.setattr(tasks.importlib, "import_module", mocker.MagicMock(return_value=DummyModule())) - - monkeypatch.setattr(tasks.BaseTaskType, "output_to_file", mocker.MagicMock(return_value=contextlib.nullcontext())) - - monkeypatch.setattr(tasks.parameters, "filter_arguments_for_func", mocker.MagicMock(return_value={"a": 1})) - - configuration["command"] = "dummy" - py_exec = tasks.PythonTaskType(**configuration) - with pytest.raises(Exception): - py_exec.execute_command() - - -def test_python_task_command_calls_with_no_parameters_if_none_sent(mocker, monkeypatch, configuration): - dummy_func = mocker.MagicMock(return_value=None) - - class DummyModule: - def __init__(self): - self.func = dummy_func - - monkeypatch.setattr(tasks.utils, "get_module_and_attr_names", mocker.MagicMock(return_value=("idk", "func"))) - monkeypatch.setattr(tasks.importlib, "import_module", mocker.MagicMock(return_value=DummyModule())) - monkeypatch.setattr(tasks.BaseTaskType, "output_to_file", mocker.MagicMock(return_value=contextlib.nullcontext())) - monkeypatch.setattr(tasks.parameters, "filter_arguments_for_func", mocker.MagicMock(return_value={})) - - configuration["command"] = "dummy" - py_exec = tasks.PythonTaskType(**configuration) - - py_exec.execute_command() - dummy_func.assert_called_once() - - -def test_python_task_command_calls_with_parameters_if_sent_by_filter(mocker, monkeypatch, configuration): - dummy_func = mocker.MagicMock(return_value=None) - - class DummyModule: - def __init__(self): - self.func = dummy_func - - monkeypatch.setattr(tasks.utils, "get_module_and_attr_names", mocker.MagicMock(return_value=("idk", "func"))) - monkeypatch.setattr(tasks.importlib, "import_module", mocker.MagicMock(return_value=DummyModule())) - - monkeypatch.setattr(tasks.BaseTaskType, "output_to_file", mocker.MagicMock(return_value=contextlib.nullcontext())) - monkeypatch.setattr(tasks.parameters, "filter_arguments_for_func", mocker.MagicMock(return_value={"a": 1})) - - configuration["command"] = "dummy" - py_exec = tasks.PythonTaskType(**configuration) - py_exec.execute_command() - dummy_func.assert_called_once_with(a=1) - - -def test_python_task_command_sends_no_mapped_variable_if_not_present_in_signature(mocker, monkeypatch, configuration): - dummy_func = mocker.MagicMock(return_value=None) - - class DummyModule: - def __init__(self): - self.func = dummy_func - - monkeypatch.setattr(tasks.utils, "get_module_and_attr_names", 
mocker.MagicMock(return_value=("idk", "func"))) - monkeypatch.setattr(tasks.importlib, "import_module", mocker.MagicMock(return_value=DummyModule())) - monkeypatch.setattr(tasks.BaseTaskType, "output_to_file", mocker.MagicMock(return_value=contextlib.nullcontext())) - monkeypatch.setattr(tasks.parameters, "filter_arguments_for_func", mocker.MagicMock(return_value={"a": 1})) - - configuration["command"] = "dummy" - py_exec = tasks.PythonTaskType(**configuration) - py_exec.execute_command(map_variable={"map_name": "map_value"}) - dummy_func.assert_called_once_with(a=1) - - -def test_python_task_command_sends_mapped_variable_if_present_in_signature(mocker, monkeypatch, configuration): - dummy_func = mocker.MagicMock(return_value=None) - - class DummyModule: - def __init__(self): - self.func = dummy_func - - monkeypatch.setattr(tasks.utils, "get_module_and_attr_names", mocker.MagicMock(return_value=("idk", "func"))) - monkeypatch.setattr(tasks.importlib, "import_module", mocker.MagicMock(return_value=DummyModule())) - monkeypatch.setattr(tasks.BaseTaskType, "output_to_file", mocker.MagicMock(return_value=contextlib.nullcontext())) - monkeypatch.setattr( - tasks.parameters, "filter_arguments_for_func", mocker.MagicMock(return_value={"a": 1, "map_name": "map_value"}) - ) - - configuration["command"] = "dummy" - py_exec = tasks.PythonTaskType(**configuration) - py_exec.execute_command() - dummy_func.assert_called_once_with(a=1, map_name="map_value") - - -def test_python_task_command_sets_env_variable_of_return_values(mocker, monkeypatch, configuration): - class Parameter(BaseModel): - a: int = 10 - - dummy_func = mocker.MagicMock(return_value=Parameter()) - - class DummyModule: - def __init__(self): - self.func = dummy_func - - monkeypatch.setattr(tasks.utils, "get_module_and_attr_names", mocker.MagicMock(return_value=("idk", "func"))) - monkeypatch.setattr(tasks.importlib, "import_module", mocker.MagicMock(return_value=DummyModule())) - monkeypatch.setattr(tasks.BaseTaskType, "output_to_file", mocker.MagicMock(return_value=contextlib.nullcontext())) - monkeypatch.setattr(tasks.parameters, "filter_arguments_for_func", mocker.MagicMock(return_value={"a": 1})) - - configuration["command"] = "dummy" - py_exec = tasks.PythonTaskType(**configuration) - py_exec.execute_command(map_variable="iterme") - - assert defaults.PARAMETER_PREFIX + "a" in os.environ - assert os.environ[defaults.PARAMETER_PREFIX + "a"] == "10" - - del os.environ[defaults.PARAMETER_PREFIX + "a"] - - -def test_notebook_raises_exception_if_command_is_not_a_notebook(): - with pytest.raises(Exception): - tasks.NotebookTaskType(command="path to notebook") - - -def test_notebook_raises_exception_if_ploomber_is_not_installed(mocker, monkeypatch): - task_exec = tasks.NotebookTaskType(command="test.ipynb", node_name="dummy") - - with pytest.raises(Exception): - task_exec.execute_command() - - -def test_shell_task_type_can_gather_env_vars_on_return(mocker, monkeypatch): - mock_set_params = mocker.MagicMock() - mock_output_to_file = mocker.MagicMock() - monkeypatch.setattr(tasks.ShellTaskType, "_set_parameters", mock_set_params) - monkeypatch.setattr(tasks.ShellTaskType, "output_to_file", mock_output_to_file) - - shell_task = tasks.ShellTaskType(command="export MAGNUS_PRM_x=1", node_name="dummy") - - shell_task.execute_command() - - assert mock_set_params.call_count == 1 - - _, kwargs = mock_set_params.call_args - assert kwargs["params"] == tasks.EasyModel(x="1") - - -class ParamModel(BaseModel): - x: int - - -def 
test_shell_task_type_can_gather_env_vars_on_return(mocker, monkeypatch): - mock_set_params = mocker.MagicMock() - mock_output_to_file = mocker.MagicMock() - monkeypatch.setattr(tasks.ShellTaskType, "_set_parameters", mock_set_params) - monkeypatch.setattr(tasks.ShellTaskType, "output_to_file", mock_output_to_file) - - shell_task = tasks.ShellTaskType( - command="export MAGNUS_PRM_x=1", - node_name="dummy", - ) - - shell_task.execute_command() - - assert mock_set_params.call_count == 1 - - _, kwargs = mock_set_params.call_args - assert kwargs["params"].x == 1 diff --git a/magnus/extensions/secrets/env_secrets/__init__.py b/tests/runnable/extensions/__init__.py similarity index 100% rename from magnus/extensions/secrets/env_secrets/__init__.py rename to tests/runnable/extensions/__init__.py diff --git a/tests/magnus/extensions/catalog/test_catalog_extension.py b/tests/runnable/extensions/catalog/test_catalog_extension.py similarity index 96% rename from tests/magnus/extensions/catalog/test_catalog_extension.py rename to tests/runnable/extensions/catalog/test_catalog_extension.py index d6389f15..b9204269 100644 --- a/tests/magnus/extensions/catalog/test_catalog_extension.py +++ b/tests/runnable/extensions/catalog/test_catalog_extension.py @@ -1,4 +1,4 @@ -from magnus.extensions.catalog import is_catalog_out_of_sync +from runnable.extensions.catalog import is_catalog_out_of_sync def test_is_catalog_out_of_sync_returns_true_for_empty_synced_catalogs(): diff --git a/tests/magnus/extensions/catalog/test_file_system.py b/tests/runnable/extensions/catalog/test_file_system.py similarity index 96% rename from tests/magnus/extensions/catalog/test_file_system.py rename to tests/runnable/extensions/catalog/test_file_system.py index b12f5261..40c03c90 100644 --- a/tests/magnus/extensions/catalog/test_file_system.py +++ b/tests/runnable/extensions/catalog/test_file_system.py @@ -2,9 +2,9 @@ import tempfile import os -from magnus import defaults -from magnus.extensions.catalog.file_system.implementation import FileSystemCatalog -import magnus.extensions.catalog.file_system.implementation as implementation +from runnable import defaults +from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog +import runnable.extensions.catalog.file_system.implementation as implementation def test_file_system_catalog_inits_default_values_if_none_config(): @@ -44,7 +44,7 @@ def test_file_system_catalog_get_copies_files_from_catalog_to_compute_folder_wit mock_context.run_log_store = mock_run_store mocker.patch( - "magnus.catalog.BaseCatalog._context", + "runnable.catalog.BaseCatalog._context", new_callable=mocker.PropertyMock, return_value=mock_context, ) @@ -73,7 +73,7 @@ def test_file_system_catalog_get_copies_files_from_catalog_to_compute_folder_wit mock_context.run_log_store = mock_run_store mocker.patch( - "magnus.catalog.BaseCatalog._context", + "runnable.catalog.BaseCatalog._context", new_callable=mocker.PropertyMock, return_value=mock_context, ) @@ -104,7 +104,7 @@ def test_file_system_catalog_put_copies_files_from_compute_folder_to_catalog_if_ mock_context.run_log_store = mock_run_store mocker.patch( - "magnus.catalog.BaseCatalog._context", + "runnable.catalog.BaseCatalog._context", new_callable=mocker.PropertyMock, return_value=mock_context, ) @@ -135,7 +135,7 @@ def test_file_system_catalog_put_copies_files_from_compute_folder_to_catalog_if_ mock_context.run_log_store = mock_run_store mocker.patch( - "magnus.catalog.BaseCatalog._context", + "runnable.catalog.BaseCatalog._context", 
new_callable=mocker.PropertyMock, return_value=mock_context, ) @@ -166,7 +166,7 @@ def test_file_system_catalog_put_copies_files_from_compute_folder_to_catalog_if_ mock_context.run_log_store = mock_run_store mocker.patch( - "magnus.catalog.BaseCatalog._context", + "runnable.catalog.BaseCatalog._context", new_callable=mocker.PropertyMock, return_value=mock_context, ) diff --git a/tests/magnus/extensions/catalog/test_k8s_pvc.py b/tests/runnable/extensions/catalog/test_k8s_pvc.py similarity index 77% rename from tests/magnus/extensions/catalog/test_k8s_pvc.py rename to tests/runnable/extensions/catalog/test_k8s_pvc.py index e951f357..e53c04a8 100644 --- a/tests/magnus/extensions/catalog/test_k8s_pvc.py +++ b/tests/runnable/extensions/catalog/test_k8s_pvc.py @@ -1,4 +1,4 @@ -from magnus.extensions.catalog.k8s_pvc.implementation import K8sPVCatalog +from runnable.extensions.catalog.k8s_pvc.implementation import K8sPVCatalog def test_get_catalog_location_returns_location_relative_to_mount_path(): diff --git a/tests/magnus/extensions/catalog/test_k8s_pvc_integration.py b/tests/runnable/extensions/catalog/test_k8s_pvc_integration.py similarity index 90% rename from tests/magnus/extensions/catalog/test_k8s_pvc_integration.py rename to tests/runnable/extensions/catalog/test_k8s_pvc_integration.py index 243fc7b0..b2cd0e13 100644 --- a/tests/magnus/extensions/catalog/test_k8s_pvc_integration.py +++ b/tests/runnable/extensions/catalog/test_k8s_pvc_integration.py @@ -1,6 +1,6 @@ import pytest -from magnus.extensions.catalog.k8s_pvc import integration +from runnable.extensions.catalog.k8s_pvc import integration def test_k8s_pvc_errors_for_local(): diff --git a/tests/magnus/extensions/executor/test_argo_executor.py b/tests/runnable/extensions/executor/test_argo_executor.py similarity index 98% rename from tests/magnus/extensions/executor/test_argo_executor.py rename to tests/runnable/extensions/executor/test_argo_executor.py index 3298e46a..4f6e831d 100644 --- a/tests/magnus/extensions/executor/test_argo_executor.py +++ b/tests/runnable/extensions/executor/test_argo_executor.py @@ -1,6 +1,6 @@ import pytest -from magnus.extensions.executor.argo import implementation +from runnable.extensions.executor.argo import implementation def test_secret_env_var_has_value_from_field(): diff --git a/tests/magnus/extensions/executor/test_generic_executor.py b/tests/runnable/extensions/executor/test_generic_executor.py similarity index 64% rename from tests/magnus/extensions/executor/test_generic_executor.py rename to tests/runnable/extensions/executor/test_generic_executor.py index f138f85b..db57fbda 100644 --- a/tests/magnus/extensions/executor/test_generic_executor.py +++ b/tests/runnable/extensions/executor/test_generic_executor.py @@ -1,10 +1,11 @@ -import pytest import logging -from magnus import defaults, exceptions -from magnus.extensions.executor import GenericExecutor -from magnus.extensions import executor -import magnus.extensions.executor as executor +import pytest + +import runnable.extensions.executor as executor +from runnable import defaults, exceptions +from runnable.extensions import executor +from runnable.extensions.executor import GenericExecutor @pytest.fixture(autouse=True) @@ -20,16 +21,6 @@ def mock_run_context(mocker, monkeypatch): return mock_run_context -def test_get_parameters_gets_parameters_from_parameters_file(mocker, monkeypatch, mock_run_context): - mock_run_context.parameters_file = "parameters_file" - mock_load_yaml = mocker.MagicMock(return_value={"executor": "test"}) - 
monkeypatch.setattr(executor.utils, "load_yaml", mock_load_yaml) - - test_executor = GenericExecutor() - assert test_executor._get_parameters() == {"executor": "test"} - mock_load_yaml.assert_called_once_with("parameters_file") - - def test_get_parameters_gets_parameters_from_user_parameters(mocker, monkeypatch, mock_run_context): mock_run_context.parameters_file = "" monkeypatch.setattr( @@ -55,63 +46,6 @@ def test_get_parameters_user_parameters_overwrites_parameters_from_parameters_fi assert test_executor._get_parameters() == {"executor": "that"} -def test_set_up_for_rerun_throws_exception_if_run_log_not_exists(mocker, monkeypatch, mock_run_context): - mock_run_log_store = mocker.MagicMock() - - mock_run_context.run_log_store = mock_run_log_store - mock_run_context.original_run_id = "original_run_id" - mock_run_log_store.get_run_log_by_id = mocker.MagicMock(side_effect=exceptions.RunLogNotFoundError("test")) - - with pytest.raises(Exception, match="Expected a run log with id: original_run_id"): - GenericExecutor()._set_up_for_re_run(parameters={}) - - -def test_set_up_for_re_run_syncs_catalog_and_parameters(mocker, monkeypatch, mock_run_context): - mock_catalog_handler_sync_between_runs = mocker.MagicMock() - mock_catalog_handler = mocker.MagicMock() - mock_catalog_handler.sync_between_runs = mock_catalog_handler_sync_between_runs - - mock_run_context.catalog_handler = mock_catalog_handler - mock_run_context.run_id = "run_id" - mock_run_context.original_run_id = "original_run_id" - - mock_attempt_run_log = mocker.MagicMock() - mock_attempt_run_log.parameters = {"ghost": "from past"} - - mock_run_log_store = mocker.MagicMock() - mock_run_log_store.get_run_log_by_id.return_value = mock_attempt_run_log - mock_run_context.run_log_store = mock_run_log_store - - parameters = {} - GenericExecutor()._set_up_for_re_run(parameters=parameters) - - mock_catalog_handler_sync_between_runs.assert_called_once_with(previous_run_id="original_run_id", run_id="run_id") - assert parameters == {"ghost": "from past"} - - -def test_set_up_for_re_run_syncs_catalog_and_updates_parameters(mocker, monkeypatch, mock_run_context): - mock_catalog_handler_sync_between_runs = mocker.MagicMock() - mock_catalog_handler = mocker.MagicMock() - mock_catalog_handler.sync_between_runs = mock_catalog_handler_sync_between_runs - - mock_run_context.catalog_handler = mock_catalog_handler - mock_run_context.run_id = "run_id" - mock_run_context.original_run_id = "original_run_id" - - mock_attempt_run_log = mocker.MagicMock() - mock_attempt_run_log.parameters = {"ghost": "from past"} - - mock_run_log_store = mocker.MagicMock() - mock_run_log_store.get_run_log_by_id.return_value = mock_attempt_run_log - mock_run_context.run_log_store = mock_run_log_store - - parameters = {"present": "now"} - GenericExecutor()._set_up_for_re_run(parameters=parameters) - - mock_catalog_handler_sync_between_runs.assert_called_once_with(previous_run_id="original_run_id", run_id="run_id") - assert parameters == {"present": "now", "ghost": "from past"} - - def test_set_up_run_log_throws_exception_if_run_log_already_exists(mocker, monkeypatch, mock_run_context): mock_run_log_store = mocker.MagicMock() @@ -139,22 +73,6 @@ def test_set_up_run_log_calls_get_parameters(mocker, monkeypatch, mock_run_conte assert mock_get_parameters.call_count == 1 -def test_set_up_run_log_calls_set_up_for_re_run(mocker, monkeypatch, mock_run_context): - mock_set_up_for_re_run = mocker.MagicMock() - monkeypatch.setattr(GenericExecutor, "_set_up_for_re_run", mock_set_up_for_re_run) 
- - mock_get_parameters = mocker.MagicMock() - monkeypatch.setattr(GenericExecutor, "_get_parameters", mock_get_parameters) - - mock_run_context.run_log_store.get_run_log_by_id = mocker.MagicMock( - side_effect=exceptions.RunLogNotFoundError("test") - ) - - GenericExecutor()._set_up_run_log() - - assert mock_set_up_for_re_run.call_count == 1 - - def test_set_up_run_log_calls_create_run_log(mocker, monkeypatch, mock_run_context): mock_get_parameters = mocker.MagicMock() monkeypatch.setattr(GenericExecutor, "_get_parameters", mock_get_parameters) @@ -169,8 +87,6 @@ def test_set_up_run_log_calls_create_run_log(mocker, monkeypatch, mock_run_conte mock_run_context.run_id = "test" mock_run_context.tag = "tag" mock_run_context.dag_hash = "dag_hash" - mock_run_context.use_cached = False - mock_run_context.original_run_id = "original_run_id" GenericExecutor()._set_up_run_log() @@ -179,8 +95,6 @@ def test_set_up_run_log_calls_create_run_log(mocker, monkeypatch, mock_run_conte tag="tag", status=defaults.PROCESSING, dag_hash="dag_hash", - use_cached=False, - original_run_id="original_run_id", ) @@ -236,8 +150,8 @@ def test_base_executor_prepare_for_graph_execution_calls(mocker, monkeypatch, mo base_executor.prepare_for_graph_execution() - assert mock_configure_for_traversal.call_count == 4 - assert mock_validate.call_count == 4 + assert mock_configure_for_traversal.call_count == 3 + assert mock_validate.call_count == 3 def test_base_execution_prepare_for_node_calls(mocker, monkeypatch, mock_run_context): @@ -257,8 +171,8 @@ def test_base_execution_prepare_for_node_calls(mocker, monkeypatch, mock_run_con base_executor.prepare_for_node_execution() - assert mock_configure_for_execution.call_count == 4 - assert mock_validate.call_count == 4 + assert mock_configure_for_execution.call_count == 3 + assert mock_validate.call_count == 3 def test_base_executor__sync_catalog_raises_exception_if_stage_not_in_get_or_put(mocker, monkeypatch): @@ -274,7 +188,7 @@ def test_sync_catalog_does_nothing_for_terminal_node(mocker, monkeypatch, mock_r test_executor = GenericExecutor() test_executor._context_node = mock_node - test_executor._sync_catalog("test", stage="get") + test_executor._sync_catalog(stage="get") def test_sync_catalog_does_nothing_for_no_catalog_settings(mocker, monkeypatch, mock_run_context): @@ -284,7 +198,7 @@ def test_sync_catalog_does_nothing_for_no_catalog_settings(mocker, monkeypatch, test_executor = GenericExecutor() test_executor._context_node = mock_node - test_executor._sync_catalog("test", stage="get") + test_executor._sync_catalog(stage="get") def test_sync_catalog_does_nothing_for_catalog_settings_stage_not_in(mocker, monkeypatch, mock_run_context): @@ -294,7 +208,7 @@ def test_sync_catalog_does_nothing_for_catalog_settings_stage_not_in(mocker, mon test_executor = GenericExecutor() test_executor._context_node = mock_node - test_executor._sync_catalog("test", stage="put") + test_executor._sync_catalog(stage="put") def test_sync_catalog_returns_nothing_if_no_syncing_for_node(mocker, monkeypatch, mock_run_context): @@ -305,7 +219,7 @@ def test_sync_catalog_returns_nothing_if_no_syncing_for_node(mocker, monkeypatch test_executor = GenericExecutor() test_executor._context_node = mock_node - assert test_executor._sync_catalog("test", stage="get") is None + assert test_executor._sync_catalog(stage="get") is None def test_sync_catalog_returns_empty_list_if_asked_nothing_in_stage(mocker, monkeypatch, mock_run_context): @@ -318,8 +232,8 @@ def 
test_sync_catalog_returns_empty_list_if_asked_nothing_in_stage(mocker, monke test_executor = GenericExecutor() test_executor._context_node = mock_node - assert test_executor._sync_catalog("test", stage="get") == [] - assert test_executor._sync_catalog("test", stage="put") == [] + assert test_executor._sync_catalog(stage="get") == [] + assert test_executor._sync_catalog(stage="put") == [] def test_sync_catalog_calls_get_from_catalog_handler(mocker, monkeypatch, mock_run_context): @@ -338,7 +252,7 @@ def test_sync_catalog_calls_get_from_catalog_handler(mocker, monkeypatch, mock_r test_executor = GenericExecutor() test_executor._context_node = mock_node - data_catalogs = test_executor._sync_catalog(step_log=mock_step_log, stage="get") + data_catalogs = test_executor._sync_catalog(stage="get") assert data_catalogs == ["data_catalog"] mock_catalog_handler_get.assert_called_once_with(name="me", run_id="run_id", compute_data_folder="compute_folder") @@ -360,7 +274,7 @@ def test_sync_catalog_calls_get_from_catalog_handler_as_per_input(mocker, monkey test_executor = GenericExecutor() test_executor._context_node = mock_node - data_catalogs = test_executor._sync_catalog(step_log=mock_step_log, stage="get") + data_catalogs = test_executor._sync_catalog(stage="get") assert data_catalogs == ["data_catalog", "data_catalog"] assert mock_catalog_handler_get.call_count == 2 @@ -382,7 +296,7 @@ def test_sync_catalog_calls_put_from_catalog_handler(mocker, monkeypatch, mock_r test_executor = GenericExecutor() test_executor._context_node = mock_node - data_catalogs = test_executor._sync_catalog(step_log=mock_step_log, stage="put") + data_catalogs = test_executor._sync_catalog(stage="put") assert data_catalogs == ["data_catalog"] mock_catalog_handler_put.assert_called_once_with( @@ -406,7 +320,7 @@ def test_sync_catalog_calls_put_from_catalog_handler_as_per_input(mocker, monkey test_executor = GenericExecutor() test_executor._context_node = mock_node - data_catalogs = test_executor._sync_catalog(step_log=mock_step_log, stage="put") + data_catalogs = test_executor._sync_catalog(stage="put") assert data_catalogs == ["data_catalog", "data_catalog"] assert mock_catalog_handler_put.call_count == 2 @@ -428,7 +342,7 @@ def test_sync_catalog_calls_put_sends_synced_catalogs_to_catalog_handler(mocker, test_executor = GenericExecutor() test_executor._context_node = mock_node - data_catalogs = test_executor._sync_catalog(step_log=mock_step_log, stage="put", synced_catalogs="in_sync") + data_catalogs = test_executor._sync_catalog(stage="put", synced_catalogs="in_sync") assert data_catalogs == ["data_catalog"] mock_catalog_handler_put.assert_called_once_with( @@ -469,60 +383,11 @@ def test_step_attempt_returns_one_by_default(): def test_step_attempt_returns_from_env(monkeypatch): test_executor = GenericExecutor() - monkeypatch.setenv("MAGNUS_STEP_ATTEMPT", "2") + monkeypatch.setenv("RUNNABLE_STEP_ATTEMPT", "2") assert test_executor.step_attempt_number == 2 -def test_base_executor__is_step_eligible_for_rerun_returns_true_if_not_use_cached(mock_run_context): - test_executor = GenericExecutor() - - mock_run_context.use_cached = False - - assert test_executor._is_step_eligible_for_rerun(node=None) - - -def test_base_executor__is_step_eligible_for_rerun_returns_true_if_step_log_not_found(mocker, mock_run_context): - mock_run_context.use_cached = True - - mock_node = mocker.MagicMock() - mock_node._get_step_log_name.return_value = "IdontExist" - - mock_run_context.run_log_store.get_step_log.side_effect = 
exceptions.StepLogNotFoundError( - run_id="test", name="test" - ) - - test_executor = GenericExecutor() - - assert test_executor._is_step_eligible_for_rerun(node=mock_node) - - -def test_base_executor__is_step_eligible_for_rerun_returns_true_if_step_failed(mocker, mock_run_context): - mock_run_context.use_cached = True - - mock_node = mocker.MagicMock() - mock_node._get_step_log_name.return_value = "IExist" - - mock_run_context.run_log_store.get_step_log.return_value.status = defaults.FAIL - - test_executor = GenericExecutor() - - assert test_executor._is_step_eligible_for_rerun(node=mock_node) is True - - -def test_base_executor__is_step_eligible_for_rerun_returns_false_if_step_succeeded(mocker, mock_run_context): - mock_run_context.use_cached = True - - mock_node = mocker.MagicMock() - mock_node._get_step_log_name.return_value = "IExist" - - mock_run_context.run_log_store.get_step_log.return_value.status = defaults.SUCCESS - - test_executor = GenericExecutor() - - assert test_executor._is_step_eligible_for_rerun(node=mock_node) is False - - def test_base_executor_resolve_executor_config_gives_global_config_if_node_does_not_override(mocker, mock_run_context): mock_node = mocker.MagicMock() mock_node._get_executor_config.return_value = {} @@ -590,71 +455,6 @@ def test_get_status_and_next_node_name_returns_on_failure_node_if_failed(mocker, assert test_executor._get_status_and_next_node_name(mock_node, mock_dag) == (defaults.FAIL, "me_please") -def test_execute_node_calls_store_parameter_with_update_false(mocker, monkeypatch, mock_run_context): - mock_parameters = mocker.MagicMock() - monkeypatch.setattr(executor, "parameters", mock_parameters) - - mock_run_context.run_log_store.get_parameters.return_value = {"a": 1} - - test_executor = GenericExecutor() - test_executor._sync_catalog = mocker.MagicMock() - - mock_node = mocker.MagicMock() - test_executor._execute_node(mock_node) - - args, kwargs = mock_parameters.set_user_defined_params_as_environment_variables.call_args - assert args[0] == {"a": 1} - - -def test_execute_node_raises_exception_if_node_execute_raises_one(mocker, monkeypatch, mock_run_context, caplog): - mock_run_context.run_log_store.get_parameters.return_value = {"a": 1} - test_executor = GenericExecutor() - test_executor._sync_catalog = mocker.MagicMock() - - mock_node = mocker.MagicMock() - mock_node.execute.side_effect = Exception() - - with caplog.at_level(logging.ERROR, logger="magnus") and pytest.raises(Exception): - test_executor._execute_node(mock_node) - - assert "This is clearly magnus fault, " in caplog.text - - -def test_execute_node_sets_step_log_status_to_fail_if_node_fails(mocker, monkeypatch, mock_run_context): - mock_step_log = mocker.MagicMock() - mock_run_context.run_log_store.get_step_log.return_value = mock_step_log - mock_run_context.run_log_store.get_parameters.return_value = {"a": 1} - - mock_attempt_log = mocker.MagicMock() - mock_attempt_log.status = defaults.FAIL - - mock_node = mocker.MagicMock() - mock_node.execute.return_value = mock_attempt_log - - test_executor = GenericExecutor() - test_executor._sync_catalog = mocker.MagicMock() - - test_executor._execute_node(mock_node) - - assert mock_step_log.status == defaults.FAIL - - -def test_execute_node_sets_step_log_status_to_success_if_node_succeeds(mocker, monkeypatch, mock_run_context): - mock_step_log = mocker.MagicMock() - mock_run_context.run_log_store.get_step_log.return_value = mock_step_log - mock_run_context.run_log_store.get_parameters.return_value = {"a": 1} - - mock_node = 
mocker.MagicMock() - mock_node.execute.return_value.status = defaults.SUCCESS - - test_executor = GenericExecutor() - test_executor._sync_catalog = mocker.MagicMock() - - test_executor._execute_node(mock_node) - - assert mock_step_log.status == defaults.SUCCESS - - def test_send_return_code_raises_exception_if_pipeline_execution_failed(mocker, mock_run_context): mock_run_context.run_log_store.get_run_log_by_id.return_value.status = defaults.FAIL diff --git a/tests/magnus/extensions/executor/test_local_container_integration.py b/tests/runnable/extensions/executor/test_local_container_integration.py similarity index 95% rename from tests/magnus/extensions/executor/test_local_container_integration.py rename to tests/runnable/extensions/executor/test_local_container_integration.py index 6b87e7b9..59937d61 100644 --- a/tests/magnus/extensions/executor/test_local_container_integration.py +++ b/tests/runnable/extensions/executor/test_local_container_integration.py @@ -1,6 +1,6 @@ from pathlib import Path -from magnus.extensions.executor.local_container import implementation +from runnable.extensions.executor.local_container import implementation def test_configure_for_traversal_populates_volumes(mocker, monkeypatch): diff --git a/tests/magnus/extensions/executor/test_local_executor.py b/tests/runnable/extensions/executor/test_local_executor.py similarity index 82% rename from tests/magnus/extensions/executor/test_local_executor.py rename to tests/runnable/extensions/executor/test_local_executor.py index d451dc30..b45df8bb 100644 --- a/tests/magnus/extensions/executor/test_local_executor.py +++ b/tests/runnable/extensions/executor/test_local_executor.py @@ -1,4 +1,4 @@ -from magnus.extensions.executor.local.implementation import LocalExecutor +from runnable.extensions.executor.local.implementation import LocalExecutor def test_local_executor_execute_node_just_calls___execute_node(mocker, monkeypatch): diff --git a/tests/magnus/extensions/__init__.py b/tests/runnable/extensions/run_log_store/__init__.py similarity index 100% rename from tests/magnus/extensions/__init__.py rename to tests/runnable/extensions/run_log_store/__init__.py diff --git a/tests/magnus/extensions/run_log_store/test_chunked_k8s_pvc_integration.py b/tests/runnable/extensions/run_log_store/test_chunked_k8s_pvc_integration.py similarity index 88% rename from tests/magnus/extensions/run_log_store/test_chunked_k8s_pvc_integration.py rename to tests/runnable/extensions/run_log_store/test_chunked_k8s_pvc_integration.py index 8b197c9f..b17c1dc8 100644 --- a/tests/magnus/extensions/run_log_store/test_chunked_k8s_pvc_integration.py +++ b/tests/runnable/extensions/run_log_store/test_chunked_k8s_pvc_integration.py @@ -1,6 +1,6 @@ import pytest -from magnus.extensions.run_log_store.chunked_k8s_pvc import integration +from runnable.extensions.run_log_store.chunked_k8s_pvc import integration def test_k8s_pvc_errors_for_local(): diff --git a/tests/magnus/extensions/run_log_store/test_file_system.py b/tests/runnable/extensions/run_log_store/test_file_system.py similarity index 95% rename from tests/magnus/extensions/run_log_store/test_file_system.py rename to tests/runnable/extensions/run_log_store/test_file_system.py index 18c476e1..8625e12a 100644 --- a/tests/magnus/extensions/run_log_store/test_file_system.py +++ b/tests/runnable/extensions/run_log_store/test_file_system.py @@ -1,9 +1,9 @@ import pytest -from magnus.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore -import 
magnus.extensions.run_log_store.file_system.implementation as implementation -from magnus import defaults -from magnus import exceptions +from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore +import runnable.extensions.run_log_store.file_system.implementation as implementation +from runnable import defaults +from runnable import exceptions def test_file_system_run_log_store_log_folder_name_defaults_if_not_provided(): diff --git a/tests/runnable/extensions/run_log_store/test_generic_chunked.py b/tests/runnable/extensions/run_log_store/test_generic_chunked.py new file mode 100644 index 00000000..9b003829 --- /dev/null +++ b/tests/runnable/extensions/run_log_store/test_generic_chunked.py @@ -0,0 +1 @@ +from runnable.extensions.run_log_store import generic_chunked diff --git a/tests/magnus/extensions/run_log_store/test_k8s_pvc_integration.py b/tests/runnable/extensions/run_log_store/test_k8s_pvc_integration.py similarity index 89% rename from tests/magnus/extensions/run_log_store/test_k8s_pvc_integration.py rename to tests/runnable/extensions/run_log_store/test_k8s_pvc_integration.py index a1c11cb7..64b9ab8c 100644 --- a/tests/magnus/extensions/run_log_store/test_k8s_pvc_integration.py +++ b/tests/runnable/extensions/run_log_store/test_k8s_pvc_integration.py @@ -1,6 +1,6 @@ import pytest -from magnus.extensions.run_log_store.k8s_pvc import integration +from runnable.extensions.run_log_store.k8s_pvc import integration def test_k8s_pvc_errors_for_local(): diff --git a/tests/magnus/extensions/secrets/test_dotenv.py b/tests/runnable/extensions/secrets/test_dotenv.py similarity index 93% rename from tests/magnus/extensions/secrets/test_dotenv.py rename to tests/runnable/extensions/secrets/test_dotenv.py index 5525c704..c195c162 100644 --- a/tests/magnus/extensions/secrets/test_dotenv.py +++ b/tests/runnable/extensions/secrets/test_dotenv.py @@ -1,9 +1,9 @@ import pytest -from magnus import defaults, exceptions +from runnable import defaults, exceptions -from magnus.extensions.secrets.dotenv.implementation import DotEnvSecrets -import magnus.extensions.secrets.dotenv.implementation as implementation +from runnable.extensions.secrets.dotenv.implementation import DotEnvSecrets +import runnable.extensions.secrets.dotenv.implementation as implementation def test_dot_env_secrets_defaults_to_default_location_if_none_provided(mocker, monkeypatch): diff --git a/tests/magnus/extensions/secrets/test_env_secrets_manager.py b/tests/runnable/extensions/secrets/test_env_secrets_manager.py similarity index 92% rename from tests/magnus/extensions/secrets/test_env_secrets_manager.py rename to tests/runnable/extensions/secrets/test_env_secrets_manager.py index ffb40ba7..60294d45 100644 --- a/tests/magnus/extensions/secrets/test_env_secrets_manager.py +++ b/tests/runnable/extensions/secrets/test_env_secrets_manager.py @@ -1,8 +1,8 @@ import pytest import os -from magnus.extensions.secrets.env_secrets.implementation import EnvSecretsManager -from magnus import exceptions +from runnable.extensions.secrets.env_secrets.implementation import EnvSecretsManager +from runnable import exceptions def test_env_secrets_manager_raises_error_if_name_provided_and_not_present(): diff --git a/tests/magnus/extensions/test_node_extensions.py b/tests/runnable/extensions/test_node_extensions.py similarity index 64% rename from tests/magnus/extensions/test_node_extensions.py rename to tests/runnable/extensions/test_node_extensions.py index 67d3ebee..06c43844 100644 --- 
+++ b/tests/runnable/extensions/test_node_extensions.py
@@ -1,9 +1,9 @@
 import pytest

-from magnus import defaults
-from magnus.extensions import nodes as nodes
+from runnable import defaults
+from runnable.extensions import nodes as nodes

-from magnus.tasks import BaseTaskType
+from runnable.tasks import BaseTaskType


 @pytest.fixture(autouse=True)
@@ -47,108 +47,6 @@ def test_task_node_mocks_if_mock_is_true(mocker, monkeypatch):

     assert attempt_log.status == defaults.SUCCESS


-def test_task_node_sets_attempt_log_fail_in_exception_of_execution(mocker, monkeypatch):
-    mock_attempt_log = mocker.MagicMock()
-    mock_context = mocker.MagicMock()
-
-    monkeypatch.setattr(nodes.TaskNode, "_context", mock_context)
-    mock_context.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
-
-    monkeypatch.setattr(BaseTaskType, "execute_command", mocker.MagicMock(side_effect=Exception()))
-    base_task = BaseTaskType(node_name="test", task_type="dummy")
-
-    task_node = nodes.TaskNode(name="test", internal_name="test", next_node="next_node", executable=base_task)
-
-    task_node.execute()
-
-    assert mock_attempt_log.status == defaults.FAIL
-
-
-def test_task_node_sets_attempt_log_success_in_no_exception_of_execution(mocker, monkeypatch):
-    mock_attempt_log = mocker.MagicMock()
-    mock_context = mocker.MagicMock()
-
-    monkeypatch.setattr(nodes.TaskNode, "_context", mock_context)
-    mock_context.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
-
-    monkeypatch.setattr(BaseTaskType, "execute_command", mocker.MagicMock())
-    base_task = BaseTaskType(node_name="test", task_type="dummy")
-    task_node = nodes.TaskNode(name="test", internal_name="test", next_node="next_node", executable=base_task)
-
-    task_node.execute()
-
-    assert mock_attempt_log.status == defaults.SUCCESS
-
-
-def test_fail_node_sets_branch_log_fail(mocker, monkeypatch):
-    mock_attempt_log = mocker.MagicMock()
-    mock_branch_log = mocker.MagicMock()
-    mock_context = mocker.MagicMock()
-
-    monkeypatch.setattr(nodes.FailNode, "_context", mock_context)
-
-    mock_context.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
-    mock_context.run_log_store.get_branch_log = mocker.MagicMock(return_value=mock_branch_log)
-
-    node = nodes.FailNode(name="test", internal_name="test")
-
-    node.execute()
-
-    assert mock_attempt_log.status == defaults.SUCCESS
-    assert mock_branch_log.status == defaults.FAIL
-
-
-def test_fail_node_sets_attempt_log_success_even_in_exception(mocker, monkeypatch):
-    mock_attempt_log = mocker.MagicMock()
-    mock_context = mocker.MagicMock()
-
-    monkeypatch.setattr(nodes.FailNode, "_context", mock_context)
-
-    mock_context.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
-    mock_context.run_log_store.get_branch_log = mocker.MagicMock(side_effect=Exception())
-
-    node = nodes.FailNode(name="test", internal_name="test")
-
-    node.execute()
-
-    assert mock_attempt_log.status == defaults.SUCCESS
-
-
-def test_success_node_sets_branch_log_success(mocker, monkeypatch):
-    mock_attempt_log = mocker.MagicMock()
-    mock_branch_log = mocker.MagicMock()
-    mock_context = mocker.MagicMock()
-
-    monkeypatch.setattr(nodes.SuccessNode, "_context", mock_context)
-
-    mock_context.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
-    mock_context.run_log_store.get_branch_log = mocker.MagicMock(return_value=mock_branch_log)
-
-    node = nodes.SuccessNode(name="test", internal_name="test")
internal_name="test") - - node.execute() - - assert mock_attempt_log.status == defaults.SUCCESS - assert mock_branch_log.status == defaults.SUCCESS - - -def test_success_node_sets_attempt_log_success_even_in_exception(mocker, monkeypatch): - mock_attempt_log = mocker.MagicMock() - mock_branch_log = mocker.MagicMock() - mock_context = mocker.MagicMock() - - monkeypatch.setattr(nodes.SuccessNode, "_context", mock_context) - - mock_context.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log) - mock_context.run_log_store.get_branch_log = mocker.MagicMock(side_effect=Exception()) - - node = nodes.SuccessNode(name="test", internal_name="test") - - node.execute() - - assert mock_attempt_log.status == defaults.SUCCESS - - def test_parallel_node_parse_from_config_creates_sub_graph(mocker, monkeypatch): graph = nodes.Graph(start_at="first", name="first_branch") @@ -328,17 +226,3 @@ def test__as_is_node_takes_anything_as_input(mocker, monkeypatch): } _ = nodes.StubNode.parse_from_config(config=config) - - -def test_as_is_node_execute_returns_success(mocker, monkeypatch): - mock_attempt_log = mocker.MagicMock() - - mock_context = mocker.MagicMock() - monkeypatch.setattr(nodes.StubNode, "_context", mock_context) - mock_context.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log) - - node = nodes.StubNode(name="test", internal_name="test", next_node="next_node") - - node.execute() - - assert mock_attempt_log.status == defaults.SUCCESS diff --git a/tests/magnus/test_catalog.py b/tests/runnable/test_catalog.py similarity index 98% rename from tests/magnus/test_catalog.py rename to tests/runnable/test_catalog.py index 29cb93b5..8660a422 100644 --- a/tests/magnus/test_catalog.py +++ b/tests/runnable/test_catalog.py @@ -1,6 +1,6 @@ import pytest -from magnus import ( +from runnable import ( catalog, # pylint: disable=import-error defaults, # pylint: disable=import-error ) diff --git a/tests/magnus/test_datastore.py b/tests/runnable/test_datastore.py similarity index 98% rename from tests/magnus/test_datastore.py rename to tests/runnable/test_datastore.py index 502a6536..996f619e 100644 --- a/tests/magnus/test_datastore.py +++ b/tests/runnable/test_datastore.py @@ -1,6 +1,6 @@ import pytest -from magnus import datastore, defaults, exceptions +from runnable import datastore, defaults, exceptions @pytest.fixture(autouse=True) @@ -321,14 +321,6 @@ def test_base_run_log_store_create_branch_log_returns_a_branch_log_object(): assert isinstance(branch_log, datastore.BranchLog) -def test_base_run_log_store_create_attempt_log_returns_a_attempt_log_object(): - run_log_store = datastore.BaseRunLogStore() - - attempt_log = run_log_store.create_attempt_log() - - assert isinstance(attempt_log, datastore.StepAttempt) - - def test_base_run_log_store_create_code_identity_object(): run_log_store = datastore.BaseRunLogStore() diff --git a/tests/magnus/test_executor.py b/tests/runnable/test_executor.py similarity index 69% rename from tests/magnus/test_executor.py rename to tests/runnable/test_executor.py index ac7e78ba..11cc8b5f 100644 --- a/tests/magnus/test_executor.py +++ b/tests/runnable/test_executor.py @@ -1,6 +1,6 @@ import pytest -from magnus import executor, defaults +from runnable import executor, defaults @pytest.fixture(autouse=True) @@ -17,19 +17,6 @@ def test_base_executor_context_refers_to_global_run_context(mocker, monkeypatch) assert base_executor._context is mock_run_context -def test_is_parallel_refers_to_config(): - base_executor = 
-
-    assert base_executor._is_parallel_execution() == False
-
-
-def test_is_parallel_refers_to_config_true():
-    base_executor = executor.BaseExecutor()
-    base_executor.enable_parallel = True
-
-    assert base_executor._is_parallel_execution() == True
-
-
 def test_step_attempt_number_defaults_to_1():
     base_executor = executor.BaseExecutor()
diff --git a/tests/magnus/test_graph.py b/tests/runnable/test_graph.py
similarity index 99%
rename from tests/magnus/test_graph.py
rename to tests/runnable/test_graph.py
index 9bfd8ba6..f2836bc5 100644
--- a/tests/magnus/test_graph.py
+++ b/tests/runnable/test_graph.py
@@ -1,11 +1,11 @@
 import pytest

-from magnus import (
+from runnable import (
     defaults,  # pylint: disable=import-error
     exceptions,  # pylint: disable=import-error
     graph,  # pylint: disable=import-error
 )
-from magnus.extensions.nodes import StubNode, FailNode, SuccessNode
+from runnable.extensions.nodes import StubNode, FailNode, SuccessNode


 def get_new_graph(start_at="this", internal_branch_name="i_name"):
diff --git a/tests/magnus/test_integration.py b/tests/runnable/test_integration.py
similarity index 88%
rename from tests/magnus/test_integration.py
rename to tests/runnable/test_integration.py
index 78751eff..ccf4ce42 100644
--- a/tests/magnus/test_integration.py
+++ b/tests/runnable/test_integration.py
@@ -2,8 +2,7 @@

 import pytest

-
-from magnus import (
+from runnable import (
     integration,  # pylint: disable=import-error; pylint: disable=import-error
 )
@@ -88,7 +87,7 @@ def test_get_integration_handler_gives_default_integration_if_no_match(monkeypat
 def test_do_nothing_catalog_validate_emits_warning(caplog):
     extension = integration.DoNothingCatalog("none", "service")

-    with caplog.at_level(logging.WARNING, logger="magnus"):
+    with caplog.at_level(logging.INFO, logger="runnable"):
         extension.validate()

     assert "A do-nothing catalog does not hold any data and therefore cannot pass data between nodes." in caplog.text
@@ -97,21 +96,12 @@ def test_do_nothing_catalog_validate_emits_warning(caplog):
 def test_do_nothing_secrets_validate_emits_warning(caplog):
     extension = integration.DoNothingSecrets("none", "service")

-    with caplog.at_level(logging.WARNING, logger="magnus"):
+    with caplog.at_level(logging.INFO, logger="runnable"):
         extension.validate()

     assert "A do-nothing secrets does not hold any secrets and therefore cannot return you any secrets." in caplog.text


-def test_do_nothing_experiment_tracker_validate_emits_warning(caplog):
-    extension = integration.DoNothingExperimentTracker("none", "service")
-
-    with caplog.at_level(logging.WARNING, logger="magnus"):
-        extension.validate()
-
-    assert "A do-nothing experiment tracker does nothing and therefore cannot track anything." in caplog.text
-
-
 def test_buffered_run_log_store_raises_exception_for_anything_else_than_local(mocker, monkeypatch):
     mock_executor = mocker.MagicMock()
@@ -129,7 +119,7 @@ def test_buffered_run_log_store_accepts_local(mocker, caplog):
     mock_executor.service_name = "local"
     extension = integration.BufferedRunLogStore(mock_executor, "service")
-    with caplog.at_level(logging.WARNING, logger="magnus"):
+    with caplog.at_level(logging.INFO, logger="runnable"):
         extension.validate()

     assert "Run log generated by buffered run log store are not persisted." in caplog.text
diff --git a/tests/magnus/test_nodes.py b/tests/runnable/test_nodes.py
similarity index 98%
rename from tests/magnus/test_nodes.py
rename to tests/runnable/test_nodes.py
index 2399ba93..1a1f94fa 100644
--- a/tests/magnus/test_nodes.py
+++ b/tests/runnable/test_nodes.py
@@ -1,6 +1,6 @@
 import pytest

-from magnus import defaults, nodes, exceptions  # pylint: disable=import-error  # pylint: disable=import-error
+from runnable import defaults, nodes, exceptions  # pylint: disable=import-error  # pylint: disable=import-error


 @pytest.fixture(autouse=True)
@@ -288,8 +288,7 @@ def test_composite_node_execute_raises_exception(instantiable_composite_node):
 def test_terminal_node_get_on_failure_node_raises_exception(instantiable_terminal_node):
     node = nodes.TerminalNode(name="test", internal_name="test", node_type="dummy")

-    with pytest.raises(exceptions.TerminalNodeError):
-        node._get_on_failure_node()
+    assert node._get_on_failure_node() == ""


 def test_terminal_node__get_next_node_raises_exception(instantiable_terminal_node):
diff --git a/tests/runnable/test_parmeters.py b/tests/runnable/test_parmeters.py
new file mode 100644
index 00000000..ff4ee424
--- /dev/null
+++ b/tests/runnable/test_parmeters.py
@@ -0,0 +1,122 @@
+import os
+
+import pytest
+
+from pydantic import BaseModel, ValidationError
+
+
+from runnable import defaults
+from runnable.datastore import JsonParameter
+from runnable.parameters import (
+    get_user_set_parameters,
+    bind_args_for_pydantic_model,
+    filter_arguments_for_func,
+)
+
+
+def test_get_user_set_parameters_does_nothing_if_prefix_does_not_match(monkeypatch):
+    monkeypatch.setenv("random", "value")
+
+    assert get_user_set_parameters() == {}
+
+
+def test_get_user_set_parameters_removes_the_parameter_if_prefix_match_remove(monkeypatch):
+    monkeypatch.setenv(defaults.PARAMETER_PREFIX + "key", "1")
+
+    assert defaults.PARAMETER_PREFIX + "key" in os.environ
+
+    get_user_set_parameters(remove=True)
+
+    assert defaults.PARAMETER_PREFIX + "key" not in os.environ
+
+
+def test_bind_args_for_pydantic_model_with_correct_params():
+    class MyModel(BaseModel):
+        a: int
+        b: str
+
+    params = {"a": 1, "b": "test"}
+    bound_model = bind_args_for_pydantic_model(params, MyModel)
+
+    assert isinstance(bound_model, MyModel)
+    assert bound_model.a == 1
+    assert bound_model.b == "test"
+
+
+def test_bind_args_for_pydantic_model_with_extra_params():
+    class MyModel(BaseModel):
+        a: int
+        b: str
+
+    params = {"a": 1, "b": "test", "c": 2}
+    bound_model = bind_args_for_pydantic_model(params, MyModel)
+
+    assert isinstance(bound_model, MyModel)
+    assert bound_model.a == 1
+    assert bound_model.b == "test"
+
+
+def test_bind_args_for_pydantic_model_with_missing_params():
+    class MyModel(BaseModel):
+        a: int
+        b: str
+
+    params = {"a": 1}
+    with pytest.raises(ValidationError):
+        bind_args_for_pydantic_model(params, MyModel)
+
+
+def test_filter_arguments_for_func_with_simple_arguments():
+    def func(a: int, b: str):
+        pass
+
+    params = {"a": JsonParameter(kind="json", value=1), "b": JsonParameter(kind="json", value="test")}
+    bound_args = filter_arguments_for_func(func, params)
+
+    assert bound_args == {"a": 1, "b": "test"}
+
+
+# def test_filter_arguments_for_func_with_pydantic_model_arguments():
+#     class MyModel(BaseModel):
+#         a: int
+#         b: str
+
+#     def func(inner: MyModel, c: str):
+#         pass
+
+#     params = {
+#         "inner": {"a": JsonParameter(kind="json", value=1), "b": JsonParameter(kind="json", value="test")},
+#         "c": JsonParameter(kind="json", value="test"),
+#     }
+#     bound_args = filter_arguments_for_func(func, params)
+
+#     assert bound_args == {"inner": MyModel(a=1, b="test"), "c": "test"}
+
+
+def test_filter_arguments_for_func_with_missing_arguments_but_defaults_present():
+    def func(inner: int, c: str = "test"):
+        pass
+
+    params = {"inner": JsonParameter(kind="json", value=1)}
+    bound_args = filter_arguments_for_func(func, params)
+
+    assert bound_args == {"inner": 1}
+
+
+def test_filter_arguments_for_func_with_missing_arguments_and_no_defaults():
+    def func(inner: int, c: str):
+        pass
+
+    params = {"inner": JsonParameter(kind="json", value=1)}
+    with pytest.raises(ValueError, match=r"Parameter c is required for func but not provided"):
+        _ = filter_arguments_for_func(func, params)
+
+
+def test_filter_arguments_for_func_with_map_variable_sent_in():
+    params = {"inner": JsonParameter(kind="json", value=1)}
+
+    def func(inner: int, first: int, second: str):
+        pass
+
+    bound_args = filter_arguments_for_func(func, params, map_variable={"first": 1, "second": "test"})
+    assert bound_args == {"inner": 1, "first": 1, "second": "test"}
diff --git a/tests/magnus/test_sdk.py b/tests/runnable/test_sdk.py
similarity index 93%
rename from tests/magnus/test_sdk.py
rename to tests/runnable/test_sdk.py
index ff97586d..3958c73d 100644
--- a/tests/magnus/test_sdk.py
+++ b/tests/runnable/test_sdk.py
@@ -1,7 +1,7 @@
 import pytest

-from magnus.extensions import nodes
-from magnus import sdk
+from runnable.extensions import nodes
+from runnable import sdk


 def test_success_init():
diff --git a/tests/magnus/test_secrets.py b/tests/runnable/test_secrets.py
similarity index 97%
rename from tests/magnus/test_secrets.py
rename to tests/runnable/test_secrets.py
index 594ab1db..983eb3a2 100644
--- a/tests/magnus/test_secrets.py
+++ b/tests/runnable/test_secrets.py
@@ -1,6 +1,6 @@
 import pytest

-from magnus import (
+from runnable import (
     secrets,  # pylint: disable=import-error
 )
diff --git a/tests/runnable/test_tasks.py b/tests/runnable/test_tasks.py
new file mode 100644
index 00000000..145efbce
--- /dev/null
+++ b/tests/runnable/test_tasks.py
@@ -0,0 +1,21 @@
+import pytest
+
+
+from runnable import tasks
+
+
+@pytest.fixture
+def configuration():
+    return {"node_name": "dummy", "task_type": "dummy"}
+
+
+def test_base_task_execute_command_raises_not_implemented_error(configuration):
+    base_execution_type = tasks.BaseTaskType(**configuration)
+
+    with pytest.raises(NotImplementedError):
+        base_execution_type.execute_command()
+
+
+def test_notebook_raises_exception_if_command_is_not_a_notebook():
+    with pytest.raises(Exception):
+        tasks.NotebookTaskType(command="path to notebook")
diff --git a/tests/magnus/test_utils.py b/tests/runnable/test_utils.py
similarity index 97%
rename from tests/magnus/test_utils.py
rename to tests/runnable/test_utils.py
index 2ffb0a98..1240ddb5 100644
--- a/tests/magnus/test_utils.py
+++ b/tests/runnable/test_utils.py
@@ -5,7 +5,7 @@

 import pytest

-from magnus import (
+from runnable import (
     defaults,  # pylint: disable=import-error
     utils,  # pylint: disable=import-error
 )
@@ -365,7 +365,7 @@ def test_get_local_docker_image_id_returns_none_in_exception(mocker, monkeypatch
     assert utils.get_local_docker_image_id("test") == ""


-def test_get_node_execution_command_returns_magnus_execute(mocker, monkeypatch):
+def test_get_node_execution_command_returns_runnable_execute(mocker, monkeypatch):
     import logging

     mock_context = mocker.MagicMock()
@@ -377,7 +377,7 @@ def test_get_node_execution_command_returns_magnus_execute(mocker, monkeypatch):
     monkeypatch.setattr(utils, "context", mock_context)
"context", mock_context) - logger = logging.getLogger(name="magnus") + logger = logging.getLogger(name="runnable") old_level = logger.level logger.setLevel(defaults.LOG_LEVEL) @@ -390,7 +390,7 @@ def _command_friendly_name(self): test_map_variable = {"a": "b"} try: assert utils.get_node_execution_command(MockNode(), map_variable=test_map_variable) == ( - "magnus execute_single_node test_run_id test_node_id " + "runnable execute_single_node test_run_id test_node_id " f"--log-level WARNING --file test_pipeline_file --map-variable '{json.dumps(test_map_variable)}' --config-file test_configuration_file " "--parameters-file test_parameters_file --tag test_tag" ) @@ -416,7 +416,7 @@ class MockNode: def _command_friendly_name(self): return "test_node_id" - logger = logging.getLogger(name="magnus") + logger = logging.getLogger(name="runnable") old_level = logger.level logger.setLevel(defaults.LOG_LEVEL) @@ -425,7 +425,7 @@ def _command_friendly_name(self): assert utils.get_node_execution_command( MockNode(), map_variable=test_map_variable, over_write_run_id="this" ) == ( - "magnus execute_single_node this test_node_id " + "runnable execute_single_node this test_node_id " f"--log-level WARNING --file test_pipeline_file --map-variable '{json.dumps(test_map_variable)}' --config-file test_configuration_file " "--parameters-file test_parameters_file --tag test_tag" ) diff --git a/tests/scenarios/test_sdk_traversals.py b/tests/scenarios/test_sdk_traversals.py deleted file mode 100644 index 6582543b..00000000 --- a/tests/scenarios/test_sdk_traversals.py +++ /dev/null @@ -1,76 +0,0 @@ -import pytest -from rich import print - -from magnus import Stub, Pipeline, Parallel - - -@pytest.mark.no_cover -def test_sequence_next_node(): - first = Stub(name="first", next="second") - second = Stub(name="second", terminate_with_success=True) - - pipeline = Pipeline(steps=[first, second], start_at=first, add_terminal_nodes=True) - - run_log = pipeline.execute() - - assert len(run_log.steps) == 3 - - -@pytest.mark.no_cover -def test_sequence_depends_on(): - first = Stub(name="first") - second = Stub(name="second", terminate_with_success=True).depends_on(first) - - pipeline = Pipeline(steps=[first, second], start_at=first, add_terminal_nodes=True) - - run_log = pipeline.execute() - - assert len(run_log.steps) == 3 - - -@pytest.mark.no_cover -def test_sequence_rshift(): - first = Stub(name="first") - second = Stub(name="second", terminate_with_success=True) - - first >> second - - pipeline = Pipeline(steps=[first, second], start_at=first, add_terminal_nodes=True) - - run_log = pipeline.execute() - - assert len(run_log.steps) == 3 - - -@pytest.mark.no_cover -def test_sequence_lshift(): - first = Stub(name="first") - second = Stub(name="second", terminate_with_success=True) - - second << first - - pipeline = Pipeline(steps=[first, second], start_at=first, add_terminal_nodes=True) - - run_log = pipeline.execute() - - assert len(run_log.steps) == 3 - - -@pytest.mark.no_cover -def test_parallel(): - first = Stub(name="first") - second = Stub(name="second").depends_on(first) - - branch_first = Stub(name="branch_first", next="branch_second") - branch_second = Stub(name="branch_second", terminate_with_success=True) - - branch_a = Pipeline(steps=[branch_first, branch_second], start_at=branch_first, add_terminal_nodes=True) - branch_b = Pipeline(steps=[branch_first, branch_second], start_at=branch_first, add_terminal_nodes=True) - - parallel_node = Parallel(name="parallel_step", branches={"a": branch_a, "b": branch_b}, 
-    parallel_node << second
-
-    parent_pipeline = Pipeline(steps=[first, second, parallel_node], start_at=first)
-    run_log = parent_pipeline.execute()
-
-    assert len(run_log.steps) == 4
diff --git a/tests/scenarios/test_traversals.py b/tests/scenarios/test_traversals.py
index be45e4a6..4bbc6993 100644
--- a/tests/scenarios/test_traversals.py
+++ b/tests/scenarios/test_traversals.py
@@ -7,7 +7,7 @@
 import pytest
 import ruamel.yaml

-from magnus import defaults, entrypoints, utils
+from runnable import defaults, entrypoints, utils

 yaml = ruamel.yaml.YAML()
diff --git a/tests/test_examples.py b/tests/test_examples.py
index ff16f576..d71971b2 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -1,43 +1,28 @@
-from contextlib import nullcontext, contextmanager
-import pytest
-from pathlib import Path
-import os
 import importlib
+import os
 import subprocess
+from contextlib import contextmanager, nullcontext
+from pathlib import Path
+
+import pytest

-from magnus.entrypoints import execute
-from magnus import exceptions
+from runnable import exceptions
+from runnable.entrypoints import execute

 # (file, is_fail?, kwargs)
 examples = [
     ("concepts/catalog.yaml", False, {"configuration_file": "examples/configs/fs-catalog.yaml"}),
-    ("concepts/experiment_tracking_env.yaml", False, {}),
-    ("concepts/experiment_tracking_env_step.yaml", False, {}),
     ("concepts/map.yaml", False, {}),
     ("concepts/map_shell.yaml", False, {}),
     ("concepts/nesting.yaml", False, {}),
-    ("concepts/notebook_api_parameters.yaml", False, {"parameters_file": "examples/concepts/parameters.yaml"}),
-    ("concepts/notebook_env_parameters.yaml", False, {"parameters_file": "examples/concepts/parameters.yaml"}),
     ("concepts/notebook_native_parameters.yaml", False, {"parameters_file": "examples/concepts/parameters.yaml"}),
     ("concepts/parallel.yaml", False, {}),
     ("concepts/simple_notebook.yaml", False, {}),
     ("concepts/simple.yaml", False, {}),
-    ("concepts/task_shell_parameters.yaml", False, {"parameters_file": "examples/parameters_initial.yaml"}),
-    ("concepts/task_shell_simple.yaml", False, {}),
-    ("concepts/traversal.yaml", False, {}),
     ("catalog.yaml", False, {"configuration_file": "examples/configs/fs-catalog.yaml"}),
-    ("contrived.yaml", False, {}),
     ("default-fail.yaml", True, {}),
-    ("experiment_tracking_env.yaml", True, {}),
-    ("logging.yaml", False, {}),
-    ("mocking.yaml", False, {}),
     ("on-failure.yaml", False, {}),
     ("parallel-fail.yaml", True, {}),
-    ("parameters_env.yaml", False, {"parameters_file": "examples/parameters_initial.yaml"}),
-    ("parameters_flow.yaml", False, {"parameters_file": "examples/parameters_initial.yaml"}),
-    ("python-tasks.yaml", False, {"parameters_file": "examples/parameters_initial.yaml"}),
-    ("retry-fail.yaml", True, {"configuration_file": "examples/configs/fs-catalog-run_log.yaml"}),
-    ("retry-fixed.yaml", False, {"configuration_file": "examples/configs/fs-catalog-run_log.yaml"}),
 ]
@@ -91,7 +76,7 @@ def test_yaml_examples_container(example):
         full_file_path = examples_path / file_path
         kwargs.pop("configuration_file", "")
         configuration_file = "examples/configs/local-container.yaml"
-        os.environ["MAGNUS_VAR_default_docker_image"] = "magnus:3.8"
+        os.environ["runnable_VAR_default_docker_image"] = "runnable:3.8"
         execute(configuration_file=configuration_file, pipeline_file=str(full_file_path), **kwargs)
     except exceptions.ExecutionFailedError:
         if not status:
@@ -101,39 +86,25 @@
 @contextmanager
 def secrets_env_context():
os.environ["secret"] = "secret_value" - os.environ["MAGNUS_CONFIGURATION_FILE"] = "examples/configs/secrets-env-default.yaml" + os.environ["runnable_CONFIGURATION_FILE"] = "examples/configs/secrets-env-default.yaml" yield del os.environ["secret"] - del os.environ["MAGNUS_CONFIGURATION_FILE"] + del os.environ["runnable_CONFIGURATION_FILE"] # function, success, context python_examples = [ ("catalog", False, None), - ("catalog_api", False, None), ("catalog_simple", False, None), - ("contrived", False, None), ("mocking", False, None), ("on_failure", False, None), - ("parameters_api", False, None), ("parameters", False, None), - ("python-tasks", False, None), - ("secrets", False, None), - ("secrets_env", False, secrets_env_context), + ("parameters_simple", False, None), ("concepts.catalog", False, None), - ("concepts.catalog_api", False, None), - ("concepts.catalog_object", False, None), - ("concepts.experiment_tracking_api", False, None), - ("concepts.experiment_tracking_env", False, None), - ("concepts.experiment_tracking_step", False, None), ("concepts.map", False, None), ("concepts.nesting", False, None), ("concepts.parallel", False, None), ("concepts.simple", False, None), - ("concepts.task_api_parameters", False, None), - ("concepts.task_env_parameters", False, None), - ("concepts.task_native_parameters", False, None), - ("concepts.traversal", False, None), ] @@ -143,7 +114,7 @@ def list_python_examples(): @pytest.mark.parametrize("example", list_python_examples()) -# @pytest.mark.no_cover +@pytest.mark.no_cover @pytest.mark.e2e def test_python_examples(example): print(f"Testing {example}...") diff --git a/tox.ini b/tox.ini index 1104c2bd..4503fc8e 100644 --- a/tox.ini +++ b/tox.ini @@ -1,17 +1,17 @@ [tox] skipsdist = True isolated_build = True -envlist = python3.8, mypy +envlist = python3.9, mypy [testenv] whitelist_externals = poetry commands = poetry install -E docker -E notebook - poetry run python -m pytest -m "not e2e_container" --cov=magnus/ tests/ + poetry run python -m pytest -m "not e2e_container" --cov=runnable/ tests/ [testenv:mypy] whitelist_externals = poetry commands = poetry install -E docker -E notebook - poetry run mypy magnus + poetry run mypy runnable