From dca9e00bfd19f66a285cc61db829616f1fc1c0d4 Mon Sep 17 00:00:00 2001 From: Rick Staa Date: Wed, 31 Jul 2024 22:13:15 +0200 Subject: [PATCH] refactor(ai): apply some small improvements (#612) This commit clarifies several points in the orchestrator documentation that were unclear and caused confusion among users. --- ai/orchestrators/models-config.mdx | 4 ++-- ai/orchestrators/start-orchestrator.mdx | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/ai/orchestrators/models-config.mdx b/ai/orchestrators/models-config.mdx index 772c55ba..14bd9810 100644 --- a/ai/orchestrators/models-config.mdx +++ b/ai/orchestrators/models-config.mdx @@ -19,12 +19,12 @@ currently **recommended** models and their respective prices. [ { "pipeline": "text-to-image", - "model_id": "ByteDance/SDXL-Lightning", + "model_id": "SG161222/RealVisXL_V4.0_Lightning", "price_per_unit": 4768371 }, { "pipeline": "image-to-image", - "model_id": "ByteDance/SDXL-Lightning", + "model_id": "timbrooks/instruct-pix2pix", "price_per_unit": 4768371 }, { diff --git a/ai/orchestrators/start-orchestrator.mdx b/ai/orchestrators/start-orchestrator.mdx index 88bc920e..5a6bfc07 100644 --- a/ai/orchestrators/start-orchestrator.mdx +++ b/ai/orchestrators/start-orchestrator.mdx @@ -86,7 +86,7 @@ Please follow the steps below to start your AI Subnet Orchestrator node: - `-aiModels`: This flag sets the path to the JSON file that contains the AI models. - `-aiModelsDir`: This flag indicates the directory where the AI models are stored on the host machine. - `-aiRunnerImage`: This optional flag specifies which version of the ai-runner image is used. Example: `livepeer/ai-runner:0.0.2` - + Moreover, the `--network host` flag facilitates communication between the AI Orchestrator and the AI Runner container. Please note that since we use [docker-out-of-docker](https://tdongsi.github.io/blog/2017/04/23/docker-out-of-docker/), the `aiModelsDir` path should be defined as being on the host machine. 
@@ -203,8 +203,9 @@ Once the AI Subnet Orchestrator node is up and running, validate its operation by sending an AI inference request directly to the [ai-runner](https://hub.docker.com/r/livepeer/ai-runner) container. The most straightforward way to do this is through the -[swagger UI](https://fastapi.tiangolo.com/features/) interface, accessible at -`http://localhost:8000/docs`. +[Swagger UI](https://fastapi.tiangolo.com/features/) interface, accessible at +`http://localhost:8000/docs` if you have loaded the `text-to-image` pipeline. Note +that other pipelines will have different endpoints. ![Swagger UI interface](/images/ai/swagger_ui.png)