From 7ece0d4d8b00bcb981713aed158814b8f5d5a28f Mon Sep 17 00:00:00 2001 From: siddharthsambharia-portkey Date: Fri, 20 Dec 2024 17:21:54 +0530 Subject: [PATCH 1/6] portkey integration v1 --- distributions/portkey/build.yaml | 17 ++ distributions/portkey/compose.yaml | 0 distributions/portkey/run.yaml | 77 +++++++ .../remote/inference/portkey/__init__.py | 16 ++ .../remote/inference/portkey/config.py | 32 +++ .../remote/inference/portkey/portkey.py | 190 ++++++++++++++++++ 6 files changed, 332 insertions(+) create mode 100644 distributions/portkey/build.yaml create mode 100644 distributions/portkey/compose.yaml create mode 100644 distributions/portkey/run.yaml create mode 100644 llama_stack/providers/remote/inference/portkey/__init__.py create mode 100644 llama_stack/providers/remote/inference/portkey/config.py create mode 100644 llama_stack/providers/remote/inference/portkey/portkey.py diff --git a/distributions/portkey/build.yaml b/distributions/portkey/build.yaml new file mode 100644 index 0000000000..d173be6ffd --- /dev/null +++ b/distributions/portkey/build.yaml @@ -0,0 +1,17 @@ +version: '2' +name: portkey +distribution_spec: + description: Use Portkey for running LLM inference + docker_image: null + providers: + inference: + - remote::portkey + safety: + - inline::llama-guard + memory: + - inline::meta-reference + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/distributions/portkey/compose.yaml b/distributions/portkey/compose.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/distributions/portkey/run.yaml b/distributions/portkey/run.yaml new file mode 100644 index 0000000000..b7ac72d6b5 --- /dev/null +++ b/distributions/portkey/run.yaml @@ -0,0 +1,77 @@ +version: '2' +image_name: portkey +docker_image: null +conda_env: portkey +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: portkey + provider_type: remote::portkey + config: + base_url: https://api.portkey.ai + api_key: ${env.PORTKEY_API_KEY} + - provider_id: sentence-transformers + provider_type: inline::sentence-transformers + config: {} + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + memory: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/portkey}/faiss_store.db + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/portkey}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + service_name: ${env.OTEL_SERVICE_NAME:llama-stack} + sinks: ${env.TELEMETRY_SINKS:console,sqlite} + sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/portkey/trace_store.db} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/portkey}/registry.db +models: +- metadata: {} + model_id: meta-llama/Llama-3.1-8B-Instruct + provider_id: portkey + provider_model_id: llama3.1-8b + model_type: llm +- metadata: {} + model_id: meta-llama/Llama-3.3-70B-Instruct + provider_id: portkey + provider_model_id: llama-3.3-70b + model_type: llm +- metadata: + embedding_dimension: 384 + model_id: all-MiniLM-L6-v2 + provider_id: sentence-transformers + provider_model_id: null + model_type: embedding +shields: 
+- params: null + shield_id: meta-llama/Llama-Guard-3-8B + provider_id: null + provider_shield_id: null +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/providers/remote/inference/portkey/__init__.py b/llama_stack/providers/remote/inference/portkey/__init__.py new file mode 100644 index 0000000000..aaabc055af --- /dev/null +++ b/llama_stack/providers/remote/inference/portkey/__init__.py @@ -0,0 +1,16 @@ + +from .config import PortkeyImplConfig + + +async def get_adapter_impl(config: PortkeyImplConfig, _deps): + from .portkey import PortkeyInferenceAdapter + + assert isinstance( + config, PortkeyImplConfig + ), f"Unexpected config type: {type(config)}" + + impl = PortkeyInferenceAdapter(config) + + await impl.initialize() + + return impl diff --git a/llama_stack/providers/remote/inference/portkey/config.py b/llama_stack/providers/remote/inference/portkey/config.py new file mode 100644 index 0000000000..144fbf6a4f --- /dev/null +++ b/llama_stack/providers/remote/inference/portkey/config.py @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +import os +from typing import Any, Dict, Optional + +from llama_models.schema_utils import json_schema_type +from pydantic import BaseModel, Field + +DEFAULT_BASE_URL = "https://api.portkey.ai/v1" + + +@json_schema_type +class PortkeyImplConfig(BaseModel): + base_url: str = Field( + default=os.environ.get("PORTKEY_BASE_URL", DEFAULT_BASE_URL), + description="Base URL for the Portkey API", + ) + api_key: Optional[str] = Field( + default=os.environ.get("PORTKEY_API_KEY"), + description="Portkey API Key", + ) + + @classmethod + def sample_run_config(cls, **kwargs) -> Dict[str, Any]: + return { + "base_url": DEFAULT_BASE_URL, + "api_key": "${env.PORTKEY_API_KEY}", + } diff --git a/llama_stack/providers/remote/inference/portkey/portkey.py b/llama_stack/providers/remote/inference/portkey/portkey.py new file mode 100644 index 0000000000..c8ed5c4c96 --- /dev/null +++ b/llama_stack/providers/remote/inference/portkey/portkey.py @@ -0,0 +1,190 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+ +from typing import AsyncGenerator + +from portkey_ai import AsyncPortkey + +from llama_models.llama3.api.chat_format import ChatFormat + +from llama_models.llama3.api.tokenizer import Tokenizer + +from llama_stack.apis.inference import * # noqa: F403 + +from llama_models.datatypes import CoreModelId + +from llama_stack.providers.utils.inference.model_registry import ( + build_model_alias, + ModelRegistryHelper, +) +from llama_stack.providers.utils.inference.openai_compat import ( + get_sampling_options, + process_chat_completion_response, + process_chat_completion_stream_response, + process_completion_response, + process_completion_stream_response, +) +from llama_stack.providers.utils.inference.prompt_adapter import ( + chat_completion_request_to_prompt, + completion_request_to_prompt, +) + +from .config import PortkeyImplConfig + + +model_aliases = [ + build_model_alias( + "llama3.1-8b", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_model_alias( + "llama-3.3-70b", + CoreModelId.llama3_3_70b_instruct.value, + ), +] + + +class PortkeyInferenceAdapter(ModelRegistryHelper, Inference): + def __init__(self, config: PortkeyImplConfig) -> None: + ModelRegistryHelper.__init__( + self, + model_aliases=model_aliases, + ) + self.config = config + self.formatter = ChatFormat(Tokenizer.get_instance()) + + self.client = AsyncPortkey( + base_url=self.config.base_url, api_key=self.config.api_key + ) + + async def initialize(self) -> None: + return + + async def shutdown(self) -> None: + pass + + async def completion( + self, + model_id: str, + content: InterleavedContent, + sampling_params: Optional[SamplingParams] = SamplingParams(), + response_format: Optional[ResponseFormat] = None, + stream: Optional[bool] = False, + logprobs: Optional[LogProbConfig] = None, + ) -> AsyncGenerator: + model = await self.model_store.get_model(model_id) + request = CompletionRequest( + model=model.provider_resource_id, + content=content, + sampling_params=sampling_params, + response_format=response_format, + stream=stream, + logprobs=logprobs, + ) + if stream: + return self._stream_completion( + request, + ) + else: + return await self._nonstream_completion(request) + + async def _nonstream_completion( + self, request: CompletionRequest + ) -> CompletionResponse: + params = await self._get_params(request) + + r = await self.client.completions.create(**params) + + return process_completion_response(r, self.formatter) + + async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: + params = await self._get_params(request) + + stream = await self.client.completions.create(**params) + + async for chunk in process_completion_stream_response(stream, self.formatter): + yield chunk + + async def chat_completion( + self, + model_id: str, + messages: List[Message], + sampling_params: Optional[SamplingParams] = SamplingParams(), + tools: Optional[List[ToolDefinition]] = None, + tool_choice: Optional[ToolChoice] = ToolChoice.auto, + tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json, + response_format: Optional[ResponseFormat] = None, + stream: Optional[bool] = False, + logprobs: Optional[LogProbConfig] = None, + ) -> AsyncGenerator: + model = await self.model_store.get_model(model_id) + request = ChatCompletionRequest( + model=model.provider_resource_id, + messages=messages, + sampling_params=sampling_params, + tools=tools or [], + tool_choice=tool_choice, + tool_prompt_format=tool_prompt_format, + response_format=response_format, + stream=stream, + logprobs=logprobs, + ) + + if 
stream: + return self._stream_chat_completion(request) + else: + return await self._nonstream_chat_completion(request) + + async def _nonstream_chat_completion( + self, request: CompletionRequest + ) -> CompletionResponse: + params = await self._get_params(request) + + r = await self.client.completions.create(**params) + + return process_chat_completion_response(r, self.formatter) + + async def _stream_chat_completion( + self, request: CompletionRequest + ) -> AsyncGenerator: + params = await self._get_params(request) + + stream = await self.client.completions.create(**params) + + async for chunk in process_chat_completion_stream_response( + stream, self.formatter + ): + yield chunk + + async def _get_params( + self, request: Union[ChatCompletionRequest, CompletionRequest] + ) -> dict: + if request.sampling_params and request.sampling_params.top_k: + raise ValueError("`top_k` not supported by Portkey") + + prompt = "" + if isinstance(request, ChatCompletionRequest): + prompt = await chat_completion_request_to_prompt( + request, self.get_llama_model(request.model), self.formatter + ) + elif isinstance(request, CompletionRequest): + prompt = await completion_request_to_prompt(request, self.formatter) + else: + raise ValueError(f"Unknown request type {type(request)}") + + return { + "model": request.model, + "prompt": prompt, + "stream": request.stream, + **get_sampling_options(request.sampling_params), + } + + async def embeddings( + self, + model_id: str, + contents: List[InterleavedContent], + ) -> EmbeddingsResponse: + raise NotImplementedError() From 71f27f6676a1f423f712bf94e75c4fc2bc747cc2 Mon Sep 17 00:00:00 2001 From: siddharthsambharia-portkey Date: Fri, 20 Dec 2024 17:31:09 +0530 Subject: [PATCH 2/6] portkey integration v2 --- .../providers/tests/inference/fixtures.py | 16 ++++ llama_stack/templates/portkey/__init__.py | 7 ++ llama_stack/templates/portkey/build.yaml | 17 ++++ llama_stack/templates/portkey/doc_template.md | 60 +++++++++++++ llama_stack/templates/portkey/portkey.py | 89 +++++++++++++++++++ llama_stack/templates/portkey/run.yaml | 77 ++++++++++++++++ 6 files changed, 266 insertions(+) create mode 100644 llama_stack/templates/portkey/__init__.py create mode 100644 llama_stack/templates/portkey/build.yaml create mode 100644 llama_stack/templates/portkey/doc_template.md create mode 100644 llama_stack/templates/portkey/portkey.py create mode 100644 llama_stack/templates/portkey/run.yaml diff --git a/llama_stack/providers/tests/inference/fixtures.py b/llama_stack/providers/tests/inference/fixtures.py index 7cc15bd9dd..9685eea50b 100644 --- a/llama_stack/providers/tests/inference/fixtures.py +++ b/llama_stack/providers/tests/inference/fixtures.py @@ -22,6 +22,7 @@ from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig from llama_stack.providers.remote.inference.ollama import OllamaImplConfig from llama_stack.providers.remote.inference.tgi import TGIImplConfig +from llama_stack.providers.remote.inference.portkey import PortkeyImplConfig from llama_stack.providers.remote.inference.together import TogetherImplConfig from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig from llama_stack.providers.tests.resolver import construct_stack_for_test @@ -82,6 +83,21 @@ def inference_cerebras() -> ProviderFixture: ], ) +@pytest.fixture(scope="session") +def inference_cerebras() -> ProviderFixture: + return ProviderFixture( + providers=[ + Provider( + provider_id="portkey", + provider_type="remote::portkey", + config=CerebrasImplConfig( + 
api_key=get_env_or_fail("PORTKEY_API_KEY"), + ).model_dump(), + ) + ], + ) + + @pytest.fixture(scope="session") def inference_ollama(inference_model) -> ProviderFixture: diff --git a/llama_stack/templates/portkey/__init__.py b/llama_stack/templates/portkey/__init__.py new file mode 100644 index 0000000000..9f9929b527 --- /dev/null +++ b/llama_stack/templates/portkey/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from .cerebras import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/portkey/build.yaml b/llama_stack/templates/portkey/build.yaml new file mode 100644 index 0000000000..a1fe93099f --- /dev/null +++ b/llama_stack/templates/portkey/build.yaml @@ -0,0 +1,17 @@ +version: '2' +name: cerebras +distribution_spec: + description: Use Cerebras for running LLM inference + docker_image: null + providers: + inference: + - remote::cerebras + safety: + - inline::llama-guard + memory: + - inline::meta-reference + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/llama_stack/templates/portkey/doc_template.md b/llama_stack/templates/portkey/doc_template.md new file mode 100644 index 0000000000..77fc6f4787 --- /dev/null +++ b/llama_stack/templates/portkey/doc_template.md @@ -0,0 +1,60 @@ +# Cerebras Distribution + +The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations. + +{{ providers_table }} + +{% if run_config_env_vars %} +### Environment Variables + +The following environment variables can be configured: + +{% for var, (default_value, description) in run_config_env_vars.items() %} +- `{{ var }}`: {{ description }} (default: `{{ default_value }}`) +{% endfor %} +{% endif %} + +{% if default_models %} +### Models + +The following models are available by default: + +{% for model in default_models %} +- `{{ model.model_id }} ({{ model.provider_model_id }})` +{% endfor %} +{% endif %} + + +### Prerequisite: API Keys + +Make sure you have access to a Cerebras API Key. You can get one by visiting [cloud.cerebras.ai](https://cloud.cerebras.ai/). + + +## Running Llama Stack with Cerebras + +You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + +```bash +LLAMA_STACK_PORT=5001 +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-{{ name }} \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY +``` + +### Via Conda + +```bash +llama stack build --template cerebras --image-type conda +llama stack run ./run.yaml \ + --port 5001 \ + --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY +``` diff --git a/llama_stack/templates/portkey/portkey.py b/llama_stack/templates/portkey/portkey.py new file mode 100644 index 0000000000..9acb244bdd --- /dev/null +++ b/llama_stack/templates/portkey/portkey.py @@ -0,0 +1,89 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+ +from pathlib import Path + +from llama_models.sku_list import all_registered_models + +from llama_stack.apis.models.models import ModelType + +from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput +from llama_stack.providers.inline.inference.sentence_transformers import ( + SentenceTransformersInferenceConfig, +) +from llama_stack.providers.remote.inference.cerebras import CerebrasImplConfig +from llama_stack.providers.remote.inference.cerebras.cerebras import model_aliases +from llama_stack.templates.template import DistributionTemplate, RunConfigSettings + + +def get_distribution_template() -> DistributionTemplate: + providers = { + "inference": ["remote::cerebras"], + "safety": ["inline::llama-guard"], + "memory": ["inline::meta-reference"], + "agents": ["inline::meta-reference"], + "telemetry": ["inline::meta-reference"], + } + + inference_provider = Provider( + provider_id="cerebras", + provider_type="remote::cerebras", + config=CerebrasImplConfig.sample_run_config(), + ) + embedding_provider = Provider( + provider_id="sentence-transformers", + provider_type="inline::sentence-transformers", + config=SentenceTransformersInferenceConfig.sample_run_config(), + ) + + core_model_to_hf_repo = { + m.descriptor(): m.huggingface_repo for m in all_registered_models() + } + default_models = [ + ModelInput( + model_id=core_model_to_hf_repo[m.llama_model], + provider_model_id=m.provider_model_id, + provider_id="cerebras", + ) + for m in model_aliases + ] + embedding_model = ModelInput( + model_id="all-MiniLM-L6-v2", + provider_id="sentence-transformers", + model_type=ModelType.embedding, + metadata={ + "embedding_dimension": 384, + }, + ) + + return DistributionTemplate( + name="cerebras", + distro_type="self_hosted", + description="Use Cerebras for running LLM inference", + docker_image=None, + template_path=Path(__file__).parent / "doc_template.md", + providers=providers, + default_models=default_models, + run_configs={ + "run.yaml": RunConfigSettings( + provider_overrides={ + "inference": [inference_provider, embedding_provider], + }, + default_models=default_models + [embedding_model], + default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")], + ), + }, + run_config_env_vars={ + "LLAMASTACK_PORT": ( + "5001", + "Port for the Llama Stack distribution server", + ), + "CEREBRAS_API_KEY": ( + "", + "Cerebras API Key", + ), + }, + ) diff --git a/llama_stack/templates/portkey/run.yaml b/llama_stack/templates/portkey/run.yaml new file mode 100644 index 0000000000..a4b6fd7206 --- /dev/null +++ b/llama_stack/templates/portkey/run.yaml @@ -0,0 +1,77 @@ +version: '2' +image_name: portkey +docker_image: null +conda_env: portkey +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: portkey + provider_type: remote::portkey + config: + base_url: https://api.portkey.ai/v1 + api_key: ${env.PORTKEY_API_KEY} + - provider_id: sentence-transformers + provider_type: inline::sentence-transformers + config: {} + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + memory: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/portkey}/faiss_store.db + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: 
${env.SQLITE_STORE_DIR:~/.llama/distributions/portkey}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + service_name: ${env.OTEL_SERVICE_NAME:llama-stack} + sinks: ${env.TELEMETRY_SINKS:console,sqlite} + sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/portkey/trace_store.db} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/portkey}/registry.db +models: +- metadata: {} + model_id: meta-llama/Llama-3.1-8B-Instruct + provider_id: portkey + provider_model_id: llama3.1-8b + model_type: llm +- metadata: {} + model_id: meta-llama/Llama-3.3-70B-Instruct + provider_id: portkey + provider_model_id: llama-3.3-70b + model_type: llm +- metadata: + embedding_dimension: 384 + model_id: all-MiniLM-L6-v2 + provider_id: sentence-transformers + provider_model_id: null + model_type: embedding +shields: +- params: null + shield_id: meta-llama/Llama-Guard-3-8B + provider_id: null + provider_shield_id: null +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] From 418e0efc5ab9c6669d4dcba39d3fb47bf2d5800c Mon Sep 17 00:00:00 2001 From: siddharthsambharia-portkey Date: Fri, 20 Dec 2024 17:55:41 +0530 Subject: [PATCH 3/6] portkey integration v3 --- .../providers/remote/inference/portkey/portkey.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/llama_stack/providers/remote/inference/portkey/portkey.py b/llama_stack/providers/remote/inference/portkey/portkey.py index c8ed5c4c96..76bea24e63 100644 --- a/llama_stack/providers/remote/inference/portkey/portkey.py +++ b/llama_stack/providers/remote/inference/portkey/portkey.py @@ -44,6 +44,9 @@ "llama-3.3-70b", CoreModelId.llama3_3_70b_instruct.value, ), + build_model_alias( + "llama" + ) ] @@ -182,9 +185,9 @@ async def _get_params( **get_sampling_options(request.sampling_params), } - async def embeddings( - self, - model_id: str, - contents: List[InterleavedContent], - ) -> EmbeddingsResponse: - raise NotImplementedError() + # async def embeddings( + # self, + # model_id: str, + # contents: List[InterleavedContent], + # ) -> EmbeddingsResponse: + # raise NotImplementedError() From 9fd50e0272c011d375e290d6f8dc200fdaaba789 Mon Sep 17 00:00:00 2001 From: siddharthsambharia-portkey Date: Fri, 20 Dec 2024 18:14:27 +0530 Subject: [PATCH 4/6] portkey integration v4 --- llama_stack/providers/remote/inference/portkey/config.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/llama_stack/providers/remote/inference/portkey/config.py b/llama_stack/providers/remote/inference/portkey/config.py index 144fbf6a4f..d2add22c9a 100644 --- a/llama_stack/providers/remote/inference/portkey/config.py +++ b/llama_stack/providers/remote/inference/portkey/config.py @@ -23,6 +23,14 @@ class PortkeyImplConfig(BaseModel): default=os.environ.get("PORTKEY_API_KEY"), description="Portkey API Key", ) + virtual_key: Optional[str] = Field( + default=os.environ.get("PORTKEY_VIRTUAL_KEY"), + description="Portkey Virtual Key", + ) + config: Optional[str] = Field( + default=os.environ.get("PORTKEY_CONFIG_ID"), + description="Portkey Config ID", + ) @classmethod def sample_run_config(cls, **kwargs) -> Dict[str, Any]: From 71de927b29f447b5d5a7eaad8641f3f624c819ec Mon Sep 17 00:00:00 2001 From: siddharthsambharia-portkey Date: Fri, 20 Dec 2024 18:55:30 +0530 Subject: [PATCH 5/6] portkey integration v5 --- distributions/dependencies.json | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git 
a/distributions/dependencies.json b/distributions/dependencies.json index 366a2a0f2a..531617e3ce 100644 --- a/distributions/dependencies.json +++ b/distributions/dependencies.json @@ -393,5 +393,33 @@ "uvicorn", "sentence-transformers --no-deps", "torch --index-url https://download.pytorch.org/whl/cpu" + ], + "portkey": [ + "aiosqlite", + "blobfile", + "portkey-ai", + "chardet", + "faiss-cpu", + "fastapi", + "fire", + "httpx", + "matplotlib", + "nltk", + "numpy", + "opentelemetry-exporter-otlp-proto-http", + "opentelemetry-sdk", + "pandas", + "pillow", + "psycopg2-binary", + "pypdf", + "redis", + "scikit-learn", + "scipy", + "sentencepiece", + "tqdm", + "transformers", + "uvicorn", + "sentence-transformers --no-deps", + "torch --index-url https://download.pytorch.org/whl/cpu" ] } From 2251643c9a73e5762c8efd3a69579ae5f4cc1a00 Mon Sep 17 00:00:00 2001 From: siddharthsambharia-portkey Date: Fri, 20 Dec 2024 19:30:17 +0530 Subject: [PATCH 6/6] portkey fix md --- llama_stack/templates/portkey/doc_template.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/llama_stack/templates/portkey/doc_template.md b/llama_stack/templates/portkey/doc_template.md index 77fc6f4787..a420c0ecb0 100644 --- a/llama_stack/templates/portkey/doc_template.md +++ b/llama_stack/templates/portkey/doc_template.md @@ -1,4 +1,4 @@ -# Cerebras Distribution +# Portkey Distribution The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations. @@ -27,10 +27,10 @@ The following models are available by default: ### Prerequisite: API Keys -Make sure you have access to a Cerebras API Key. You can get one by visiting [cloud.cerebras.ai](https://cloud.cerebras.ai/). +Make sure you have access to a Portkey API Key and Virtual Key or Config ID. You can get these by visiting [app.portkey.ai](https://app.portkey.ai/). -## Running Llama Stack with Cerebras +## Running Llama Stack with Portkey You can do this via Conda (build code) or Docker which has a pre-built image. @@ -47,14 +47,19 @@ docker run \ llamastack/distribution-{{ name }} \ --yaml-config /root/my-run.yaml \ --port $LLAMA_STACK_PORT \ - --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY + --env PORTKEY_API_KEY=$PORTKEY_API_KEY + --env PORTKEY_VIRTUAL_KEY=$PORTKEY_VIRTUAL_KEY + --env PORTKEY_CONFIG_ID=$PORTKEY_CONFIG_ID + ``` ### Via Conda ```bash -llama stack build --template cerebras --image-type conda +llama stack build --template portkey --image-type conda llama stack run ./run.yaml \ --port 5001 \ - --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY + --env PORTKEY_API_KEY=$PORTKEY_API_KEY + --env PORTKEY_VIRTUAL_KEY=$PORTKEY_VIRTUAL_KEY + --env PORTKEY_CONFIG_ID=$PORTKEY_CONFIG_ID ```
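
For reference, once the `portkey` distribution above is built and running (for example via `llama stack run ./run.yaml --port 5001 --env PORTKEY_API_KEY=$PORTKEY_API_KEY`), a chat completion against one of the models registered in `run.yaml` exercises the new provider end-to-end. The sketch below is an illustration only, assuming the `llama-stack-client` Python SDK; that client package is not touched by this patch series, and its exact method and field names may differ between SDK versions.

```python
from llama_stack_client import LlamaStackClient

# Point the client at the locally running Portkey distribution
# (port 5001 matches LLAMA_STACK_PORT in doc_template.md; adjust as needed).
client = LlamaStackClient(base_url="http://localhost:5001")

# "meta-llama/Llama-3.1-8B-Instruct" is mapped to Portkey's "llama3.1-8b"
# provider_model_id by the run.yaml files added in this patch series.
response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(response.completion_message.content)
```

The request is forwarded by `PortkeyInferenceAdapter` through `AsyncPortkey` to the configured `base_url`, authenticated with `PORTKEY_API_KEY`; the later commits also add optional `PORTKEY_VIRTUAL_KEY` and `PORTKEY_CONFIG_ID` settings in `config.py`.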