Skip to content

Commit

Permalink
fix llama stack build for together & llama stack build from templates (
Browse files Browse the repository at this point in the history
…#479)

# What does this PR do?

- Fix an issue with `llama stack build` when using the `together` template
<img width="669" alt="image"
src="https://github.com/user-attachments/assets/1cbef052-d902-40b9-98f8-37efb494d117">

- For builds from templates, copy the
`templates/<template-name>/run.yaml` file to
`~/.llama/distributions/<name>/<name>-run.yaml` instead of regenerating
the run config from scratch.


## Test Plan

```
$ llama stack build --template together --image-type conda
..
Build spec configuration saved at /opt/anaconda3/envs/llamastack-together/together-build.yaml
Build Successful! Next steps:
   1. Set the environment variables: LLAMASTACK_PORT, TOGETHER_API_KEY
   2. `llama stack run /Users/xiyan/.llama/distributions/llamastack-together/together-run.yaml`
```

```
$ llama stack run /Users/xiyan/.llama/distributions/llamastack-together/together-run.yaml
```

```
$ llama-stack-client models list
$ pytest -v -s -m remote agents/test_agents.py --env REMOTE_STACK_URL=http://localhost:5000 --inference-model meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
```
<img width="764" alt="image"
src="https://github.com/user-attachments/assets/b805b6c5-a316-4561-8fe3-24fc3b1f8b80">


## Sources

Please link relevant resources if necessary.


## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the
other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor
guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md),
      Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
  • Loading branch information
yanxi0830 authored Nov 19, 2024
1 parent ea52a3e commit 6765fd7
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 4 deletions.
35 changes: 32 additions & 3 deletions llama_stack/cli/stack/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,14 @@

from llama_stack.cli.subcommand import Subcommand
from llama_stack.distribution.datatypes import * # noqa: F403
import importlib
import os
import shutil
from functools import lru_cache
from pathlib import Path

import pkg_resources

from llama_stack.distribution.distribution import get_provider_registry
from llama_stack.distribution.utils.dynamic import instantiate_class_type

Expand Down Expand Up @@ -99,7 +103,9 @@ def _run_stack_build_command(self, args: argparse.Namespace) -> None:
self.parser.error(
f"Please specify a image-type (docker | conda) for {args.template}"
)
self._run_stack_build_command_from_build_config(build_config)
self._run_stack_build_command_from_build_config(
build_config, template_name=args.template
)
return

self.parser.error(
Expand Down Expand Up @@ -248,12 +254,13 @@ def _generate_run_config(self, build_config: BuildConfig, build_dir: Path) -> No
)

def _run_stack_build_command_from_build_config(
self, build_config: BuildConfig
self, build_config: BuildConfig, template_name: Optional[str] = None
) -> None:
import json
import os

import yaml
from termcolor import cprint

from llama_stack.distribution.build import build_image
from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
Expand All @@ -271,7 +278,29 @@ def _run_stack_build_command_from_build_config(
if return_code != 0:
return

self._generate_run_config(build_config, build_dir)
if template_name:
# copy run.yaml from template to build_dir instead of generating it again
template_path = pkg_resources.resource_filename(
"llama_stack", f"templates/{template_name}/run.yaml"
)
os.makedirs(build_dir, exist_ok=True)
run_config_file = build_dir / f"{build_config.name}-run.yaml"
shutil.copy(template_path, run_config_file)
module_name = f"llama_stack.templates.{template_name}"
module = importlib.import_module(module_name)
distribution_template = module.get_distribution_template()
cprint("Build Successful! Next steps: ", color="green")
env_vars = ", ".join(distribution_template.run_config_env_vars.keys())
cprint(
f" 1. Set the environment variables: {env_vars}",
color="green",
)
cprint(
f" 2. `llama stack run {run_config_file}`",
color="green",
)
else:
self._generate_run_config(build_config, build_dir)

def _run_template_list_cmd(self, args: argparse.Namespace) -> None:
import json
Expand Down
2 changes: 1 addition & 1 deletion llama_stack/providers/remote/inference/together/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ class TogetherImplConfig(BaseModel):
)

@classmethod
def sample_run_config(cls) -> Dict[str, Any]:
def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
return {
"url": "https://api.together.xyz/v1",
"api_key": "${env.TOGETHER_API_KEY}",
Expand Down

0 comments on commit 6765fd7

Please sign in to comment.