Skip to content

Commit

Permalink
Merge pull request #758 from airtai/dev
Browse files Browse the repository at this point in the history
Update Packages (#756)
  • Loading branch information
harishmohanraj authored Oct 24, 2024
2 parents 1a4401c + 52b2615 commit 319611f
Show file tree
Hide file tree
Showing 13 changed files with 91 additions and 86 deletions.
2 changes: 2 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -49,5 +49,7 @@ ENV PATH="${PATH}:/root/.local/bin:${FLYCTL_INSTALL}/bin"

EXPOSE ${PORT}

# nosemgrep
ENTRYPOINT []
# nosemgrep
CMD [ "/usr/bin/bash", "-c", "./run-server.sh" ]
2 changes: 2 additions & 0 deletions auth_callout/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,7 @@ RUN npm install -g pnpm && ls -lah
RUN pnpm install --force

# Define the command to run your app using pnpm serve
# nosemgrep
ENTRYPOINT []
# nosemgrep
CMD ["/bin/sh", "-c", "./run-auth-callout.sh"]
4 changes: 2 additions & 2 deletions auth_callout/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
],
"devDependencies": {
"tsx": "^4.19.1",
"typescript": "^5.6.2",
"vite": "^5.4.5",
"typescript": "^5.6.3",
"vite": "^5.4.9",
"vite-plugin-checker": "^0.8.0"
},
"scripts": {
Expand Down
6 changes: 3 additions & 3 deletions auth_callout/packages/auth-service/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,14 @@
"type": "module",
"main": "./lib/index.js",
"devDependencies": {
"@types/node": "^22.5.5"
"@types/node": "^22.7.7"
},
"dependencies": {
"@prisma/client": "^5.19.0",
"@prisma/client": "^5.21.1",
"nats": "^2.28.2",
"nats-jwt": "^0.0.9",
"nkeys.js": "^1.1.0",
"prisma": "^5.19.0"
"prisma": "^5.21.1"
},
"scripts": {
"serve": "tsx src/service.ts"
Expand Down
110 changes: 55 additions & 55 deletions auth_callout/pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 3 additions & 1 deletion fastagency_studio/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,9 @@ async def get_models_schemas() -> Schemas:

async def validate_toolbox(toolbox: Toolbox) -> None:
try:
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(
timeout=30.0
) as client: # Set timeout to 30 seconds
resp = await client.get(toolbox.openapi_url) # type: ignore[arg-type]
except Exception as e:
raise HTTPException(status_code=422, detail="OpenAPI URL is invalid") from e
Expand Down
2 changes: 1 addition & 1 deletion fastagency_studio/models/agents/web_surfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from asyncer import syncify
from autogen.agentchat import AssistantAgent as AutoGenAssistantAgent
from autogen.agentchat import ConversableAgent as AutoGenConversableAgent
from fastagency.runtime.autogen.tools import WebSurferTool
from fastagency.runtimes.autogen.tools import WebSurferTool
from typing_extensions import TypeAlias

from ..base import Field, Model
Expand Down
35 changes: 17 additions & 18 deletions fastagency_studio/models/llms/together.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,49 +12,47 @@
"TogetherAI",
]

# retrieved the models from the API on Sep 27, 2024
# retrieved the models from the API on Oct 24, 2024
together_model_string = {
"Code Llama Instruct (34B)": "togethercomputer/CodeLlama-34b-Instruct",
"Upstage SOLAR Instruct v1 (11B)": "upstage/SOLAR-10.7B-Instruct-v1.0",
"Nous Hermes-2 Yi (34B)": "NousResearch/Nous-Hermes-2-Yi-34B",
"Llama3 8B Chat HF INT4": "togethercomputer/Llama-3-8b-chat-hf-int4",
"Gemma Instruct (2B)": "google/gemma-2b-it",
"MythoMax-L2 (13B)": "Gryphe/MythoMax-L2-13b",
"Mistral (7B) Instruct": "mistralai/Mistral-7B-Instruct-v0.1",
"Mistral (7B) Instruct v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
"Meta Llama Guard 3 11B Vision Turbo": "meta-llama/Llama-Guard-3-11B-Vision-Turbo",
"Meta Llama Vision Free": "meta-llama/Llama-Vision-Free",
"Qwen 1.5 Chat (72B)": "Qwen/Qwen1.5-72B-Chat",
"DeepSeek LLM Chat (67B)": "deepseek-ai/deepseek-llm-67b-chat",
"Qwen 2 Instruct (72B)": "Qwen/Qwen2-72B-Instruct",
"Meta Llama 3.2 11B Vision Instruct Turbo": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
"Meta Llama 3.2 3B Instruct Turbo": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
"Togethercomputer Llama3 8B Instruct Int8": "togethercomputer/Llama-3-8b-chat-hf-int8",
"Mistral (7B) Instruct v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
"Qwen 1.5 Chat (110B)": "Qwen/Qwen1.5-110B-Chat",
"LLaMA-2 Chat (13B)": "togethercomputer/llama-2-13b-chat",
"Gemma-2 Instruct (27B)": "google/gemma-2-27b-it",
"Meta Llama 3 70B Instruct Turbo": "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
"Meta Llama 3 70B Instruct Lite": "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
"Gemma-2 Instruct (9B)": "google/gemma-2-9b-it",
"Meta Llama 3.2 90B Vision Instruct Turbo": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
"Gemma-2 Instruct (27B)": "google/gemma-2-27b-it",
"Meta Llama 3 8B Instruct Reference": "meta-llama/Llama-3-8b-chat-hf",
"Meta Llama 3.1 70B Instruct Turbo": "albert/meta-llama-3-1-70b-instruct-turbo",
"WizardLM-2 (8x22B)": "microsoft/WizardLM-2-8x22B",
"Mixtral-8x7B Instruct v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"Meta Llama 3.1 405B Instruct Turbo": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
"Meta Llama 3 70B Instruct Reference": "meta-llama/Llama-3-70b-chat-hf",
"WizardLM-2 (8x22B)": "microsoft/WizardLM-2-8x22B",
"Meta Llama 3.2 11B Vision Instruct Turbo": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
"LLaVa-Next (Mistral-7B)": "llava-hf/llava-v1.6-mistral-7b-hf",
"Meta Llama 3.1 405B Instruct Turbo": "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro",
"DBRX Instruct": "databricks/dbrx-instruct",
"Nous Hermes 2 - Mixtral 8x7B-DPO ": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"Meta Llama 3 70B Instruct Reference": "meta-llama/Llama-3-70b-chat-hf",
"Qwen2.5 7B Instruct Turbo": "Qwen/Qwen2.5-7B-Instruct-Turbo",
"Meta Llama 3.2 3B Instruct Turbo": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
"Qwen2.5 72B Instruct Turbo": "Qwen/Qwen2.5-72B-Instruct-Turbo",
"Meta Llama 3 8B Instruct Turbo": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
"Meta Llama 3 8B Instruct Lite": "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
"Meta Llama 3.1 8B Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",
"Meta Llama 3.1 8B Instruct Turbo": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
"Mixtral-8x22B Instruct v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
"Gryphe MythoMax L2 Lite (13B)": "Gryphe/MythoMax-L2-13b-Lite",
"Meta Llama 3.2 90B Vision Instruct Turbo": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
"Hermes 3 - Llama-3.1 405B": "NousResearch/Hermes-3-Llama-3.1-405B-Turbo",
"Qwen 1.5 Chat (110B)": "Qwen/Qwen1.5-110B-Chat",
"Meta Llama 3.1 70B Instruct Turbo": "albert/meta-llama-3-1-70b-instruct-turbo",
"LLaMA-2 Chat (7B)": "togethercomputer/llama-2-7b-chat",
"Qwen 1.5 Chat (72B)": "Qwen/Qwen1.5-72B-Chat",
"Meta Llama Vision Free": "meta-llama/Llama-Vision-Free",
"Gemma-2 Instruct (9B)": "google/gemma-2-9b-it",
"Mixtral-8x22B Instruct v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
"WizardLM v1.2 (13B)": "WizardLM/WizardLM-13B-V1.2",
"Koala (7B)": "togethercomputer/Koala-7B",
"Qwen 2 Instruct (1.5B)": "Qwen/Qwen2-1.5B-Instruct",
Expand Down Expand Up @@ -108,6 +106,7 @@
"OpenHermes-2.5-Mistral (7B)": "teknium/OpenHermes-2p5-Mistral-7B",
"Qwen 1.5 Chat (4B)": "Qwen/Qwen1.5-4B-Chat",
"carson ml318br": "carson/ml318br",
"Meta Llama 3.1 8B Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",
"Llama-3 70B Instruct Gradient 1048K": "gradientai/Llama-3-70B-Instruct-Gradient-1048k",
"Meta Llama 3.1 70B Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
}
Expand Down
2 changes: 1 addition & 1 deletion fastagency_studio/models/toolboxes/toolbox.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ async def create_autogen(
my_model = await cls.from_db(model_id)

# Download OpenAPI spec
with httpx.Client() as httpx_client:
with httpx.Client(timeout=30.0) as httpx_client: # Set timeout to 30 seconds
response = httpx_client.get(my_model.openapi_url) # type: ignore[arg-type]
response.raise_for_status()
openapi_spec = response.text
Expand Down
2 changes: 1 addition & 1 deletion fastagency_studio/saas_app_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
def _make_request(
url: str, headers: dict[str, str]
) -> Union[dict[str, Any], list[dict[str, Any]]]:
with httpx.Client() as httpx_client:
with httpx.Client(timeout=30.0) as httpx_client: # Set timeout to 30 seconds
response = httpx_client.get(url, headers=headers) # type: ignore[arg-type]
response.raise_for_status()
ret_val = response.json()
Expand Down
Loading

0 comments on commit 319611f

Please sign in to comment.