From b73b2e02ee93249067fd2f6c8a17d97466b1b7bc Mon Sep 17 00:00:00 2001 From: Peli de Halleux Date: Fri, 2 Aug 2024 07:45:20 -0700 Subject: [PATCH] github models support (#600) * support for github models * add gh for github * missing env * typo * add gh cli --- .devcontainer/devcontainer.json | 4 +- .github/workflows/github-models.yml | 43 +++++++++++++++++ .../docs/getting-started/configuration.mdx | 47 ++++++++++++++++++- packages/core/src/connection.ts | 31 ++++++++++++ packages/core/src/constants.ts | 12 ++++- packages/core/src/models.test.ts | 7 +++ packages/vscode/src/lmaccess.ts | 6 +++ 7 files changed, 146 insertions(+), 4 deletions(-) create mode 100644 .github/workflows/github-models.yml diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 1f67c4b4dd..b40ddc9a96 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -9,7 +9,6 @@ "dbaeumer.vscode-eslint", "bierner.markdown-mermaid", "yoavbls.pretty-ts-errors", - "kejun.markdown-alert", "astro-build.astro-vscode", "unifiedjs.vscode-mdx", "streetsidesoftware.code-spell-checker", @@ -18,8 +17,9 @@ } }, "features": { + "ghcr.io/ghcr.io/devcontainers/features/github-cli:1.0.13": {}, "ghcr.io/devcontainers/features/docker-in-docker:2": {}, "ghcr.io/devcontainers/features/azure-cli:1.2.5": {}, - "ghcr.io/devcontainers/features/python:1.6.2": {} + "ghcr.io/devcontainers/features/python:1.6.3": {} } } diff --git a/.github/workflows/github-models.yml b/.github/workflows/github-models.yml new file mode 100644 index 0000000000..6fb651cf4f --- /dev/null +++ b/.github/workflows/github-models.yml @@ -0,0 +1,43 @@ +name: github models smoke tests +on: + workflow_dispatch: + pull_request: + paths: + - yarn.lock + - ".github/workflows/github-models.yml" + - "packages/core/**/*" + - "packages/cli/**/*" + - "packages/samples/**/*" + push: + branches: + - main + paths: + - yarn.lock + - ".github/workflows/github-models.yml" + - "packages/core/**/*" + - 
"packages/cli/**/*" + - "packages/samples/**/*" +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-github-models + cancel-in-progress: true +jobs: + tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + fetch-depth: 0 + - uses: actions/setup-node@v4 + with: + node-version: "20" + cache: yarn + - run: yarn install --frozen-lockfile + - name: typecheck + run: yarn typecheck + - name: compile + run: yarn compile + - name: run summarize github-gpt4o + run: yarn test:summarize --model github:gpt-4o --out ./temp + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/docs/src/content/docs/getting-started/configuration.mdx b/docs/src/content/docs/getting-started/configuration.mdx index 762b1d0576..22964a1fe6 100644 --- a/docs/src/content/docs/getting-started/configuration.mdx +++ b/docs/src/content/docs/getting-started/configuration.mdx @@ -140,6 +140,51 @@ GENAISCRIPT_DEFAULT_MODEL=openai:gpt-4o ::: +## GitHub Models + + +The [GitHub Models](https://github.com/marketplace/models) provider, `github`, allows running models through the GitHub Marketplace. +This provider is useful for prototyping and subject to [rate limits](https://docs.github.com/en/github-models/prototyping-with-ai-models#rate-limits) +depending on your subscription. + + + +
1. Open the [GitHub Marketplace](https://github.com/marketplace/models) and find the model you want to use.
2. Click **Get Started** and follow the instructions to configure the GitHub token, or start a codespace!
3. Copy the model name from the JavaScript/Python samples

```js "Phi-3-mini-4k-instruct"
const modelName = "Phi-3-mini-4k-instruct";
```

to configure your script.

```js
script({
    model: "github:Phi-3-mini-4k-instruct"
})
```
+ +
+ ## Azure OpenAI @@ -269,7 +314,7 @@ script({ -## GitHub Copilot Models +## GitHub Copilot in Visual Studio Code If you have access to **GitHub Copilot in Visual Studio Code**, GenAIScript will be able to leverage those [language models](https://code.visualstudio.com/api/extension-guides/language-model) as well. diff --git a/packages/core/src/connection.ts b/packages/core/src/connection.ts index 0c035e0aaa..7f1f6ecfe7 100644 --- a/packages/core/src/connection.ts +++ b/packages/core/src/connection.ts @@ -3,18 +3,21 @@ import { DEFAULT_TEMPERATURE, DOCS_CONFIGURATION_AICI_URL, DOCS_CONFIGURATION_AZURE_OPENAI_URL, + DOCS_CONFIGURATION_GITHUB_URL, DOCS_CONFIGURATION_LITELLM_URL, DOCS_CONFIGURATION_LLAMAFILE_URL, DOCS_CONFIGURATION_LOCALAI_URL, DOCS_CONFIGURATION_OLLAMA_URL, DOCS_CONFIGURATION_OPENAI_URL, DOT_ENV_FILENAME, + GITHUB_MODELS_BASE, LITELLM_API_BASE, LLAMAFILE_API_BASE, LOCALAI_API_BASE, MODEL_PROVIDER_AICI, MODEL_PROVIDER_AZURE, MODEL_PROVIDER_CLIENT, + MODEL_PROVIDER_GITHUB, MODEL_PROVIDER_LITELLM, MODEL_PROVIDER_LLAMAFILE, MODEL_PROVIDER_OLLAMA, @@ -99,6 +102,24 @@ export async function parseTokenFromEnv( } } + if (provider === MODEL_PROVIDER_GITHUB) { + const token = env.GITHUB_TOKEN + if (!token) throw new Error("GITHUB_TOKEN must be set") + const type = "openai" + const base = GITHUB_MODELS_BASE + return { + provider, + model, + base, + type, + token, + source: "env: GITHUB_TOKEN", + curlHeaders: { + Authorization: `Bearer $GITHUB_TOKEN`, + }, + } + } + if (provider === MODEL_PROVIDER_AZURE) { const tokenVar = env.AZURE_OPENAI_API_KEY ? "AZURE_OPENAI_API_KEY" @@ -298,6 +319,16 @@ OPENAI_API_TYPE="localai" model: `${MODEL_PROVIDER_OPENAI}:gpt-3.5-turbo`, } + if (provider === MODEL_PROVIDER_GITHUB) + return { + config: ` + ## GitHub Models ${DOCS_CONFIGURATION_GITHUB_URL} + # use "${MODEL_PROVIDER_GITHUB}:" in script({ model: ... 
}) + GITHUB_TOKEN="${PLACEHOLDER_API_KEY}" + `, + model: `${MODEL_PROVIDER_GITHUB}:gpt-4o`, + } + return { config: ` ## OpenAI ${DOCS_CONFIGURATION_OPENAI_URL} diff --git a/packages/core/src/constants.ts b/packages/core/src/constants.ts index dad1cb2d88..6b40314780 100644 --- a/packages/core/src/constants.ts +++ b/packages/core/src/constants.ts @@ -89,6 +89,7 @@ export const EMOJI_FAIL = "❌" export const EMOJI_UNDEFINED = "?" export const MODEL_PROVIDER_OPENAI = "openai" +export const MODEL_PROVIDER_GITHUB = "github" export const MODEL_PROVIDER_AZURE = "azure" export const MODEL_PROVIDER_OLLAMA = "ollama" export const MODEL_PROVIDER_LLAMAFILE = "llamafile" @@ -98,12 +99,16 @@ export const MODEL_PROVIDER_CLIENT = "client" export const TRACE_FILE_PREVIEW_MAX_LENGTH = 240 +export const GITHUB_MODELS_BASE = "https://models.inference.ai.azure.com" + export const DOCS_CONFIGURATION_URL = "https://microsoft.github.io/genaiscript/getting-started/configuration/" export const DOCS_CONFIGURATION_OPENAI_URL = "https://microsoft.github.io/genaiscript/getting-started/configuration/#openai" +export const DOCS_CONFIGURATION_GITHUB_URL = + "https://microsoft.github.io/genaiscript/getting-started/configuration/#github" export const DOCS_CONFIGURATION_AZURE_OPENAI_URL = - "https://microsoft.github.io/genaiscript/getting-started/configuration/#azure-openai" + "https://microsoft.github.io/genaiscript/getting-started/configuration/#azure" export const DOCS_CONFIGURATION_OLLAMA_URL = "https://microsoft.github.io/genaiscript/getting-started/configuration/#ollama" export const DOCS_CONFIGURATION_LLAMAFILE_URL = @@ -121,6 +126,11 @@ export const MODEL_PROVIDERS = Object.freeze([ detail: "OpenAI or compatible", url: DOCS_CONFIGURATION_OPENAI_URL, }, + { + id: MODEL_PROVIDER_GITHUB, + detail: "GitHub Models", + url: DOCS_CONFIGURATION_GITHUB_URL, + }, { id: MODEL_PROVIDER_AZURE, detail: "Azure OpenAI deployment", diff --git a/packages/core/src/models.test.ts b/packages/core/src/models.test.ts 
index 9edb8429f3..550f9e5da8 100644 --- a/packages/core/src/models.test.ts +++ b/packages/core/src/models.test.ts @@ -3,6 +3,7 @@ import { parseModelIdentifier } from "./models" import assert from "node:assert" import { MODEL_PROVIDER_AICI, + MODEL_PROVIDER_GITHUB, MODEL_PROVIDER_LLAMAFILE, MODEL_PROVIDER_OLLAMA, MODEL_PROVIDER_OPENAI, @@ -38,6 +39,12 @@ describe("parseModelIdentifier", () => { assert(family === "*") assert(model === "llamafile") }) + test("github:gpt4", () => { + const { provider, model, family } = parseModelIdentifier("github:gpt4") + assert(provider === MODEL_PROVIDER_GITHUB) + assert(model === "gpt4") + assert(family === "gpt4") + }) test("gpt4", () => { const { provider, model, family } = parseModelIdentifier("gpt4") assert(provider === MODEL_PROVIDER_OPENAI) diff --git a/packages/vscode/src/lmaccess.ts b/packages/vscode/src/lmaccess.ts index 7fcb3aa798..b87d1c1a31 100644 --- a/packages/vscode/src/lmaccess.ts +++ b/packages/vscode/src/lmaccess.ts @@ -9,6 +9,7 @@ import { MODEL_PROVIDER_LITELLM, MODEL_PROVIDER_OPENAI, MODEL_PROVIDER_CLIENT, + MODEL_PROVIDER_GITHUB, } from "../../core/src/constants" import { APIType } from "../../core/src/host" import { parseModelIdentifier } from "../../core/src/models" @@ -64,6 +65,11 @@ async function generateLanguageModelConfiguration( provider: MODEL_PROVIDER_AZURE, apiType: "azure", }, + { + label: "GitHub Models", + detail: `Use a GitHub Models with a GitHub subscription.`, + provider: MODEL_PROVIDER_GITHUB, + }, { label: "LocalAI", description: "https://localai.io/",