Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

FEAT: Add OpenRouter model provider #35

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .vscode/launch.json
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,8 @@
"pauseForSourceMap": false,
"outFiles": [
"${workspaceFolder}/extensions/vscode/out/extension.js",
"/Users/natesesti/.pearai/config.ts"
"${env:USERPROFILE}/.pearai/config.ts",
"${env:HOME}/.pearai/config.ts"
],
"preLaunchTask": "vscode-extension:build",
"env": {
Expand Down
32 changes: 18 additions & 14 deletions core/config/load.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import * as JSONC from "comment-json";
import * as fs from "fs";
import path from "path";
import { homedir } from "os";
import {
slashCommandFromDescription,
slashFromCustomCommand,
Expand Down Expand Up @@ -73,7 +74,7 @@ function resolveSerializedConfig(filepath: string): SerializedContinueConfig {

// Replace "pearai-server" with "pearai_server" at the beginning
// This is to make v0.0.3 backwards compatible with v0.0.2
content = content.replace(/"pearai-server"/g, '"pearai_server"');
content = content.replace(/"pearai-server"/g, "pearai_server");

const config = JSONC.parse(content) as unknown as SerializedContinueConfig;
if (config.env && Array.isArray(config.env)) {
Expand Down Expand Up @@ -330,7 +331,6 @@ async function intermediateToFinalConfig(
} else {
// Remove free trial models
models = models.filter((model) => model.providerName !== "free-trial");
console.log("Models:", models);
}

// Tab autocomplete model
Expand Down Expand Up @@ -547,8 +547,13 @@ function escapeSpacesInPath(p: string): string {
}

async function buildConfigTs() {
if (!fs.existsSync(getConfigTsPath())) {
return undefined;
const homeDir = homedir();
const configTsPath = path.join(homeDir, ".pearai", "config.ts");

if (!fs.existsSync(configTsPath)) {
console.error(`Config file does not exist: ${configTsPath}`);

return;
}

try {
Expand All @@ -557,17 +562,16 @@ async function buildConfigTs() {
`${escapeSpacesInPath(path.dirname(process.execPath))}/esbuild${
getTarget().startsWith("win32") ? ".exe" : ""
} ${escapeSpacesInPath(
getConfigTsPath(),
configTsPath,
)} --bundle --outfile=${escapeSpacesInPath(
getConfigJsPath(),
)} --platform=node --format=cjs --sourcemap --external:fetch --external:fs --external:path --external:os --external:child_process`,
);
} else {
// Dynamic import esbuild so potentially disastrous errors can be caught
const esbuild = await import("esbuild");

await esbuild.build({
entryPoints: [getConfigTsPath()],
entryPoints: [configTsPath],
bundle: true,
platform: "node",
format: "cjs",
Expand All @@ -577,15 +581,16 @@ async function buildConfigTs() {
});
}
} catch (e) {
console.log(
console.error(
`Build error. Please check your ~/.pearai/config.ts file: ${e}`,
);
return undefined;
return;
}

if (!fs.existsSync(getConfigJsPath())) {
return undefined;
return;
}

return fs.readFileSync(getConfigJsPath(), "utf8");
}
function addDefaults(config: SerializedContinueConfig): void {
Expand Down Expand Up @@ -614,7 +619,7 @@ function addDefaultModels(config: SerializedContinueConfig): void {

function addDefaultCustomCommands(config: SerializedContinueConfig): void {
const defaultCommands = defaultCustomCommands;
defaultCommands.forEach(defaultCommand => {
defaultCommands.forEach((defaultCommand) => {
if (!config.customCommands) {
config.customCommands = [];
}
Expand Down Expand Up @@ -650,7 +655,6 @@ function addDefaultSlashCommands(config: SerializedContinueConfig): void {
});
}


async function loadFullConfigNode(
ide: IDE,
workspaceConfigs: ContinueRcJson[],
Expand Down Expand Up @@ -688,7 +692,7 @@ async function loadFullConfigNode(
}
intermediate = module.modifyConfig(intermediate);
} catch (e) {
console.log("Error loading config.ts: ", e);
console.error("Error loading config.ts: ", e);
}
}

Expand All @@ -705,7 +709,7 @@ async function loadFullConfigNode(
}
intermediate = module.modifyConfig(intermediate);
} catch (e) {
console.log("Error loading remotely set config.js: ", e);
console.error("Error loading remotely set config.js: ", e);
}
}

Expand Down
1 change: 1 addition & 0 deletions core/index.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -621,6 +621,7 @@ type ModelProvider =
| "openai-aiohttp"
| "msty"
| "watsonx"
| "openrouter"
| "pearai_server"
| "aider"
| "perplexity"
Expand Down
9 changes: 6 additions & 3 deletions core/llm/autodetect.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ const PROVIDER_HANDLES_TEMPLATING: ModelProvider[] = [
"bedrock",
"continue-proxy",
"mistral",
"pearai_server"
"pearai_server",
];

const PROVIDER_SUPPORTS_IMAGES: ModelProvider[] = [
Expand All @@ -57,6 +57,7 @@ const PROVIDER_SUPPORTS_IMAGES: ModelProvider[] = [
"bedrock",
"continue-proxy",
"pearai_server",
"openrouter",
];

const MODEL_SUPPORTS_IMAGES: string[] = [
Expand All @@ -79,9 +80,11 @@ function modelSupportsImages(
provider: ModelProvider,
model: string,
title: string | undefined,
capabilities: ModelCapability | undefined
capabilities: ModelCapability | undefined,
): boolean {
if (capabilities?.uploadImage !== undefined) return capabilities.uploadImage
if (capabilities?.uploadImage !== undefined) {
return capabilities.uploadImage;
}
if (!PROVIDER_SUPPORTS_IMAGES.includes(provider)) {
return false;
}
Expand Down
17 changes: 17 additions & 0 deletions core/llm/llms/OpenRouter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import { LLMOptions, ModelProvider } from "../../index.js";
import { osModelsEditPrompt } from "../templates/edit.js";
import OpenAI from "./OpenAI.js";

/**
 * LLM provider for OpenRouter (https://openrouter.ai), a gateway that exposes
 * many language models behind a single OpenAI-compatible API.
 *
 * Inherits all request handling from the OpenAI provider and overrides only
 * the provider identity and the default options.
 */
class OpenRouter extends OpenAI {
  // Identifier used in config ("provider": "openrouter") to select this class.
  static providerName: ModelProvider = "openrouter";
  static defaultOptions: Partial<LLMOptions> = {
    // OpenRouter serves an OpenAI-compatible API under /api/v1/.
    apiBase: "https://openrouter.ai/api/v1/",
    // No default model — the user is expected to set one in their config.
    // NOTE(review): an empty string relies on downstream validation; confirm
    // a helpful error is raised when the model is left unset.
    model: "",
    promptTemplates: {
      // Reuse the generic open-source-model edit prompt template.
      edit: osModelsEditPrompt,
    },
    // Use the modern chat-completions endpoint, not the legacy completions one.
    useLegacyCompletionsEndpoint: false,
  };
}

export default OpenRouter;
41 changes: 21 additions & 20 deletions core/llm/llms/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ import Mistral from "./Mistral.js";
import Msty from "./Msty.js";
import Ollama from "./Ollama.js";
import OpenAI from "./OpenAI.js";
import OpenRouter from "./OpenRouter.js";
import Replicate from "./Replicate.js";
import TextGenWebUI from "./TextGenWebUI.js";
import Together from "./Together.js";
Expand All @@ -39,7 +40,6 @@ import ContinueProxy from "./stubs/ContinueProxy.js";
import PearAIServer from "./PearAIServer.js";
import Aider from "./Aider.js";


function convertToLetter(num: number): string {
let result = "";
while (num > 0) {
Expand Down Expand Up @@ -106,34 +106,35 @@ export async function renderTemplatedString(
}

const LLMs = [
Aider,
Anthropic,
Azure,
Bedrock,
Cloudflare,
Cohere,
ContinueProxy,
Deepseek,
DeepInfra,
Fireworks,
Flowise,
FreeTrial,
Gemini,
Llamafile,
Ollama,
Replicate,
TextGenWebUI,
Together,
HuggingFaceTGI,
Groq,
HuggingFaceInferenceAPI,
HuggingFaceTGI,
LlamaCpp,
OpenAI,
Llamafile,
LMStudio,
Mistral,
Bedrock,
DeepInfra,
Flowise,
Groq,
Fireworks,
ContinueProxy,
Cloudflare,
Deepseek,
Msty,
Azure,
WatsonX,
Ollama,
OpenAI,
OpenRouter,
PearAIServer,
Aider,
Replicate,
TextGenWebUI,
Together,
WatsonX,
];

export async function llmFromDescription(
Expand Down Expand Up @@ -206,4 +207,4 @@ export function llmFromProviderAndOptions(
}

return new cls(llmOptions);
}
}
6 changes: 4 additions & 2 deletions docs/static/schemas/config.json
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,8 @@
"msty",
"watsonx",
"pearai_server",
"aider"
"aider",
"openrouter"
],
"markdownEnumDescriptions": [
"### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://trypear.ai/reference/Model%20Providers/openai)",
Expand All @@ -215,7 +216,8 @@
"### Deepseek\n Deepseek's API provides the best pricing for their state-of-the-art Deepseek Coder models. To get started, obtain an API key from [their console](https://platform.deepseek.com/api_keys)",
"### Azure OpenAI\n Azure OpenAI lets you securely run OpenAI's models on Azure. To get started, follow the steps [here](https://trypear.ai/reference/Model%20Providers/azure)",
"### Msty\nMsty is the simplest way to get started with online or local LLMs on all desktop platforms - Windows, Mac, and Linux. No fussing around, one-click and you are up and running. To get started, follow these steps:\n1. Download from [Msty.app](https://msty.app/), open the application, and click 'Setup Local AI'.\n2. Go to the Local AI Module page and download a model of your choice.\n3. Once the model has finished downloading, you can start asking questions through Continue.\n> [Reference](https://pearai.dev/docs/reference/Model%20Providers/Msty)",
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks."
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks.",
"### OpenRouter\nOpenRouter offers a single API to access almost any language model. To get started, obtain an API key from [their console](https://openrouter.ai/settings/keys)."
],
"type": "string"
},
Expand Down
6 changes: 4 additions & 2 deletions extensions/intellij/src/main/resources/config_schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,8 @@
"msty",
"watsonx",
"pearai_server",
"aider"
"aider",
"openrouter"
],
"markdownEnumDescriptions": [
"### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://trypear.ai/reference/Model%20Providers/openai)",
Expand All @@ -215,7 +216,8 @@
"### Deepseek\n Deepseek's API provides the best pricing for their state-of-the-art Deepseek Coder models. To get started, obtain an API key from [their console](https://platform.deepseek.com/api_keys)",
"### Azure OpenAI\n Azure OpenAI lets you securely run OpenAI's models on Azure. To get started, follow the steps [here](https://trypear.ai/reference/Model%20Providers/azure)",
"### Msty\nMsty is the simplest way to get started with online or local LLMs on all desktop platforms - Windows, Mac, and Linux. No fussing around, one-click and you are up and running. To get started, follow these steps:\n1. Download from [Msty.app](https://msty.app/), open the application, and click 'Setup Local AI'.\n2. Go to the Local AI Module page and download a model of your choice.\n3. Once the model has finished downloading, you can start asking questions through Continue.\n> [Reference](https://pearai.dev/docs/reference/Model%20Providers/Msty)",
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks."
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks.",
"### OpenRouter\nOpenRouter offers a single API to access almost any language model. To get started, obtain an API key from [their console](https://openrouter.ai/settings/keys)."
],
"type": "string"
},
Expand Down
6 changes: 4 additions & 2 deletions extensions/vscode/config_schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,8 @@
"msty",
"watsonx",
"pearai_server",
"aider"
"aider",
"openrouter"
],
"markdownEnumDescriptions": [
"### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://trypear.ai/reference/Model%20Providers/openai)",
Expand All @@ -215,7 +216,8 @@
"### Deepseek\n Deepseek's API provides the best pricing for their state-of-the-art Deepseek Coder models. To get started, obtain an API key from [their console](https://platform.deepseek.com/api_keys)",
"### Azure OpenAI\n Azure OpenAI lets you securely run OpenAI's models on Azure. To get started, follow the steps [here](https://trypear.ai/reference/Model%20Providers/azure)",
"### Msty\nMsty is the simplest way to get started with online or local LLMs on all desktop platforms - Windows, Mac, and Linux. No fussing around, one-click and you are up and running. To get started, follow these steps:\n1. Download from [Msty.app](https://msty.app/), open the application, and click 'Setup Local AI'.\n2. Go to the Local AI Module page and download a model of your choice.\n3. Once the model has finished downloading, you can start asking questions through Continue.\n> [Reference](https://pearai.dev/docs/reference/Model%20Providers/Msty)",
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks."
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks.",
"### OpenRouter\nOpenRouter offers a single API to access almost any language model. To get started, obtain an API key from [their console](https://openrouter.ai/settings/keys)."
],
"type": "string"
},
Expand Down
8 changes: 5 additions & 3 deletions extensions/vscode/continue_rc_schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,8 @@
"msty",
"watsonx",
"pearai_server",
"aider"
"aider",
"openrouter"
],
"markdownEnumDescriptions": [
"### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://trypear.ai/reference/Model%20Providers/openai)",
Expand All @@ -218,7 +219,8 @@
"### Deepseek\n Deepseek's API provides the best pricing for their state-of-the-art Deepseek Coder models. To get started, obtain an API key from [their console](https://platform.deepseek.com/api_keys)",
"### Azure OpenAI\n Azure OpenAI lets you securely run OpenAI's models on Azure. To get started, follow the steps [here](https://trypear.ai/reference/Model%20Providers/azure)",
"### Msty\nMsty is the simplest way to get started with online or local LLMs on all desktop platforms - Windows, Mac, and Linux. No fussing around, one-click and you are up and running. To get started, follow these steps:\n1. Download from [Msty.app](https://msty.app/), open the application, and click 'Setup Local AI'.\n2. Go to the Local AI Module page and download a model of your choice.\n3. Once the model has finished downloading, you can start asking questions through Continue.\n> [Reference](https://pearai.dev/docs/reference/Model%20Providers/Msty)",
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks."
"### WatsonX\nWatsonx, developed by IBM, offers a variety of pre-trained AI foundation models that can be used for natural language processing (NLP), computer vision, and speech recognition tasks.",
"### OpenRouter\nOpenRouter offers a single API to access almost any language model. To get started, obtain an API key from [their console](https://openrouter.ai/settings/keys)."
],
"type": "string"
},
Expand Down Expand Up @@ -2779,4 +2781,4 @@
}
}
}
}
}
Loading