diff --git a/package-lock.json b/package-lock.json
index 684f0dd..19ff56c 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,15 +1,16 @@
 {
   "name": "arcana",
-  "version": "1.6.13",
+  "version": "1.6.16",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "arcana",
-      "version": "1.6.13",
+      "version": "1.6.16",
       "license": "MIT",
       "dependencies": {
         "@langchain/anthropic": "^0.3.8",
+        "@langchain/core": "^0.3.19",
         "@langchain/google-genai": "^0.1.4",
         "@langchain/openai": "^0.3.14",
         "@reduxjs/toolkit": "^1.9.5",
@@ -1869,7 +1870,6 @@
       "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.3.19.tgz",
       "integrity": "sha512-pJVOAHShefu1SRO8uhzUs0Pexah/Ib66WETLMScIC2w9vXlpwQy3DzXJPJ5X7ixry9N666jYO5cHtM2Z1DnQIQ==",
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "ansi-styles": "^5.0.0",
         "camelcase": "6",
@@ -1892,7 +1892,6 @@
       "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.2.8.tgz",
       "integrity": "sha512-wKVNZoYtd8EqQWUEsfDZlZ77rH7vVqgNtONXRwynUp7ZFMFUIPhSlqE9pXqrmYPE8ZTBFj7diag2lFgUuaOEKw==",
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@types/uuid": "^10.0.0",
         "commander": "^10.0.1",
diff --git a/package.json b/package.json
index bb33831..34715ab 100644
--- a/package.json
+++ b/package.json
@@ -39,6 +39,7 @@
   },
   "dependencies": {
     "@langchain/anthropic": "^0.3.8",
+    "@langchain/core": "^0.3.19",
     "@langchain/google-genai": "^0.1.4",
     "@langchain/openai": "^0.3.14",
     "@reduxjs/toolkit": "^1.9.5",
diff --git a/src/include/ai/LLM.ts b/src/include/ai/LLM.ts
index 0eedeb2..359828e 100644
--- a/src/include/ai/LLM.ts
+++ b/src/include/ai/LLM.ts
@@ -3,6 +3,7 @@ import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
 import { AgentSettings, AvailableModels } from '../ArcanaSettings';
 import { TokenTextSplitter } from 'langchain/text_splitter';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 type Provider = 'openai' | 'anthropic' | 'gemini';
 
@@ -32,7 +33,7 @@ function getAPIKeyForProvider(settings: AgentSettings, provider: Provider): stri
   return null;
 }
 
-export function getLLM(settings: AgentSettings, streaming = true) {
+export function getLLM(settings: AgentSettings, streaming = true): BaseChatModel {
   const model = settings.MODEL_TYPE;
   const temperature = settings.TEMPERATURE;
   const topP = settings.TOP_P;
@@ -51,7 +52,6 @@ export function getLLM(settings: AgentSettings, streaming = true) {
         temperature: temperature,
         topP: topP,
         streaming: streaming,
-        maxRetries: 0,
       });
     case 'openai':
       return new ChatOpenAI({
@@ -60,7 +60,6 @@
         temperature: temperature,
         topP: topP,
         streaming: streaming,
-        maxRetries: 0,
       });
     case 'gemini':
       return new ChatGoogleGenerativeAI({
@@ -69,7 +68,6 @@
         temperature: temperature,
         topP: topP,
         streaming: streaming,
-        maxRetries: 0,
       });
   }
 }
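
Note: with `getLLM` now annotated as returning `BaseChatModel`, call sites can stay provider-agnostic through the shared `@langchain/core` interface, since `ChatAnthropic`, `ChatOpenAI`, and `ChatGoogleGenerativeAI` all extend it. A minimal sketch of what the annotation guarantees (the `summarize` helper and its prompt are hypothetical, not part of this change):

```ts
import { BaseChatModel } from '@langchain/core/language_models/chat_models';

// Works with whichever provider getLLM returns: the call goes through
// the invoke() method every BaseChatModel implementation provides.
async function summarize(llm: BaseChatModel, text: string): Promise<string> {
  const response = await llm.invoke(`Summarize in one sentence:\n\n${text}`);
  // content may be a plain string or an array of content blocks;
  // coerce it for display.
  return typeof response.content === 'string'
    ? response.content
    : JSON.stringify(response.content);
}
```

Dropping `maxRetries: 0` lets the three clients fall back to the library's default retry behavior for transient errors instead of failing on the first one, and declaring `@langchain/core` as a direct dependency (rather than a hoisted peer of the provider packages) keeps all three providers resolving against the same core version.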