Skip to content

Commit

Permalink
feat: update dependencies and LLM configuration
Browse files Browse the repository at this point in the history
Upgrade @langchain/core to version ^0.3.19 to ensure access
to the latest features and fixes. Remove the maxRetries property
for simplified error handling in the LLM factory. Fix the return
type of the getLLM function to be BaseChatModel.
  • Loading branch information
A-F-V authored and gitbutler-client committed Dec 3, 2024
1 parent 647107f commit 72830d3
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 8 deletions.
7 changes: 3 additions & 4 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
},
"dependencies": {
"@langchain/anthropic": "^0.3.8",
"@langchain/core": "^0.3.19",
"@langchain/google-genai": "^0.1.4",
"@langchain/openai": "^0.3.14",
"@reduxjs/toolkit": "^1.9.5",
Expand Down
6 changes: 2 additions & 4 deletions src/include/ai/LLM.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import { ChatAnthropic } from '@langchain/anthropic';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { AgentSettings, AvailableModels } from '../ArcanaSettings';
import { TokenTextSplitter } from 'langchain/text_splitter';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';

type Provider = 'openai' | 'anthropic' | 'gemini';

Expand Down Expand Up @@ -32,7 +33,7 @@ function getAPIKeyForProvider(settings: AgentSettings, provider: Provider): stri
return null;
}

export function getLLM(settings: AgentSettings, streaming = true) {
export function getLLM(settings: AgentSettings, streaming = true): BaseChatModel {
const model = settings.MODEL_TYPE;
const temperature = settings.TEMPERATURE;
const topP = settings.TOP_P;
Expand All @@ -51,7 +52,6 @@ export function getLLM(settings: AgentSettings, streaming = true) {
temperature: temperature,
topP: topP,
streaming: streaming,
maxRetries: 0,
});
case 'openai':
return new ChatOpenAI({
Expand All @@ -60,7 +60,6 @@ export function getLLM(settings: AgentSettings, streaming = true) {
temperature: temperature,
topP: topP,
streaming: streaming,
maxRetries: 0,
});
case 'gemini':
return new ChatGoogleGenerativeAI({
Expand All @@ -69,7 +68,6 @@ export function getLLM(settings: AgentSettings, streaming = true) {
temperature: temperature,
topP: topP,
streaming: streaming,
maxRetries: 0,
});
}
}
Expand Down

0 comments on commit 72830d3

Please sign in to comment.