diff --git a/vscode/package-lock.json b/vscode/package-lock.json
index 41b815fa..04d61583 100644
--- a/vscode/package-lock.json
+++ b/vscode/package-lock.json
@@ -8,7 +8,7 @@
       "name": "fluttergpt",
       "version": "0.3.9",
       "dependencies": {
-        "@google/generative-ai": "^0.1.1",
+        "@google/generative-ai": "0.11.1",
         "@vscode/extension-telemetry": "^0.8.1",
         "axios": "^1.4.0",
         "dotenv": "^16.3.1",
@@ -269,9 +269,9 @@
       }
     },
     "node_modules/@google/generative-ai": {
-      "version": "0.1.2",
-      "resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.1.2.tgz",
-      "integrity": "sha512-54easvKJpqnsW48LcplF/8bnLKvXeYe34rfMMdgqMVZEQmLOLEKZvbif4mFmyVwoMg1/6MPLwD703RLuSJKLJw==",
+      "version": "0.11.1",
+      "resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.11.1.tgz",
+      "integrity": "sha512-ZiUiJJbl55TXcvu73+Kf/bUhzcRTH/bsGBeYZ9ULqU0imXg3POcd+NVYM9j+TGq4MA73UYwHPmJHwmy+QZEzyQ==",
       "engines": {
         "node": ">=18.0.0"
       }
diff --git a/vscode/package.json b/vscode/package.json
index 2c534076..77fdcf8f 100644
--- a/vscode/package.json
+++ b/vscode/package.json
@@ -68,7 +68,7 @@
         "properties": {
           "fluttergpt.apiKey": {
             "type": "string",
-            "markdownDescription": "Gemini API KEY(https://makersuite.google.com/)"
+            "markdownDescription": "CommandDash Gemini API KEY(https://makersuite.google.com/)"
           }
         }
       },
@@ -293,7 +293,7 @@
     "typescript": "^5.0.4"
   },
   "dependencies": {
-    "@google/generative-ai": "^0.1.1",
+    "@google/generative-ai": "0.11.1",
     "@vscode/extension-telemetry": "^0.8.1",
     "axios": "^1.4.0",
     "dotenv": "^16.3.1",
diff --git a/vscode/src/repository/gemini-repository.ts b/vscode/src/repository/gemini-repository.ts
index 0e6c791c..02e6cff7 100644
--- a/vscode/src/repository/gemini-repository.ts
+++ b/vscode/src/repository/gemini-repository.ts
@@ -78,21 +78,26 @@ export class GeminiRepository extends GenerationRepository {
         let lastMessage = prompt.pop();
 
         // Count the tokens in the prompt
-        const model = this.genAI.getGenerativeModel({ model: "gemini-pro" });
+        const model = this.genAI.getGenerativeModel({ model: "gemini-pro" }); //TODO: upgrade this to flash model
         let promptText = "";
         prompt.forEach(p => promptText += p.parts);
         const { totalTokens } = await model.countTokens(promptText);
         console.log("Total input tokens: " + totalTokens);
 
         // Check if the token count exceeds the limit
-        if (totalTokens > 30720) {
+        if (totalTokens > 1040384) {
             throw Error('Input prompt exceeds the maximum token limit.');
         }
 
-        const chat = this.genAI.getGenerativeModel({ model: "gemini-pro", generationConfig: { temperature: 0.0, topP: 0.2 } }).startChat(
+        const chat = this.genAI.getGenerativeModel({ model: "gemini-1.5-flash-latest", generationConfig: { temperature: 0.0, topP: 0.2 } }).startChat(
             {
-                history: prompt, generationConfig: {
-                    maxOutputTokens: 2048,
+                history: prompt.map(p => {
+                    return {
+                        role: p.role,
+                        parts: [{ text: p.parts }]
+                    };
+                }), generationConfig: {
+                    maxOutputTokens: 8192,
                 },
             }
        );
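
For context on the `history` change above: in `@google/generative-ai` 0.1.x the extension stored each chat turn with a string-valued `parts` field, while 0.11.x expects `Content` objects whose `parts` is an array of `Part` objects (`[{ text: ... }]`), which is why the diff maps the old shape into the new one. Below is a minimal standalone sketch of the migrated call pattern; the `LegacyMessage` interface, the `chatWithHistory` helper, and the `GEMINI_API_KEY` environment variable are illustrative assumptions (the extension itself reads the key from the `fluttergpt.apiKey` setting), while `GoogleGenerativeAI`, `getGenerativeModel`, `startChat`, and `sendMessage` are the real 0.11.x SDK surface:

```typescript
import { GoogleGenerativeAI, Content } from "@google/generative-ai";

// Hypothetical legacy shape matching the pre-upgrade code, where `parts` was a plain string.
interface LegacyMessage {
    role: string;
    parts: string;
}

async function chatWithHistory(legacy: LegacyMessage[], question: string): Promise<string> {
    // Key source is an assumption for this sketch; FlutterGPT reads it from VS Code settings.
    const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY!);
    const model = genAI.getGenerativeModel({
        model: "gemini-1.5-flash-latest",
        generationConfig: { temperature: 0.0, topP: 0.2 },
    });

    // 0.11.x history entries are Content objects with an array of Part objects,
    // hence the map from the legacy string-valued `parts`.
    const history: Content[] = legacy.map((m) => ({
        role: m.role,
        parts: [{ text: m.parts }],
    }));

    const chat = model.startChat({
        history,
        generationConfig: { maxOutputTokens: 8192 },
    });

    const result = await chat.sendMessage(question);
    return result.response.text();
}
```

On the new limits: the raised input cap of 1,040,384 appears to be Gemini 1.5 Flash's 1,048,576-token context window minus the 8,192 tokens reserved for `maxOutputTokens`. Note that `countTokens` is still run against `gemini-pro` in this change; the `//TODO` in the diff flags upgrading that call to the Flash model as follow-up work.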