Merge pull request #275 from CommandDash/feat/gemini-1.5-flash
Upgrade to Gemini 1.5 Flash ⚡️
samyakkkk authored May 15, 2024
2 parents 31ebdc6 + 78ac110 commit ae24cf2
Showing 3 changed files with 16 additions and 11 deletions.
8 changes: 4 additions & 4 deletions vscode/package-lock.json

(Generated file; diff not rendered by default.)

4 changes: 2 additions & 2 deletions vscode/package.json
@@ -68,7 +68,7 @@
"properties": {
"fluttergpt.apiKey": {
"type": "string",
"markdownDescription": "Gemini API KEY(https://makersuite.google.com/)"
"markdownDescription": "CommandDash Gemini API KEY(https://makersuite.google.com/)"
}
}
},
@@ -293,7 +293,7 @@
"typescript": "^5.0.4"
},
"dependencies": {
"@google/generative-ai": "^0.1.1",
"@google/generative-ai": "0.11.1",
"@vscode/extension-telemetry": "^0.8.1",
"axios": "^1.4.0",
"dotenv": "^16.3.1",
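For context, the bump from `^0.1.1` to a pinned `0.11.1` moves to a newer `@google/generative-ai` SDK, which appears to require `startChat` history entries to be `Content` objects whose `parts` is an array of `{ text }` objects rather than a plain string. A minimal sketch of that remapping, assuming the extension keeps its prompt entries as `{ role, parts: string }` (the `PromptEntry` name is illustrative):

```ts
import { Content } from "@google/generative-ai";

// Hypothetical shape of the extension's internal prompt entries.
interface PromptEntry {
    role: string;
    parts: string;
}

// Convert internal entries into the Content[] format expected by SDK 0.11.x.
function toContents(prompt: PromptEntry[]): Content[] {
    return prompt.map(p => ({
        role: p.role,
        parts: [{ text: p.parts }],
    }));
}
```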
15 changes: 10 additions & 5 deletions vscode/src/repository/gemini-repository.ts
@@ -78,21 +78,26 @@ export class GeminiRepository extends GenerationRepository {
         let lastMessage = prompt.pop();

         // Count the tokens in the prompt
-        const model = this.genAI.getGenerativeModel({ model: "gemini-pro" });
+        const model = this.genAI.getGenerativeModel({ model: "gemini-pro" }); //TODO: upgrade this to flash model
         let promptText = "";
         prompt.forEach(p => promptText += p.parts);
         const { totalTokens } = await model.countTokens(promptText);
         console.log("Total input tokens: " + totalTokens);

         // Check if the token count exceeds the limit
-        if (totalTokens > 30720) {
+        if (totalTokens > 1040384) {
             throw Error('Input prompt exceeds the maximum token limit.');
         }

-        const chat = this.genAI.getGenerativeModel({ model: "gemini-pro", generationConfig: { temperature: 0.0, topP: 0.2 } }).startChat(
+        const chat = this.genAI.getGenerativeModel({ model: "gemini-1.5-flash-latest", generationConfig: { temperature: 0.0, topP: 0.2 } }).startChat(
             {
-                history: prompt, generationConfig: {
-                    maxOutputTokens: 2048,
+                history: prompt.map(p => {
+                    return {
+                        role: p.role,
+                        parts: [{ text: p.parts }]
+                    };
+                }), generationConfig: {
+                    maxOutputTokens: 8192,
                 },
             }
         );
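Taken together, the new flow counts input tokens, checks them against the larger Flash context window, and starts the chat with the remapped history. The 1,040,384 limit looks like the Gemini 1.5 Flash context window of 1,048,576 tokens minus the 8,192 reserved by `maxOutputTokens`. A minimal end-to-end sketch under those assumptions (the API key source and message shape are illustrative, and token counting uses the Flash model here even though the PR leaves counting on `gemini-pro` with a TODO):

```ts
import { GoogleGenerativeAI } from "@google/generative-ai";

const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? "");

async function askFlash(
    history: { role: string; parts: string }[],
    question: string
): Promise<string> {
    const flash = genAI.getGenerativeModel({
        model: "gemini-1.5-flash-latest",
        generationConfig: { temperature: 0.0, topP: 0.2 },
    });

    // Count input tokens before sending (the PR still counts with "gemini-pro").
    const { totalTokens } = await flash.countTokens(history.map(h => h.parts).join(""));

    // 1,048,576 (1.5 Flash context window) - 8,192 (maxOutputTokens) = 1,040,384.
    if (totalTokens > 1040384) {
        throw Error("Input prompt exceeds the maximum token limit.");
    }

    // Remap the internal history into the Content[] shape expected by SDK 0.11.x.
    const chat = flash.startChat({
        history: history.map(h => ({ role: h.role, parts: [{ text: h.parts }] })),
        generationConfig: { maxOutputTokens: 8192 },
    });

    const result = await chat.sendMessage(question);
    return result.response.text();
}
```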
