add support for mistral api (#610)
* add support for mistral api

* update docs to show support for Mistral

* add default temp to all providers, suggest different results per provider

---------

Co-authored-by: timothycarambat <[email protected]>
shatfield4 and timothycarambat authored Jan 17, 2024
1 parent 90df375 commit c2c8fe9
Showing 25 changed files with 412 additions and 22 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -71,6 +71,7 @@ Some cool features of AnythingLLM
- [LM Studio (all models)](https://lmstudio.ai)
- [LocalAi (all models)](https://localai.io/)
- [Together AI (chat models)](https://www.together.ai/)
- [Mistral](https://mistral.ai/)

**Supported Embedding models:**

4 changes: 4 additions & 0 deletions docker/.env.example
@@ -44,6 +44,10 @@ GID='1000'
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'

# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'

###########################################
######## Embedding API SELECTION ##########
###########################################
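With LLM_PROVIDER='mistral' and a key set, MISTRAL_MODEL_PREF picks one of Mistral's hosted chat models; at the time of this commit those included mistral-tiny, mistral-small, and mistral-medium, with mistral-tiny used in the example above.
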
103 changes: 103 additions & 0 deletions frontend/src/components/LLMSelection/MistralOptions/index.jsx
@@ -0,0 +1,103 @@
import { useState, useEffect } from "react";
import System from "@/models/system";

export default function MistralOptions({ settings }) {
const [inputValue, setInputValue] = useState(settings?.MistralApiKey);
const [mistralKey, setMistralKey] = useState(settings?.MistralApiKey);

return (
<div className="flex gap-x-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Mistral API Key
</label>
<input
type="password"
name="MistralApiKey"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Mistral API Key"
defaultValue={settings?.MistralApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
onChange={(e) => setInputValue(e.target.value)}
onBlur={() => setMistralKey(inputValue)}
/>
</div>
<MistralModelSelection settings={settings} apiKey={mistralKey} />
</div>
);
}

function MistralModelSelection({ apiKey, settings }) {
const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true);

useEffect(() => {
async function findCustomModels() {
if (!apiKey) {
setCustomModels([]);
setLoading(false);
return;
}
setLoading(true);
const { models } = await System.customModels(
"mistral",
typeof apiKey === "boolean" ? null : apiKey
);
setCustomModels(models || []);
setLoading(false);
}
findCustomModels();
}, [apiKey]);

if (loading || customModels.length == 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="MistralModelPref"
disabled={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
{!!apiKey
? "-- loading available models --"
: "-- waiting for API key --"}
</option>
</select>
</div>
);
}

return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="MistralModelPref"
required={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{customModels.length > 0 && (
<optgroup label="Available Mistral Models">
{customModels.map((model) => {
return (
<option
key={model.id}
value={model.id}
selected={settings?.MistralModelPref === model.id}
>
{model.id}
</option>
);
})}
</optgroup>
)}
</select>
</div>
);
}
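
A note on the component above: the API key field commits its value on blur rather than on every keystroke, so MistralModelSelection only refetches the model list once a complete key has been entered. That fetch goes through System.customModels("mistral", key); the server-side handler for this call is among the files not rendered on this page. A minimal sketch of its likely shape, assuming Mistral's OpenAI-compatible GET /v1/models endpoint (the function name and return shape are illustrative, not the commit's actual code):

// Hypothetical helper: list the chat models available to a Mistral API key.
// Assumes Node 18+ (global fetch) and Mistral's documented /v1/models route.
async function mistralModels(apiKey = null) {
  const key = apiKey || process.env.MISTRAL_API_KEY;
  if (!key) return { models: [], error: "No Mistral API key provided." };

  const response = await fetch("https://api.mistral.ai/v1/models", {
    headers: { Authorization: `Bearer ${key}` },
  });
  if (!response.ok) return { models: [], error: response.statusText };

  // The endpoint returns { object: "list", data: [{ id, ... }, ...] }.
  const { data = [] } = await response.json();
  return { models: data.map((model) => ({ id: model.id })), error: null };
}

This matches the { models } destructuring in the useEffect above, where a missing key short-circuits to an empty list.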
18 changes: 14 additions & 4 deletions frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx
@@ -27,11 +27,21 @@ function castToType(key, value) {
return definitions[key].cast(value);
}

function recommendedSettings(provider = null) {
switch (provider) {
case "mistral":
return { temp: 0 };
default:
return { temp: 0.7 };
}
}

export default function WorkspaceSettings({ active, workspace, settings }) {
const { slug } = useParams();
const formEl = useRef(null);
const [saving, setSaving] = useState(false);
const [hasChanges, setHasChanges] = useState(false);
const defaults = recommendedSettings(settings?.LLMProvider);

const handleUpdate = async (e) => {
setSaving(true);
@@ -143,20 +153,20 @@ export default function WorkspaceSettings({ active, workspace, settings }) {
This setting controls how "random" or dynamic your chat
responses will be.
<br />
The higher the number (2.0 maximum) the more random and
The higher the number (1.0 maximum) the more random and
incoherent.
<br />
<i>Recommended: 0.7</i>
<i>Recommended: {defaults.temp}</i>
</p>
</div>
<input
name="openAiTemp"
type="number"
min={0.0}
max={2.0}
max={1.0}
step={0.1}
onWheel={(e) => e.target.blur()}
defaultValue={workspace?.openAiTemp ?? 0.7}
defaultValue={workspace?.openAiTemp ?? defaults.temp}
className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
placeholder="0.7"
required={true}
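Two related adjustments ride along with the Mistral provider here: the temperature ceiling in the workspace settings drops from 2.0 to 1.0 (Mistral's chat API documents a 0 to 1 temperature range, which the UI now applies across providers), and the recommended default is provider-aware instead of a hard-coded 0.7. Per the recommendedSettings switch above:

// Illustrative calls against the function shown in this diff:
recommendedSettings("mistral"); // => { temp: 0 }
recommendedSettings("openai"); // => { temp: 0.7 } (default branch)
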
Binary file added frontend/src/media/llmprovider/mistral.jpeg
11 changes: 10 additions & 1 deletion frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -12,6 +12,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@@ -21,9 +22,10 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { MagnifyingGlass } from "@phosphor-icons/react";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";

export default function GeneralLLMPreference() {
const [saving, setSaving] = useState(false);
@@ -134,6 +136,13 @@ export default function GeneralLLMPreference() {
options: <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
{
name: "Native",
value: "native",
@@ -9,6 +9,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import ChromaLogo from "@/media/vectordbs/chroma.png";
import PineconeLogo from "@/media/vectordbs/pinecone.png";
import LanceDbLogo from "@/media/vectordbs/lancedb.png";
@@ -91,6 +92,13 @@ const LLM_SELECTION_PRIVACY = {
],
logo: TogetherAILogo,
},
mistral: {
name: "Mistral",
description: [
"Your prompts and document text used in response creation are visible to Mistral",
],
logo: MistralLogo,
},
};

const VECTOR_DB_PRIVACY = {
@@ -9,6 +9,7 @@ import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
@@ -17,6 +18,7 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
import paths from "@/utils/paths";
@@ -109,6 +111,13 @@ export default function LLMPreference({
options: <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
{
name: "Native",
value: "native",
4 changes: 4 additions & 0 deletions server/.env.example
@@ -41,6 +41,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'

# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'

###########################################
######## Embedding API SELECTION ##########
###########################################
12 changes: 12 additions & 0 deletions server/models/systemSettings.js
@@ -159,6 +159,18 @@ const SystemSettings = {
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
...(llmProvider === "mistral"
? {
MistralApiKey: !!process.env.MISTRAL_API_KEY,
MistralModelPref: process.env.MISTRAL_MODEL_PREF,

// For embedding credentials when mistral is selected.
OpenAiKey: !!process.env.OPEN_AI_KEY,
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
...(llmProvider === "native"
? {
NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
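Note the masking pattern: API keys are exposed to the UI only as booleans (!!process.env.MISTRAL_API_KEY), never as raw values, which is why the MistralOptions component above renders twenty asterisks as a stand-in once a key has been saved.
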
1 change: 1 addition & 0 deletions server/utils/AiProviders/anthropic/index.js
@@ -26,6 +26,7 @@ class AnthropicLLM {
);
this.embedder = embedder;
this.answerKey = v4().split("-")[0];
this.defaultTemp = 0.7;
}

streamingEnabled() {
5 changes: 3 additions & 2 deletions server/utils/AiProviders/azureOpenAi/index.js
@@ -25,6 +25,7 @@ class AzureOpenAiLLM {
"No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
);
this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
this.defaultTemp = 0.7;
}

#appendContext(contextTexts = []) {
@@ -93,7 +94,7 @@
);
const textResponse = await this.openai
.getChatCompletions(this.model, messages, {
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
})
.then((res) => {
@@ -130,7 +131,7 @@
this.model,
messages,
{
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
}
);
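The same one-line change repeats across the providers below: each class now carries a defaultTemp, and every completion call resolves its temperature as Number(workspace?.openAiTemp ?? this.defaultTemp), so a workspace-level override wins when set and the provider's default applies otherwise.
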
1 change: 1 addition & 0 deletions server/utils/AiProviders/gemini/index.js
@@ -22,6 +22,7 @@ class GeminiLLM {
"INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7; // not used for Gemini
}

#appendContext(contextTexts = []) {
5 changes: 3 additions & 2 deletions server/utils/AiProviders/lmStudio/index.js
@@ -25,6 +25,7 @@ class LMStudioLLM {
"INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7;
}

#appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@
const textResponse = await this.lmstudio
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
@@ -122,7 +123,7 @@
const streamRequest = await this.lmstudio.createChatCompletion(
{
model: this.model,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
stream: true,
messages: await this.compressMessages(
5 changes: 3 additions & 2 deletions server/utils/AiProviders/localAi/index.js
@@ -27,6 +27,7 @@ class LocalAiLLM {
"INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7;
}

#appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@
const textResponse = await this.openai
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
@@ -123,7 +124,7 @@
{
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
(The remaining changed files in this commit did not render on this page.)
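
Among the files not rendered above is, presumably, the new server-side Mistral provider itself. A minimal sketch of how such a class would slot into the pattern visible in the LocalAiLLM and LMStudioLLM diffs, assuming the same openai v3 client pointed at a custom basePath; the class shape, base URL, and method body are illustrative, not the commit's actual file:

// Hypothetical server/utils/AiProviders/mistral/index.js, following the
// conventions of the other providers in this commit.
const { Configuration, OpenAIApi } = require("openai");

class MistralLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.MISTRAL_API_KEY)
      throw new Error("No Mistral API key was set.");

    const config = new Configuration({
      basePath: "https://api.mistral.ai/v1", // OpenAI-compatible endpoint
      apiKey: process.env.MISTRAL_API_KEY,
    });
    this.openai = new OpenAIApi(config);
    this.model =
      modelPreference || process.env.MISTRAL_MODEL_PREF || "mistral-tiny";
    this.embedder = embedder;
    this.defaultTemp = 0.0; // matches recommendedSettings("mistral")
  }

  // Single-shot chat completion; temperature resolution mirrors the
  // Number(workspace?.openAiTemp ?? this.defaultTemp) pattern above.
  async getChatCompletion(messages = [], { temperature = 0.0 } = {}) {
    const { data } = await this.openai.createChatCompletion({
      model: this.model,
      temperature: Number(temperature),
      messages,
    });
    return data?.choices?.[0]?.message?.content ?? null;
  }
}

module.exports = { MistralLLM };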