
Merge branch 'main' into google-updates
danny-avila authored Dec 18, 2024
2 parents 6b12cd9 + 22a87b6 · commit 468f220
Showing 184 changed files with 7,099 additions and 1,136 deletions.
3 changes: 2 additions & 1 deletion .env.example
@@ -119,6 +119,7 @@ BINGAI_TOKEN=user_provided
# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken

# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
@@ -143,7 +144,7 @@ GOOGLE_KEY=user_provided
# GOOGLE_AUTH_HEADER=true

# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
# GOOGLE_MODELS=gemini-2.0-flash-exp,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision

# Vertex AI
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
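The note above says the `GOOGLE_MODELS` and `BEDROCK_AWS_MODELS` lists are optional and comma-separated, and that all known, supported model IDs are used when they are omitted. A minimal sketch of how such a variable is commonly consumed, assuming a simple split-on-comma loader; the default list and function name are illustrative, not LibreChat's actual loader:

```js
// Sketch: turn an optional comma-separated model list from the environment
// into an array, falling back to a default set when the variable is unset.
const DEFAULT_GOOGLE_MODELS = ['gemini-2.0-flash-exp', 'gemini-1.5-pro-latest']; // illustrative defaults

function parseModelList(envValue, defaults) {
  if (!envValue) {
    return defaults;
  }
  return envValue
    .split(',')
    .map((model) => model.trim())
    .filter(Boolean);
}

const googleModels = parseModelList(process.env.GOOGLE_MODELS, DEFAULT_GOOGLE_MODELS);
console.log(googleModels); // e.g. ['gemini-2.0-flash-exp', 'gemini-exp-1121', ...] when the variable is set
```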
40 changes: 40 additions & 0 deletions .eslintrc.js
@@ -18,6 +18,10 @@ module.exports = {
'client/dist/**/*',
'client/public/**/*',
'e2e/playwright-report/**/*',
'packages/mcp/types/**/*',
'packages/mcp/dist/**/*',
'packages/mcp/test_bundle/**/*',
'api/demo/**/*',
'packages/data-provider/types/**/*',
'packages/data-provider/dist/**/*',
'packages/data-provider/test_bundle/**/*',
@@ -136,6 +140,30 @@ module.exports = {
},
],
},
{
files: './api/demo/**/*.ts',
overrides: [
{
files: '**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './packages/data-provider/tsconfig.json',
},
},
],
},
{
files: './packages/mcp/**/*.ts',
overrides: [
{
files: '**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './packages/mcp/tsconfig.json',
},
},
],
},
{
files: './config/translations/**/*.ts',
parser: '@typescript-eslint/parser',
@@ -149,6 +177,18 @@ module.exports = {
project: './packages/data-provider/tsconfig.spec.json',
},
},
{
files: ['./api/demo/specs/**/*.ts'],
parserOptions: {
project: './packages/data-provider/tsconfig.spec.json',
},
},
{
files: ['./packages/mcp/specs/**/*.ts'],
parserOptions: {
project: './packages/mcp/tsconfig.spec.json',
},
},
],
settings: {
react: {
5 changes: 4 additions & 1 deletion .github/workflows/backend-review.yml
@@ -33,8 +33,11 @@ jobs:
- name: Install dependencies
run: npm ci

- name: Install Data Provider
- name: Install Data Provider Package
run: npm run build:data-provider

- name: Install MCP Package
run: npm run build:mcp

- name: Create empty auth.json file
run: |
3 changes: 2 additions & 1 deletion .vscode/launch.json
@@ -10,7 +10,8 @@
"env": {
"NODE_ENV": "production"
},
"console": "integratedTerminal"
"console": "integratedTerminal",
"envFile": "${workspaceFolder}/.env"
}
]
}
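The added `envFile` entry points the VS Code debugger at the repository's `.env`, so debug sessions see the same variables as a normal run. Outside the debugger, the equivalent in Node is typically handled by the `dotenv` package; a minimal sketch, assuming `dotenv` is installed and `.env` sits at the project root:

```js
// Sketch: load .env into process.env, mirroring what the launch config's
// "envFile" setting does for a VS Code debug session.
const path = require('path');
const dotenv = require('dotenv');

dotenv.config({ path: path.resolve(__dirname, '.env') });

console.log('NODE_ENV =', process.env.NODE_ENV);
```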
12 changes: 11 additions & 1 deletion Dockerfile.multi
@@ -10,6 +10,7 @@ RUN npm config set fetch-retry-maxtimeout 600000 && \
npm config set fetch-retry-mintimeout 15000
COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/mcp/package*.json ./packages/mcp/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
RUN npm ci
@@ -21,6 +22,14 @@ COPY packages/data-provider ./
RUN npm run build
RUN npm prune --production

# Build mcp package
FROM base AS mcp-build
WORKDIR /app/packages/mcp
COPY packages/mcp ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
RUN npm prune --production

# Client build
FROM base AS client-build
WORKDIR /app/client
@@ -36,9 +45,10 @@ WORKDIR /app
COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
RUN npm prune --production
EXPOSE 3080
ENV HOST=0.0.0.0
CMD ["node", "server/index.js"]
CMD ["node", "server/index.js"]
99 changes: 66 additions & 33 deletions README.md
@@ -38,42 +38,75 @@
</a>
</p>

# 📃 Features

- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
- 🤖 AI model selection:
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Plugins, Assistants API (including Azure Assistants)
- ✅ Compatible across both **[Remote & Local AI services](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):**
- groq, Ollama, Cohere, Mistral AI, Apple MLX, koboldcpp, OpenRouter, together.ai, Perplexity, ShuttleAI, and more
- 🪄 Generative UI with **[Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3)**
- Create React, HTML code, and Mermaid diagrams right in chat
- 💾 Create, Save, & Share Custom Presets
- 🔀 Switch between AI Endpoints and Presets, mid-chat
- 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
- 🌿 Fork Messages & Conversations for Advanced Context control
- 💬 Multimodal Chat:
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o` and `gpt-4o-mini`), and Gemini Vision 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
- Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
- Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
- Non-OpenAI Agents in Active Development 🚧
- 🌎 Multilingual UI:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
# ✨ Features

- 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features

- 🤖 **AI Model Selection**:
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Assistants API (incl. Azure)
- [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required
- Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):
- Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai,
- OpenRouter, Perplexity, ShuttleAI, Deepseek, Qwen, and more

- 🔧 **[Code Interpreter API](https://www.librechat.ai/docs/features/code_interpreter)**:
- Secure, Sandboxed Execution in Python, Node.js (JS/TS), Go, C/C++, Java, PHP, Rust, and Fortran
- Seamless File Handling: Upload, process, and download files directly
- No Privacy Concerns: Fully isolated and secure execution

- 🔦 **Agents & Tools Integration**:
- **[LibreChat Agents](https://www.librechat.ai/docs/features/agents)**:
- No-Code Custom Assistants: Build specialized, AI-driven helpers without coding
- Flexible & Extensible: Attach tools like DALL-E-3, file search, code execution, and more
- Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, and more
- Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions

- 🪄 **Generative UI with Code Artifacts**:
- [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat

- 💾 **Presets & Context Management**:
- Create, Save, & Share Custom Presets
- Switch between AI Endpoints and Presets mid-chat
- Edit, Resubmit, and Continue Messages with Conversation branching
- [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control

- 💬 **Multimodal & File Interactions**:
- Upload and analyze images with Claude 3, GPT-4o, o1, Llama-Vision, and Gemini 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️

- 🌎 **Multilingual UI**:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
- 🎨 Customizable Dropdown & Interface: Adapts to both power users and newcomers
- 📧 Verify your email to ensure secure access
- 🗣️ Chat hands-free with Speech-to-Text and Text-to-Speech magic
- Automatically send and play Audio

- 🎨 **Customizable Interface**:
- Customizable Dropdown & Interface that adapts to both power users and newcomers

- 📧 **Secure Access**:
- Verify your email to ensure secure access

- 🗣️ **Speech & Audio**:
- Chat hands-free with Speech-to-Text and Text-to-Speech
- Automatically send and play Audio
  - Supports OpenAI, Azure OpenAI, and ElevenLabs
- 📥 Import Conversations from LibreChat, ChatGPT, Chatbot UI
- 📤 Export conversations as screenshots, markdown, text, json
- 🔍 Search all messages/conversations
- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
- ⚙️ Configure Proxy, Reverse Proxy, Docker, & many Deployment options:

- 📥 **Import & Export Conversations**:
- Import Conversations from LibreChat, ChatGPT, Chatbot UI
- Export conversations as screenshots, markdown, text, json

- 🔍 **Search & Discovery**:
- Search all messages/conversations

- 👥 **Multi-User & Secure**:
- Multi-User, Secure Authentication with OAuth2 & Email Login Support
  - Built-in Moderation and Token spend tools

- ⚙️ **Configuration & Deployment**:
- Configure Proxy, Reverse Proxy, Docker, & many Deployment options
- Use completely local or deploy on the cloud
- 📖 Completely Open-Source & Built in Public
- 🧑‍🤝‍🧑 Community-driven development, support, and feedback

- 📖 **Open-Source & Community**:
- Completely Open-Source & Built in Public
- Community-driven development, support, and feedback

[For a thorough review of our features, see our docs here](https://docs.librechat.ai/) 📚

6 changes: 6 additions & 0 deletions api/app/clients/OpenAIClient.js
@@ -423,6 +423,7 @@ class OpenAIClient extends BaseClient {
promptPrefix: this.options.promptPrefix,
resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
modelLabel: this.options.modelLabel,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
spec: this.options.spec,
@@ -1324,6 +1325,11 @@ ${convo}
/** @type {(value: void | PromiseLike<void>) => void} */
let streamResolve;

if (this.isO1Model === true && this.azure && modelOptions.stream) {
delete modelOptions.stream;
delete modelOptions.stop;
}

if (modelOptions.stream) {
streamPromise = new Promise((resolve) => {
streamResolve = resolve;
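The guard added above strips `stream` and `stop` from the request when an o1 model is used through Azure, since those parameters were commonly rejected by o1 deployments. A standalone sketch of the same idea; the flag and option names mirror the client's `modelOptions`, but this is illustrative rather than the exact client code:

```js
// Sketch: drop request options that o1 deployments on Azure do not accept.
// `isO1Model` and `isAzure` stand in for the client's own flags.
function sanitizeO1Options(modelOptions, { isO1Model, isAzure }) {
  if (!isO1Model || !isAzure || !modelOptions.stream) {
    return modelOptions;
  }
  // Work on a shallow copy so the caller's object is left untouched.
  const sanitized = { ...modelOptions };
  delete sanitized.stream;
  delete sanitized.stop;
  return sanitized;
}

const result = sanitizeO1Options(
  { model: 'o1-preview', stream: true, stop: ['\n\n'], max_tokens: 512 },
  { isO1Model: true, isAzure: true },
);
console.log(result); // { model: 'o1-preview', max_tokens: 512 }
```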
1 change: 1 addition & 0 deletions api/app/clients/PluginsClient.js
@@ -43,6 +43,7 @@ class PluginsClient extends OpenAIClient {
return {
artifacts: this.options.artifacts,
chatGptLabel: this.options.chatGptLabel,
modelLabel: this.options.modelLabel,
promptPrefix: this.options.promptPrefix,
tools: this.options.tools,
...this.modelOptions,
1 change: 1 addition & 0 deletions api/app/clients/specs/OpenAIClient.test.js
@@ -412,6 +412,7 @@ describe('OpenAIClient', () => {
it('should return the correct save options', () => {
const options = client.getSaveOptions();
expect(options).toHaveProperty('chatGptLabel');
expect(options).toHaveProperty('modelLabel');
expect(options).toHaveProperty('promptPrefix');
});
});
46 changes: 28 additions & 18 deletions api/app/clients/tools/util/fileSearch.js
@@ -50,9 +50,10 @@ const primeFiles = async (options) => {
* @param {Object} options
* @param {ServerRequest} options.req
* @param {Array<{ file_id: string; filename: string }>} options.files
* @param {string} [options.entity_id]
* @returns
*/
const createFileSearchTool = async ({ req, files }) => {
const createFileSearchTool = async ({ req, files, entity_id }) => {
return tool(
async ({ query }) => {
if (files.length === 0) {
Expand All @@ -62,27 +63,36 @@ const createFileSearchTool = async ({ req, files }) => {
if (!jwtToken) {
return 'There was an error authenticating the file search request.';
}

/**
*
* @param {import('librechat-data-provider').TFile} file
* @returns {{ file_id: string, query: string, k: number, entity_id?: string }}
*/
const createQueryBody = (file) => {
const body = {
file_id: file.file_id,
query,
k: 5,
};
if (!entity_id) {
return body;
}
body.entity_id = entity_id;
logger.debug(`[${Tools.file_search}] RAG API /query body`, body);
return body;
};

const queryPromises = files.map((file) =>
axios
.post(
`${process.env.RAG_API_URL}/query`,
{
file_id: file.file_id,
query,
k: 5,
},
{
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
},
.post(`${process.env.RAG_API_URL}/query`, createQueryBody(file), {
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
},
)
})
.catch((error) => {
logger.error(
`Error encountered in \`file_search\` while querying file_id ${file._id}:`,
error,
);
logger.error('Error encountered in `file_search` while querying file:', error);
return null;
}),
);
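The refactor above moves the request body into `createQueryBody`, which attaches an optional `entity_id` (for example an agent or assistant scope) alongside `file_id`, `query`, and `k`, and logs the body when that scope is present. A compact sketch of the resulting call; `RAG_API_URL` and the `/query` route come from the code above, while the concrete IDs and the helper name are placeholders:

```js
const axios = require('axios');

// Sketch: build a /query payload the way createQueryBody does and send it.
function buildQueryBody({ file_id, query, k = 5, entity_id }) {
  const body = { file_id, query, k };
  if (entity_id) {
    body.entity_id = entity_id; // only included when an entity scope is provided
  }
  return body;
}

async function queryFile(jwtToken) {
  const body = buildQueryBody({
    file_id: 'file_abc123', // placeholder file id
    query: 'What does the report conclude?',
    entity_id: 'agent_xyz', // optional; omit for user-scoped searches
  });

  const response = await axios.post(`${process.env.RAG_API_URL}/query`, body, {
    headers: {
      Authorization: `Bearer ${jwtToken}`,
      'Content-Type': 'application/json',
    },
  });
  return response.data;
}

module.exports = { buildQueryBody, queryFile };
```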