diff --git a/context_chat_backend/chain/ingest/injest.py b/context_chat_backend/chain/ingest/injest.py
index f3cc607..2c7e379 100644
--- a/context_chat_backend/chain/ingest/injest.py
+++ b/context_chat_backend/chain/ingest/injest.py
@@ -1,4 +1,5 @@
 import re
+import threading
 from logging import error as log_error
 
 from fastapi.datastructures import UploadFile
@@ -12,6 +13,8 @@
 from .mimetype_list import SUPPORTED_MIMETYPES
 
+embed_lock = threading.Lock()
+
 
 def _allowed_file(file: UploadFile) -> bool:
 	return file.headers.get('type', default='') in SUPPORTED_MIMETYPES
 
@@ -148,8 +151,9 @@ def _process_sources(vectordb: BaseVectorDB, config: TConfig, sources: list[Uplo
 		if len(split_documents) == 0:
 			continue
 
-		user_client = vectordb.get_user_client(user_id)
-		doc_ids = user_client.add_documents(split_documents)
+		with embed_lock:
+			user_client = vectordb.get_user_client(user_id)
+			doc_ids = user_client.add_documents(split_documents)
 
 		# does not do per document error checking
 		success &= len(split_documents) == len(doc_ids)
diff --git a/context_chat_backend/controller.py b/context_chat_backend/controller.py
index e93a843..28b400f 100644
--- a/context_chat_backend/controller.py
+++ b/context_chat_backend/controller.py
@@ -341,7 +341,6 @@ def execute_query(query: Query) -> LLMOutput:
 @app.post('/query')
 @enabled_guard(app)
 def _(query: Query) -> LLMOutput:
-	global llm_lock
 	print('query:', query, flush=True)
 
 	if app_config['llm'][0] == 'nc_texttotext':
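
The patch above serializes the embed-and-insert step behind a single module-level threading.Lock (embed_lock), so concurrent upload requests no longer race inside the vector DB client, and it removes a `global llm_lock` statement from the /query handler in controller.py. Below is a minimal standalone sketch of the same locking pattern; FakeUserClient and add_documents_serialized are illustrative stand-ins, not code from this repository:

    import threading
    import time

    embed_lock = threading.Lock()

    class FakeUserClient:
        """Illustrative stand-in for vectordb.get_user_client(user_id)."""
        def add_documents(self, documents: list[str]) -> list[str]:
            time.sleep(0.1)  # simulate a slow, non-thread-safe embed + insert
            return [f'id-{i}' for i in range(len(documents))]

    def add_documents_serialized(client: FakeUserClient, documents: list[str]) -> list[str]:
        # Mirrors the patched _process_sources: only one thread at a time
        # enters the embed + insert step; other request threads block here.
        with embed_lock:
            return client.add_documents(documents)

    if __name__ == '__main__':
        threads = [
            threading.Thread(target=add_documents_serialized,
                             args=(FakeUserClient(), ['doc-a', 'doc-b']))
            for _ in range(4)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

One module-level lock is the simplest fix when the embedding backend cannot handle concurrent calls; the trade-off is that uploads from different users are also serialized, which a per-user or per-model lock could relax if the backend allowed it.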