fix: lock embedding model forward pass
Signed-off-by: Anupam Kumar <[email protected]>
kyteinsky committed Sep 19, 2024
1 parent 5e22e17 commit a5cecae
Showing 2 changed files with 6 additions and 3 deletions.
8 changes: 6 additions & 2 deletions context_chat_backend/chain/ingest/injest.py
@@ -1,4 +1,5 @@
 import re
+import threading
 from logging import error as log_error
 
 from fastapi.datastructures import UploadFile
@@ -12,6 +13,8 @@
 from .mimetype_list import SUPPORTED_MIMETYPES
 
 
+embed_lock = threading.Lock()
+
 def _allowed_file(file: UploadFile) -> bool:
     return file.headers.get('type', default='') in SUPPORTED_MIMETYPES
 
Expand Down Expand Up @@ -148,8 +151,9 @@ def _process_sources(vectordb: BaseVectorDB, config: TConfig, sources: list[Uplo
if len(split_documents) == 0:
continue

user_client = vectordb.get_user_client(user_id)
doc_ids = user_client.add_documents(split_documents)
with embed_lock:
user_client = vectordb.get_user_client(user_id)
doc_ids = user_client.add_documents(split_documents)

# does not do per document error checking
success &= len(split_documents) == len(doc_ids)
Expand Down
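What the change does: `_process_sources` can run on several request threads at once, and `add_documents` embeds the split documents before storing them, i.e. it drives the embedding model's forward pass. The new module-level `embed_lock` serializes that section so only one thread runs the model at a time. Below is a minimal, self-contained sketch of the same pattern; `FakeEmbedder` and `embed_documents` are hypothetical names for illustration, not the repository's API.

import threading

embed_lock = threading.Lock()  # one lock shared by all worker threads

class FakeEmbedder:
    """Stand-in for an embedding model whose forward pass is not thread-safe."""
    def embed(self, texts: list[str]) -> list[list[float]]:
        return [[float(len(t))] for t in texts]

_model = FakeEmbedder()

def embed_documents(texts: list[str]) -> list[list[float]]:
    # Only one thread at a time may enter; concurrent callers block here
    # until the current forward pass finishes and the lock is released.
    with embed_lock:
        return _model.embed(texts)

Any thread calling embed_documents takes the lock first, so forward passes never overlap.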
1 change: 0 additions & 1 deletion context_chat_backend/controller.py
@@ -341,7 +341,6 @@ def execute_query(query: Query) -> LLMOutput:
 @app.post('/query')
 @enabled_guard(app)
 def _(query: Query) -> LLMOutput:
-    global llm_lock
     print('query:', query, flush=True)
 
     if app_config['llm'][0] == 'nc_texttotext':
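Why the deleted line was safe to drop: in Python, `global` is only needed when a function rebinds a module-level name. This handler only acquires `llm_lock` (a read), so the declaration was a no-op. A small sketch of the distinction, with hypothetical names:

import threading

llm_lock = threading.Lock()
request_count = 0

def handle_query() -> None:
    # Acquiring (reading) the module-level lock needs no `global`.
    with llm_lock:
        pass

def bump_count() -> None:
    # Rebinding the module-level name does need `global`; otherwise
    # `request_count += 1` would raise UnboundLocalError.
    global request_count
    request_count += 1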
