
add property source and id to response
glorenzo972 committed Apr 6, 2024
1 parent d00ad17 commit 57eb94f
Showing 5 changed files with 19 additions and 11 deletions.
3 changes: 1 addition & 2 deletions Dockerfile
@@ -2,7 +2,6 @@ FROM python:3.10

WORKDIR /tiledesk-llm

- COPY .environ /tiledesk-llm/.environ
COPY log_conf.yaml /tiledesk-llm/log_conf.yaml
COPY pyproject.toml /tiledesk-llm/pyproject.toml
COPY ./tilellm /tiledesk-llm/tilellm
@@ -19,5 +18,5 @@ EXPOSE 8000
COPY entrypoint.sh /tiledesk-llm/entrypoint.sh
RUN chmod +x /tiledesk-llm/entrypoint.sh

ENTRYPOINT ["/tiledesk-llm/entrypoint.sh", "--redis_url", "redis://redis:6379/0"]
ENTRYPOINT ["/tiledesk-llm/entrypoint.sh"]

1 change: 1 addition & 0 deletions tilellm/__main__.py
@@ -151,6 +151,7 @@ async def create_scrape_item_main(item: ItemSingle, redis_client: aioredis.clien

@app.post("/api/qa")
async def post_ask_with_memory_main(question_answer:QuestionAnswer ):
+ print(question_answer)
logger.debug(question_answer)
result = ask_with_memory(question_answer)
logger.debug(result)
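For reference, a minimal sketch of how a client might call the /api/qa endpoint after this change. The host/port and the exact QuestionAnswer payload keys are assumptions inferred from the fields visible in this diff (question, namespace, gptkey, top_k), not a documented contract.

# Hypothetical client call; the URL and payload keys are assumptions, not part of this commit.
import requests

payload = {
    "question": "What is Tiledesk?",
    "namespace": "my-namespace",   # Pinecone namespace used by the retriever
    "gptkey": "sk-...",            # OpenAI API key forwarded to ChatOpenAI
    "top_k": 5,                    # number of chunks to retrieve
}

resp = requests.post("http://localhost:8000/api/qa", json=payload, timeout=60)
resp.raise_for_status()
body = resp.json()

# In addition to the existing fields (answer, sources, ids, ...), the response
# now also carries the aggregated "source" string and a single "id".
print(body["answer"])
print(body["source"], body["id"])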
22 changes: 13 additions & 9 deletions tilellm/controller/openai_controller.py
@@ -1,3 +1,4 @@
+ import fastapi
from langchain.chains import ConversationalRetrievalChain, LLMChain # This class must be used for the conversation
from langchain_core.prompts import PromptTemplate, SystemMessagePromptTemplate
from langchain_openai import ChatOpenAI
@@ -15,7 +16,6 @@ def ask_with_memory(question_answer):

try:
logger.info(question_answer)

#question = str
#namespace: str
#gptkey: str
@@ -50,9 +50,10 @@ def ask_with_memory(question_answer):

retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': question_answer.top_k, 'namespace':question_answer.namespace})

- mydocs = retriever.get_relevant_documents( question_answer.question)
+ #mydocs = retriever.get_relevant_documents( question_answer.question)
#from pprint import pprint
#pprint(mydocs)
#pprint(len(mydocs))




@@ -89,7 +90,7 @@ def ask_with_memory(question_answer):
crc = ConversationalRetrievalChain.from_llm(llm=llm,
retriever=retriever,
return_source_documents=True)

result = crc.invoke({'question': question_answer.question, 'chat_history': question_answer_list})

docs = result["source_documents"]
@@ -102,6 +103,8 @@ def ask_with_memory(question_answer):

ids = list(set(ids))
sources = list(set(sources))
+ source = " ".join(sources)
+ id = ids[0]

logger.info(result)

@@ -116,17 +119,19 @@ def ask_with_memory(question_answer):

result_to_return = RetrievalResult(
answer=result['answer'],
- sources=sources,
namespace=question_answer.namespace,
+ sources=sources,
ids=ids,
+ source= source,
+ id=id,
prompt_token_size=prompt_token_size,
success=success,
error_message = None,
chat_history_dict = chat_history_dict

)


+ return result_to_return.dict()
except Exception as e:
import traceback
traceback.print_exc()
@@ -143,10 +148,9 @@ def ask_with_memory(question_answer):
chat_history_dict = chat_history_dict

)
+ raise fastapi.exceptions.HTTPException(status_code=400, detail=result_to_return.model_dump())


#print("Prompt tokens:", openai_callback_handler.prompt_tokens)
#print("Completion tokens:", openai_callback_handler.total_cost)
return result_to_return.dict()

def add_pc_item(item):
return pinecone_add_item(item)
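A self-contained sketch of the aggregation the controller now performs on the retrieved documents, isolated from the chain setup. The helper name and the example values are hypothetical; the dedup-then-join logic follows the lines shown above.

# Hypothetical helper illustrating the new source/id aggregation in isolation.
from typing import List, Tuple

def aggregate_sources(ids: List[str], sources: List[str]) -> Tuple[str, str]:
    # Deduplicate, as the controller does before building the response.
    ids = list(set(ids))
    sources = list(set(sources))
    # New in this commit: one space-joined "source" string plus a single "id".
    # Note that set() does not preserve order, so ids[0] is an arbitrary match,
    # and an empty result list would raise IndexError here.
    source = " ".join(sources)
    first_id = ids[0]
    return source, first_id

source, first_id = aggregate_sources(
    ids=["chunk-1", "chunk-2"],
    sources=["https://example.com/a", "https://example.com/b"],
)

On the error path the controller now raises fastapi.exceptions.HTTPException(status_code=400, detail=result_to_return.model_dump()) instead of falling through to the plain return used before, so failures surface as a 400 response carrying the error payload.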
2 changes: 2 additions & 0 deletions tilellm/models/item_model.py
@@ -73,6 +73,8 @@ def top_k_range(cls, v):
class RetrievalResult(BaseModel):
answer:str = Field(default="No answer")
sources: Optional[List[str]]|None =None
+ source:str |None= None
+ id:str |None= None
namespace: str
ids: Optional[List[str]]|None =None
prompt_token_size: int = Field(default=0)
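For context, a standalone sketch of RetrievalResult with the two new optional fields. The remaining fields of the full model (success, error_message, chat_history_dict, ...) are omitted for brevity, and Pydantic v2 is assumed because the controller calls model_dump().

# Abbreviated sketch of the updated model; only the fields visible in this diff are included.
from typing import List, Optional
from pydantic import BaseModel, Field

class RetrievalResult(BaseModel):
    answer: str = Field(default="No answer")
    sources: Optional[List[str]] = None
    source: Optional[str] = None   # new: space-joined string of source URLs
    id: Optional[str] = None       # new: id of one matching chunk
    namespace: str
    ids: Optional[List[str]] = None
    prompt_token_size: int = Field(default=0)

result = RetrievalResult(
    namespace="my-namespace",
    answer="Tiledesk is an open-source live chat platform.",
    sources=["https://example.com/a"],
    source="https://example.com/a",
    ids=["chunk-1"],
    id="chunk-1",
)
print(result.model_dump())  # the new source/id keys appear in the serialized response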
2 changes: 2 additions & 0 deletions tilellm/shared/const.py
@@ -7,10 +7,12 @@
PINECONE_API_KEY = None
PINECONE_INDEX = None
PINECONE_TEXT_KEY = None

def populate_constant():
global PINECONE_API_KEY, PINECONE_INDEX, PINECONE_TEXT_KEY
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
PINECONE_INDEX = os.environ.get("PINECONE_INDEX")
PINECONE_TEXT_KEY = os.environ.get("PINECONE_TEXT_KEY")



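A minimal sketch of how these module-level Pinecone constants are expected to be initialised; setting the environment variables inline and calling populate_constant() at startup are assumptions made for illustration, not shown in this commit.

# Hypothetical startup snippet; the environment values are placeholders.
import os
from tilellm.shared import const

os.environ.setdefault("PINECONE_API_KEY", "pc-xxxxxxxx")
os.environ.setdefault("PINECONE_INDEX", "tilellm-index")
os.environ.setdefault("PINECONE_TEXT_KEY", "text")

# Copies the environment values into the module-level globals above.
const.populate_constant()
print(const.PINECONE_INDEX)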