From adade60c6a7e08d7d67fda6cfcc42e9665971044 Mon Sep 17 00:00:00 2001
From: potthoffjan
Date: Tue, 16 Jul 2024 18:48:19 +0200
Subject: [PATCH] lint

Signed-off-by: potthoffjan
---
 .../combined_chain.py                         | 21 ++++++++++++-------
 ...d_chain copy.py => combined_chain_copy.py} | 11 +++++-----
 2 files changed, 18 insertions(+), 14 deletions(-)
 rename src/backend/RAG/LangChain_Implementation/{combined_chain copy.py => combined_chain_copy.py} (98%)

diff --git a/src/backend/RAG/LangChain_Implementation/combined_chain.py b/src/backend/RAG/LangChain_Implementation/combined_chain.py
index 1d68b85..12f03cb 100644
--- a/src/backend/RAG/LangChain_Implementation/combined_chain.py
+++ b/src/backend/RAG/LangChain_Implementation/combined_chain.py
@@ -91,13 +91,13 @@ def hist_aware_answers(llm_list, input_string, message_history):
     which might reference context in the chat history, formulate a standalone question \
     which can be understood without the chat history. Do NOT answer the question, \
     just reformulate it if needed and otherwise return it as is."""
-    
+
     # add in custom user info: -----------------------------
     # custom_istructions = get_custom_instructions_callable()
     # user_info = " "
     # if custom_istructions:
-    # user_info = f"""Here is some information about the user, including the user's name, 
-    # their profile description and style instructions on how they want you to answer stylewise: 
+    # user_info = f"""Here is some information about the user, including the user's name,
+    # their profile description and style instructions on how they want you to answer stylewise:
     # User Name: {custom_istructions['name']}
     # Style Instrctions: {custom_istructions['styleInstructions']}
     # Personal Info: {custom_istructions['personalInstructions']}
@@ -111,14 +111,13 @@ def hist_aware_answers(llm_list, input_string, message_history):
     to answer accurately. write your response in markdown form and also add reference url
     so user can know from which source you are answering the questions.
     """
-    
-    context_str ="""
+
+    context_str = """
     CONTEXT:
     {context}
 
     """
 
-
     # health_ai_template = f'{init_prompt}{agent_str}{user_info}{context_str}'
     health_ai_template = f'{init_prompt}{agent_str}{context_str}'
 
@@ -150,10 +149,16 @@ def hist_aware_answers(llm_list, input_string, message_history):
             ('human', '{input}'),
         ]
     )
-    history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
+    history_aware_retriever = create_history_aware_retriever(
+        llm, retriever, contextualize_q_prompt
+    )
 
     qa_prompt = ChatPromptTemplate.from_messages(
-        [('system', health_ai_template), MessagesPlaceholder('chat_history'), ('human', '{input}')]
+        [
+            ('system', health_ai_template),
+            MessagesPlaceholder('chat_history'),
+            ('human', '{input}'),
+        ]
     )
 
     question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
diff --git a/src/backend/RAG/LangChain_Implementation/combined_chain copy.py b/src/backend/RAG/LangChain_Implementation/combined_chain_copy.py
similarity index 98%
rename from src/backend/RAG/LangChain_Implementation/combined_chain copy.py
rename to src/backend/RAG/LangChain_Implementation/combined_chain_copy.py
index 1a0ceba..4c49910 100644
--- a/src/backend/RAG/LangChain_Implementation/combined_chain copy.py
+++ b/src/backend/RAG/LangChain_Implementation/combined_chain_copy.py
@@ -139,13 +139,13 @@ def hist_aware_answers(llm_list, input_string, message_history):
     which might reference context in the chat history, formulate a standalone question \
     which can be understood without the chat history. Do NOT answer the question, \
     just reformulate it if needed and otherwise return it as is."""
-    
+
    # add in custom user info: -----------------------------
    # custom_istructions = get_custom_instructions_callable()
    # user_info = " "
    # if custom_istructions:
-    # user_info = f"""Here is some information about the user, including the user's name, 
-    # their profile description and style instructions on how they want you to answer stylewise: 
+    # user_info = f"""Here is some information about the user, including the user's name,
+    # their profile description and style instructions on how they want you to answer stylewise:
    # User Name: {custom_istructions['name']}
    # Style Instrctions: {custom_istructions['styleInstructions']}
    # Personal Info: {custom_istructions['personalInstructions']}
@@ -159,14 +159,13 @@ def hist_aware_answers(llm_list, input_string, message_history):
     to answer accurately. write your response in markdown form and also add reference url
     so user can know from which source you are answering the questions.
     """
-    
-    context_str ="""
+
+    context_str = """
     CONTEXT:
     {context}
 
     """
 
-
     health_ai_template = f'{init_prompt}{agent_str}{user_info}{context_str}'
 
     # Parallel processing