# Python script to test out Langchain via the quickstart tutorial: https://python.langchain.com/v0.1/docs/get_started/quickstart/
from dotenv import load_dotenv
load_dotenv()  # pull OPENAI_API_KEY (and any other secrets) from a local .env file
# LLM
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# RAG
from langchain_community.document_loaders import WebBaseLoader # Scrape web
from langchain_openai import OpenAIEmbeddings # Create embeddings
from langchain_community.vectorstores import FAISS # Store in vector DB
from langchain_text_splitters import RecursiveCharacterTextSplitter # Split text into docs
from langchain.chains.combine_documents import create_stuff_documents_chain # Send docs to LLM
from langchain.chains import create_retrieval_chain # Let the retriever find the relevant docs from the vector DB
# Chat history
from langchain.chains import create_history_aware_retriever
from langchain_core.prompts import MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage
llm = ChatOpenAI()
embeddings = OpenAIEmbeddings()
# Simple
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a world class technical documentation writer."),
    ("user", "{input}")
])
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
print(chain.invoke({"input":"how can langsmith help with testing?"}))
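# LCEL chains expose .stream as well as .invoke; a small sketch (my addition,
# not in the quickstart snippet) that runs the same chain token-by-token, which
# helps when the full response is slow to arrive.
for chunk in chain.stream({"input": "how can langsmith help with testing?"}):
    print(chunk, end="", flush=True)
print()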
# RAG
loader = WebBaseLoader("https://docs.smith.langchain.com/user_guide")
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
vector = FAISS.from_documents(documents, embeddings)
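# Optional sanity check (an addition, not in the original script): query the
# FAISS store directly to confirm the embeddings and index behave as expected
# before wiring up the full retrieval chain.
for doc in vector.similarity_search("how can langsmith help with testing?", k=2):
    print(doc.page_content[:200])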
prompt = ChatPromptTemplate.from_template("""Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}""")
# Take incoming questions, look up relevant docs, and pass those docs and
# the question to the LLM, asking it to answer the original question.
document_chain = create_stuff_documents_chain(llm, prompt)
retriever = vector.as_retriever()
retrieval_chain = create_retrieval_chain(retriever, document_chain)
response = retrieval_chain.invoke({"input": "How can I use Langsmith for testing?"})
print(response)
print("\n\n#####\n\n")
print(response["answer"])
# Chat history
prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}"),
    ("user", "Given the above conversation, generate a search query to look up in order to get information relevant to the conversation")
])
retriever_chain = create_history_aware_retriever(llm, retriever, prompt)
chat_history = [
    HumanMessage(content="Can LangSmith help test my LLM applications?"),
    AIMessage(content="Yes!")
]
response = retriever_chain.invoke({
    "chat_history": chat_history,
    "input": "Tell me how"
})
print("\n\n### CHAT HISTORY ###\n\n")
print(response)
print("\n\n#####\n\n")
print(response[1].page_content)  # the history-aware retriever returns a list of Documents, not a dict with an "answer" key
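# A minimal sketch of the quickstart's closing step (not in the original
# script; the variable names below are mine): feed the documents found by the
# history-aware retriever into an answering prompt, so the follow-up question
# actually gets answered with the chat history taken into account.
answer_prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer the user's questions based on the below context:\n\n{context}"),
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}")
])
answer_chain = create_stuff_documents_chain(llm, answer_prompt)
conversational_chain = create_retrieval_chain(retriever_chain, answer_chain)
response = conversational_chain.invoke({
    "chat_history": chat_history,
    "input": "Tell me how"
})
print(response["answer"])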