-
Notifications
You must be signed in to change notification settings - Fork 0
/
initialization.py
78 lines (67 loc) · 3.51 KB
/
initialization.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import streamlit as st
from langchain.embeddings import OllamaEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import NLTKTextSplitter
from langchain.memory import ConversationBufferMemory
from langchain.chains import RetrievalQA, ConversationChain
from prompts.prompts import templates
from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOllama
from PyPDF2 import PdfReader
from prompts.prompt_selector import prompt_sector
def embedding(text):
    """Build an in-memory FAISS similarity index over *text*.

    The text is split into sentence-aware chunks with NLTK, each chunk is
    embedded via Ollama embeddings, and the vectors are loaded into a FAISS
    index that callers use as a retriever.

    Args:
        text: Raw document text (job description or resume contents).

    Returns:
        A ``FAISS`` vector store built from the text chunks.
    """
    splitter = NLTKTextSplitter()
    chunks = splitter.split_text(text)
    return FAISS.from_texts(chunks, OllamaEmbeddings())
def resume_reader(resume):
    """Extract the plain text of every page of an uploaded PDF resume.

    Args:
        resume: A file path or binary file-like object accepted by
            ``PyPDF2.PdfReader`` (e.g. a Streamlit ``UploadedFile``).

    Returns:
        The concatenated text of all pages. Pages from which PyPDF2 cannot
        extract text (image-only / scanned pages) contribute an empty
        string instead of crashing.
    """
    pdf_reader = PdfReader(resume)
    # Fix: extract_text() may return None for image-only pages; the original
    # ``text += page.extract_text()`` raised TypeError in that case.  Using
    # str.join also avoids quadratic string concatenation.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
def initialize_session_state(template=None, position=None):
    """Initialize session states"""
    # Build the retrieval index from whichever input the user supplied:
    # a job description ('jd') takes precedence over an uploaded resume.
    # NOTE(review): this runs on every call — Streamlit reruns scripts per
    # interaction, so consider guarding with `if 'docsearch' not in
    # st.session_state` to avoid re-embedding each rerun; confirm intended.
    if 'jd' in st.session_state:
        st.session_state.docsearch = embedding(st.session_state.jd)
    else:
        st.session_state.docsearch = embedding(resume_reader(st.session_state.resume))
    st.session_state.retriever = st.session_state.docsearch.as_retriever(search_type="similarity")
    # Choose the interview prompt: in the JD flow the caller passes a ready
    # template string; in the resume flow the prompt is picked by position.
    # NOTE(review): in the JD branch `template` must not be None — TODO confirm
    # all callers pass it.
    if 'jd' in st.session_state:
        Interview_Prompt = PromptTemplate(input_variables=["context", "question"], template=template)
        st.session_state.chain_type_kwargs = {"prompt": Interview_Prompt}
    else:
        st.session_state.chain_type_kwargs = prompt_sector(position, templates)
    # NOTE(review): one ConversationBufferMemory instance is shared by the
    # guideline QA chain, the interviewer chain, and the feedback chain, so
    # their histories intermix — confirm this is intentional.
    st.session_state.memory = ConversationBufferMemory()
    st.session_state.history = []
    st.session_state.token_count = 0
    # Eagerly generate the interview guideline with a smaller/cheaper model;
    # .run() executes the chain immediately, so this call hits the LLM here.
    llm = ChatOllama(model_name="ollama-3b", temperature=0.6)
    st.session_state.guideline = RetrievalQA.from_chain_type(llm=llm,
        chain_type_kwargs=st.session_state.chain_type_kwargs, chain_type='stuff',
        retriever=st.session_state.retriever, memory=st.session_state.memory).run(
        "Create an interview guideline and prepare only one question for each topic.")
    # The interactive interviewer uses a larger model at higher temperature.
    llm = ChatOllama(model_name="ollama-13b", temperature=0.8)
    PROMPT = PromptTemplate(input_variables=["history", "input"], template="""I want you to act as an interviewer strictly following the guideline in the current conversation.
Ask me questions and wait for my answers like a real person.
Do not write explanations.
Ask question like a real person, only one question at a time.
Do not ask the same question.
Do not repeat the question.
Do ask follow-up questions if necessary.
You name is GPTInterviewer.
I want you to only reply as an interviewer.
Do not write all the conversation at once.
If there is an error, point it out.
Current Conversation:
{history}
Candidate: {input}
AI: """)
    st.session_state.screen = ConversationChain(prompt=PROMPT, llm=llm, memory=st.session_state.memory)
    # Feedback chain reuses the shared memory so it can grade the transcript.
    llm = ChatOllama(model_name="ollama-3b", temperature=0.5)
    st.session_state.feedback = ConversationChain(
        prompt=PromptTemplate(input_variables=["history", "input"], template=templates.feedback_template),
        llm=llm,
        memory=st.session_state.memory,
    )